//===----------------------------------------------------------------------===//
//
// This source file is part of the Soto for AWS open source project
//
// Copyright (c) 2017-2024 the Soto project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of Soto project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
// DO NOT EDIT.

#if canImport(FoundationEssentials)
import FoundationEssentials
#else
import Foundation
#endif
@_spi(SotoInternal) import SotoCore

extension TranscribeStreaming {
    // MARK: Enums

    /// String-backed language codes accepted by the Call Analytics streaming API (raw value is the wire string, e.g. "en-US").
    public enum CallAnalyticsLanguageCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case deDe = "de-DE"
        case enAu = "en-AU"
        case enGb = "en-GB"
        case enUs = "en-US"
        case esUs = "es-US"
        case frCa = "fr-CA"
        case frFr = "fr-FR"
        case itIt = "it-IT"
        case ptBr = "pt-BR"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Status values reported for clinical note generation (raw value is the wire string).
    public enum ClinicalNoteGenerationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case completed = "COMPLETED"
        case failed = "FAILED"
        case inProgress = "IN_PROGRESS"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Content-identification type; the only wire value is "PII".
    public enum ContentIdentificationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case pii = "PII"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Which transcripts to produce when content redaction is enabled (raw value is the wire string).
    public enum ContentRedactionOutput: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case redacted = "redacted"
        case redactedAndUnredacted = "redacted_and_unredacted"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Content-redaction type; the only wire value is "PII".
    public enum ContentRedactionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case pii = "PII"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Kind of transcript item. Raw values equal the case names, so they are left implicit.
    public enum ItemType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case pronunciation
        case punctuation

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// String-backed language codes accepted by streaming transcription (raw value is the wire string, e.g. "en-US").
    public enum LanguageCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case afZa = "af-ZA"
        case arAe = "ar-AE"
        case arSa = "ar-SA"
        case caEs = "ca-ES"
        case csCz = "cs-CZ"
        case daDk = "da-DK"
        case deCh = "de-CH"
        case deDe = "de-DE"
        case elGr = "el-GR"
        case enAb = "en-AB"
        case enAu = "en-AU"
        case enGb = "en-GB"
        case enIe = "en-IE"
        case enIn = "en-IN"
        case enNz = "en-NZ"
        case enUs = "en-US"
        case enWl = "en-WL"
        case enZa = "en-ZA"
        case esEs = "es-ES"
        case esUs = "es-US"
        case euEs = "eu-ES"
        case faIr = "fa-IR"
        case fiFi = "fi-FI"
        case frCa = "fr-CA"
        case frFr = "fr-FR"
        case glEs = "gl-ES"
        case heIl = "he-IL"
        case hiIn = "hi-IN"
        case hrHr = "hr-HR"
        case idId = "id-ID"
        case itIt = "it-IT"
        case jaJp = "ja-JP"
        case koKr = "ko-KR"
        case lvLv = "lv-LV"
        case msMy = "ms-MY"
        case nlNl = "nl-NL"
        case noNo = "no-NO"
        case plPl = "pl-PL"
        case ptBr = "pt-BR"
        case ptPt = "pt-PT"
        case roRo = "ro-RO"
        case ruRu = "ru-RU"
        case skSk = "sk-SK"
        case soSo = "so-SO"
        case srRs = "sr-RS"
        case svSe = "sv-SE"
        case thTh = "th-TH"
        case tlPh = "tl-PH"
        case ukUa = "uk-UA"
        case viVn = "vi-VN"
        case zhCn = "zh-CN"
        case zhHk = "zh-HK"
        case zhTw = "zh-TW"
        case zuZa = "zu-ZA"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Audio encodings accepted on the input stream (raw value is the wire string).
    public enum MediaEncoding: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case flac = "flac"
        case oggOpus = "ogg-opus"
        case pcm = "pcm"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Medical content-identification type; the only wire value is "PHI".
    public enum MedicalContentIdentificationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case phi = "PHI"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Language codes supported by Medical Scribe streaming; currently only "en-US".
    public enum MedicalScribeLanguageCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case enUs = "en-US"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Audio encodings accepted by Medical Scribe streaming (raw value is the wire string).
    public enum MedicalScribeMediaEncoding: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case flac = "flac"
        case oggOpus = "ogg-opus"
        case pcm = "pcm"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Clinical note template identifiers (raw value is the wire string).
    public enum MedicalScribeNoteTemplate: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case behavioralSoap = "BEHAVIORAL_SOAP"
        case birp = "BIRP"
        case dap = "DAP"
        case girpp = "GIRPP"
        case historyAndPhysical = "HISTORY_AND_PHYSICAL"
        case physicalSoap = "PHYSICAL_SOAP"
        case sirp = "SIRP"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Speaker roles in a Medical Scribe session (raw value is the wire string).
    public enum MedicalScribeParticipantRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case clinician = "CLINICIAN"
        case patient = "PATIENT"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Session-control event types; currently only "END_OF_SESSION".
    public enum MedicalScribeSessionControlEventType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case endOfSession = "END_OF_SESSION"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Lifecycle status of a Medical Scribe stream (raw value is the wire string).
    public enum MedicalScribeStreamStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case completed = "COMPLETED"
        case failed = "FAILED"
        case inProgress = "IN_PROGRESS"
        case paused = "PAUSED"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Kind of Medical Scribe transcript item. Raw values equal the case names, so they are left implicit.
    public enum MedicalScribeTranscriptItemType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case pronunciation
        case punctuation

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// How filtered vocabulary is handled in Medical Scribe output. Raw values equal the case names, so they are left implicit.
    public enum MedicalScribeVocabularyFilterMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case mask
        case remove
        case tag

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Partial-results stabilization level. Raw values equal the case names, so they are left implicit.
    public enum PartialResultsStability: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case high
        case low
        case medium

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Speaker roles in a Call Analytics session (raw value is the wire string).
    public enum ParticipantRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case agent = "AGENT"
        case customer = "CUSTOMER"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Pronoun options (raw value is the wire string).
    public enum Pronouns: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case heHim = "HE_HIM"
        case sheHer = "SHE_HER"
        case theyThem = "THEY_THEM"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Sentiment classifications (raw value is the wire string).
    public enum Sentiment: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case mixed = "MIXED"
        case negative = "NEGATIVE"
        case neutral = "NEUTRAL"
        case positive = "POSITIVE"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Medical specialty identifiers (raw value is the wire string).
    public enum Specialty: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case cardiology = "CARDIOLOGY"
        case neurology = "NEUROLOGY"
        case oncology = "ONCOLOGY"
        case primarycare = "PRIMARYCARE"
        case radiology = "RADIOLOGY"
        case urology = "UROLOGY"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// How filtered vocabulary is handled in transcription output. Raw values equal the case names, so they are left implicit.
    public enum VocabularyFilterMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case mask
        case remove
        case tag

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Session type; backticks escape the Swift keyword `Type` (raw value is the wire string).
    public enum `Type`: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case conversation = "CONVERSATION"
        case dictation = "DICTATION"

        /// Mirrors the raw wire string.
        public var description: String { rawValue }
    }

    /// Tagged union of events that can be sent on the audio input stream; exactly one key is encoded per event.
    public enum AudioStream: AWSEncodableShape, Sendable {
        /// A blob of audio from your application. Your audio stream consists of one or more audio events. For more information, see Event stream encoding.
        case audioEvent(AudioEvent)
        /// Contains audio channel definitions and post-call analytics settings.
        case configurationEvent(ConfigurationEvent)

        /// Encodes the wrapped event under its corresponding coding key.
        public func encode(to encoder: Encoder) throws {
            var container = encoder.container(keyedBy: CodingKeys.self)
            switch self {
            case .audioEvent(let event): try container.encode(event, forKey: .audioEvent)
            case .configurationEvent(let event): try container.encode(event, forKey: .configurationEvent)
            }
        }

        /// Validates the payload; only the configuration event carries validatable fields.
        public func validate(name: String) throws {
            if case .configurationEvent(let event) = self {
                try event.validate(name: "\(name).configurationEvent")
            }
        }

        private enum CodingKeys: String, CodingKey {
            case audioEvent = "AudioEvent"
            case configurationEvent = "ConfigurationEvent"
        }
    }

    /// Tagged union of events and errors received on the Call Analytics result stream; each message carries exactly one key.
    public enum CallAnalyticsTranscriptResultStream: AWSDecodableShape, Sendable {
        case badRequestException(BadRequestException)
        /// Provides information on matched categories that were used to generate real-time supervisor  alerts.
        case categoryEvent(CategoryEvent)
        case conflictException(ConflictException)
        case internalFailureException(InternalFailureException)
        case limitExceededException(LimitExceededException)
        case serviceUnavailableException(ServiceUnavailableException)
        /// Contains set of transcription results from one or more audio segments, along with additional  information per your request parameters. This can include information relating to channel definitions, partial result stabilization, sentiment, issue detection, and other transcription-related data.
        case utteranceEvent(UtteranceEvent)

        /// Decodes the single-key event wrapper, rejecting payloads with zero or multiple keys.
        public init(from decoder: Decoder) throws {
            let container = try decoder.container(keyedBy: CodingKeys.self)
            guard container.allKeys.count == 1, let key = container.allKeys.first else {
                throw DecodingError.dataCorrupted(
                    DecodingError.Context(
                        codingPath: container.codingPath,
                        debugDescription: "Expected exactly one key, but got \(container.allKeys.count)"
                    )
                )
            }
            switch key {
            case .badRequestException:
                self = .badRequestException(try container.decode(BadRequestException.self, forKey: key))
            case .categoryEvent:
                self = .categoryEvent(try container.decode(CategoryEvent.self, forKey: key))
            case .conflictException:
                self = .conflictException(try container.decode(ConflictException.self, forKey: key))
            case .internalFailureException:
                self = .internalFailureException(try container.decode(InternalFailureException.self, forKey: key))
            case .limitExceededException:
                self = .limitExceededException(try container.decode(LimitExceededException.self, forKey: key))
            case .serviceUnavailableException:
                self = .serviceUnavailableException(try container.decode(ServiceUnavailableException.self, forKey: key))
            case .utteranceEvent:
                self = .utteranceEvent(try container.decode(UtteranceEvent.self, forKey: key))
            }
        }

        private enum CodingKeys: String, CodingKey {
            case badRequestException = "BadRequestException"
            case categoryEvent = "CategoryEvent"
            case conflictException = "ConflictException"
            case internalFailureException = "InternalFailureException"
            case limitExceededException = "LimitExceededException"
            case serviceUnavailableException = "ServiceUnavailableException"
            case utteranceEvent = "UtteranceEvent"
        }
    }

    /// Tagged union of events that can be sent on the Medical Scribe input stream; exactly one key is encoded per event.
    public enum MedicalScribeInputStream: AWSEncodableShape, Sendable {
        case audioEvent(MedicalScribeAudioEvent)
        /// Specify additional streaming session configurations beyond those provided in your initial start request headers. For example, specify channel definitions, encryption settings, and post-stream analytics settings.  Whether you are starting a new session or resuming an existing session,  your first event must be a MedicalScribeConfigurationEvent.
        case configurationEvent(MedicalScribeConfigurationEvent)
        /// Specify the lifecycle of your streaming session, such as ending the session.
        case sessionControlEvent(MedicalScribeSessionControlEvent)

        /// Encodes the wrapped event under its corresponding coding key.
        public func encode(to encoder: Encoder) throws {
            var container = encoder.container(keyedBy: CodingKeys.self)
            switch self {
            case .audioEvent(let event): try container.encode(event, forKey: .audioEvent)
            case .configurationEvent(let event): try container.encode(event, forKey: .configurationEvent)
            case .sessionControlEvent(let event): try container.encode(event, forKey: .sessionControlEvent)
            }
        }

        /// Validates the payload; only the configuration event carries validatable fields.
        public func validate(name: String) throws {
            if case .configurationEvent(let event) = self {
                try event.validate(name: "\(name).configurationEvent")
            }
        }

        private enum CodingKeys: String, CodingKey {
            case audioEvent = "AudioEvent"
            case configurationEvent = "ConfigurationEvent"
            case sessionControlEvent = "SessionControlEvent"
        }
    }

    /// Tagged union of events and errors received on the Medical Scribe result stream; each message carries exactly one key.
    public enum MedicalScribeResultStream: AWSDecodableShape, Sendable {
        case badRequestException(BadRequestException)
        case conflictException(ConflictException)
        case internalFailureException(InternalFailureException)
        case limitExceededException(LimitExceededException)
        case serviceUnavailableException(ServiceUnavailableException)
        /// The transcript event that contains real-time transcription results.
        case transcriptEvent(MedicalScribeTranscriptEvent)

        /// Decodes the single-key event wrapper, rejecting payloads with zero or multiple keys.
        public init(from decoder: Decoder) throws {
            let container = try decoder.container(keyedBy: CodingKeys.self)
            guard container.allKeys.count == 1, let key = container.allKeys.first else {
                throw DecodingError.dataCorrupted(
                    DecodingError.Context(
                        codingPath: container.codingPath,
                        debugDescription: "Expected exactly one key, but got \(container.allKeys.count)"
                    )
                )
            }
            switch key {
            case .badRequestException:
                self = .badRequestException(try container.decode(BadRequestException.self, forKey: key))
            case .conflictException:
                self = .conflictException(try container.decode(ConflictException.self, forKey: key))
            case .internalFailureException:
                self = .internalFailureException(try container.decode(InternalFailureException.self, forKey: key))
            case .limitExceededException:
                self = .limitExceededException(try container.decode(LimitExceededException.self, forKey: key))
            case .serviceUnavailableException:
                self = .serviceUnavailableException(try container.decode(ServiceUnavailableException.self, forKey: key))
            case .transcriptEvent:
                self = .transcriptEvent(try container.decode(MedicalScribeTranscriptEvent.self, forKey: key))
            }
        }

        private enum CodingKeys: String, CodingKey {
            case badRequestException = "BadRequestException"
            case conflictException = "ConflictException"
            case internalFailureException = "InternalFailureException"
            case limitExceededException = "LimitExceededException"
            case serviceUnavailableException = "ServiceUnavailableException"
            case transcriptEvent = "TranscriptEvent"
        }
    }

    /// Tagged union of events and errors received on the medical transcription result stream; each message carries exactly one key.
    public enum MedicalTranscriptResultStream: AWSDecodableShape, Sendable {
        case badRequestException(BadRequestException)
        case conflictException(ConflictException)
        case internalFailureException(InternalFailureException)
        case limitExceededException(LimitExceededException)
        case serviceUnavailableException(ServiceUnavailableException)
        /// The MedicalTranscriptEvent associated with a  MedicalTranscriptResultStream. Contains a set of transcription results from one or more audio segments, along with  additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language  identification, and other transcription-related data.
        case transcriptEvent(MedicalTranscriptEvent)

        /// Decodes the single-key event wrapper, rejecting payloads with zero or multiple keys.
        public init(from decoder: Decoder) throws {
            let container = try decoder.container(keyedBy: CodingKeys.self)
            guard container.allKeys.count == 1, let key = container.allKeys.first else {
                throw DecodingError.dataCorrupted(
                    DecodingError.Context(
                        codingPath: container.codingPath,
                        debugDescription: "Expected exactly one key, but got \(container.allKeys.count)"
                    )
                )
            }
            switch key {
            case .badRequestException:
                self = .badRequestException(try container.decode(BadRequestException.self, forKey: key))
            case .conflictException:
                self = .conflictException(try container.decode(ConflictException.self, forKey: key))
            case .internalFailureException:
                self = .internalFailureException(try container.decode(InternalFailureException.self, forKey: key))
            case .limitExceededException:
                self = .limitExceededException(try container.decode(LimitExceededException.self, forKey: key))
            case .serviceUnavailableException:
                self = .serviceUnavailableException(try container.decode(ServiceUnavailableException.self, forKey: key))
            case .transcriptEvent:
                self = .transcriptEvent(try container.decode(MedicalTranscriptEvent.self, forKey: key))
            }
        }

        private enum CodingKeys: String, CodingKey {
            case badRequestException = "BadRequestException"
            case conflictException = "ConflictException"
            case internalFailureException = "InternalFailureException"
            case limitExceededException = "LimitExceededException"
            case serviceUnavailableException = "ServiceUnavailableException"
            case transcriptEvent = "TranscriptEvent"
        }
    }

    /// Tagged union of events and errors received on the transcription result stream; each message carries exactly one key.
    public enum TranscriptResultStream: AWSDecodableShape, Sendable {
        /// A client error occurred when the stream was created. Check the parameters of the request and try your request again.
        case badRequestException(BadRequestException)
        /// A new stream started with the same session ID. The current stream has been terminated.
        case conflictException(ConflictException)
        /// A problem occurred while processing the audio. Amazon Transcribe terminated  processing.
        case internalFailureException(InternalFailureException)
        /// Your client has exceeded one of the Amazon Transcribe limits. This is typically the audio length limit. Break your audio stream into smaller chunks and try your request again.
        case limitExceededException(LimitExceededException)
        /// The service is currently unavailable. Try your request later.
        case serviceUnavailableException(ServiceUnavailableException)
        /// Contains Transcript, which contains Results. The  object contains a set of transcription  results from one or more audio segments, along with additional information per your request  parameters.
        case transcriptEvent(TranscriptEvent)

        /// Decodes the single-key event wrapper, rejecting payloads with zero or multiple keys.
        public init(from decoder: Decoder) throws {
            let container = try decoder.container(keyedBy: CodingKeys.self)
            guard container.allKeys.count == 1, let key = container.allKeys.first else {
                throw DecodingError.dataCorrupted(
                    DecodingError.Context(
                        codingPath: container.codingPath,
                        debugDescription: "Expected exactly one key, but got \(container.allKeys.count)"
                    )
                )
            }
            switch key {
            case .badRequestException:
                self = .badRequestException(try container.decode(BadRequestException.self, forKey: key))
            case .conflictException:
                self = .conflictException(try container.decode(ConflictException.self, forKey: key))
            case .internalFailureException:
                self = .internalFailureException(try container.decode(InternalFailureException.self, forKey: key))
            case .limitExceededException:
                self = .limitExceededException(try container.decode(LimitExceededException.self, forKey: key))
            case .serviceUnavailableException:
                self = .serviceUnavailableException(try container.decode(ServiceUnavailableException.self, forKey: key))
            case .transcriptEvent:
                self = .transcriptEvent(try container.decode(TranscriptEvent.self, forKey: key))
            }
        }

        private enum CodingKeys: String, CodingKey {
            case badRequestException = "BadRequestException"
            case conflictException = "ConflictException"
            case internalFailureException = "InternalFailureException"
            case limitExceededException = "LimitExceededException"
            case serviceUnavailableException = "ServiceUnavailableException"
            case transcriptEvent = "TranscriptEvent"
        }
    }

    // MARK: Shapes

    /// One transcription alternative: its text plus the items and PII entities it contains.
    public struct Alternative: AWSDecodableShape {
        /// Contains entities identified as personally identifiable information (PII) in your transcription  output.
        public let entities: [Entity]?
        /// Contains words, phrases, or punctuation marks in your transcription output.
        public let items: [Item]?
        /// Contains transcribed text.
        public let transcript: String?

        /// Memberwise initializer; all fields default to `nil`.
        @inlinable
        public init(entities: [Entity]? = nil, items: [Item]? = nil, transcript: String? = nil) {
            self.entities = entities
            self.items = items
            self.transcript = transcript
        }

        // Maps lowerCamelCase Swift properties to the service's UpperCamelCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case entities = "Entities"
            case items = "Items"
            case transcript = "Transcript"
        }
    }

    /// A single chunk of audio sent on the input stream.
    public struct AudioEvent: AWSEncodableShape {
        ///  An audio blob containing the next segment of audio from your application, with a maximum duration of 1 second.  The maximum size in bytes varies based on audio properties.  Find recommended size in Transcribing streaming best practices.   Size calculation: Duration (s) * Sample Rate (Hz) * Number of Channels * 2 (Bytes per Sample)   For example, a 1-second chunk of 16 kHz, 2-channel, 16-bit audio would be  1 * 16000 * 2 * 2 = 64000 bytes.   For 8 kHz, 1-channel, 16-bit audio, a 1-second chunk would be  1 * 8000 * 1 * 2 = 16000 bytes.
        public let audioChunk: AWSEventPayload?

        /// Memberwise initializer; `audioChunk` defaults to `nil`.
        @inlinable
        public init(audioChunk: AWSEventPayload? = nil) {
            self.audioChunk = audioChunk
        }

        // Empty on purpose: the audio chunk is an event payload (AWSEventPayload),
        // so nothing is encoded through keyed coding keys.
        private enum CodingKeys: CodingKey {}
    }

    /// Error shape returned when the request was malformed; carries an optional service message.
    public struct BadRequestException: AWSDecodableShape {
        /// Human-readable description of the error, if the service provided one.
        public let message: String?

        /// Memberwise initializer; `message` defaults to `nil`.
        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        // Maps the Swift property to the service's UpperCamelCase wire key.
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    /// A PII entity detected in a Call Analytics transcript, with its timing, category, and confidence.
    public struct CallAnalyticsEntity: AWSDecodableShape {
        /// The time, in milliseconds, from the beginning of the audio stream to the start of the identified entity.
        public let beginOffsetMillis: Int64?
        /// The category of information identified. For example, PII.
        public let category: String?
        /// The confidence score associated with the identification of an entity in your transcript. Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified entity correctly matches the entity spoken in your media.
        public let confidence: Double?
        /// The word or words that represent the identified entity.
        public let content: String?
        /// The time, in milliseconds, from the beginning of the audio stream to the end of the identified entity.
        public let endOffsetMillis: Int64?
        /// The type of PII identified. For example, NAME or  CREDIT_DEBIT_NUMBER.
        public let type: String?

        /// Memberwise initializer; all fields default to `nil`.
        @inlinable
        public init(beginOffsetMillis: Int64? = nil, category: String? = nil, confidence: Double? = nil, content: String? = nil, endOffsetMillis: Int64? = nil, type: String? = nil) {
            self.beginOffsetMillis = beginOffsetMillis
            self.category = category
            self.confidence = confidence
            self.content = content
            self.endOffsetMillis = endOffsetMillis
            self.type = type
        }

        // Maps lowerCamelCase Swift properties to the service's UpperCamelCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case beginOffsetMillis = "BeginOffsetMillis"
            case category = "Category"
            case confidence = "Confidence"
            case content = "Content"
            case endOffsetMillis = "EndOffsetMillis"
            case type = "Type"
        }
    }

    /// A single transcribed item (word or punctuation) in a Call Analytics result, with timing, confidence, and stability metadata.
    public struct CallAnalyticsItem: AWSDecodableShape {
        /// The time, in milliseconds, from the beginning of the audio stream to the start of the identified item.
        public let beginOffsetMillis: Int64?
        /// The confidence score associated with a word or phrase in your transcript. Confidence scores are values between 0 and 1. A larger value indicates a higher probability that the identified item correctly matches the item spoken in your media.
        public let confidence: Double?
        /// The word or punctuation that was transcribed.
        public let content: String?
        /// The time, in milliseconds, from the beginning of the audio stream to the end of the identified item.
        public let endOffsetMillis: Int64?
        /// If partial result stabilization is enabled, Stable indicates whether the specified  item is stable (true) or if it may change when the segment is complete  (false).
        public let stable: Bool?
        /// The type of item identified. Options are: PRONUNCIATION (spoken words) and PUNCTUATION.
        public let type: ItemType?
        /// Indicates whether the specified item matches a word in the vocabulary filter included in your Call Analytics request. If true, there is a vocabulary filter match.
        public let vocabularyFilterMatch: Bool?

        /// Memberwise initializer; all fields default to `nil`.
        @inlinable
        public init(beginOffsetMillis: Int64? = nil, confidence: Double? = nil, content: String? = nil, endOffsetMillis: Int64? = nil, stable: Bool? = nil, type: ItemType? = nil, vocabularyFilterMatch: Bool? = nil) {
            self.beginOffsetMillis = beginOffsetMillis
            self.confidence = confidence
            self.content = content
            self.endOffsetMillis = endOffsetMillis
            self.stable = stable
            self.type = type
            self.vocabularyFilterMatch = vocabularyFilterMatch
        }

        // Maps lowerCamelCase Swift properties to the service's UpperCamelCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case beginOffsetMillis = "BeginOffsetMillis"
            case confidence = "Confidence"
            case content = "Content"
            case endOffsetMillis = "EndOffsetMillis"
            case stable = "Stable"
            case type = "Type"
            case vocabularyFilterMatch = "VocabularyFilterMatch"
        }
    }

    /// A language identified in the audio, paired with the confidence of that identification.
    public struct CallAnalyticsLanguageWithScore: AWSDecodableShape {
        /// The language code of the identified language.
        public let languageCode: CallAnalyticsLanguageCode?
        /// The confidence score associated with the identified language code. Confidence scores are values between zero and one; larger values indicate a higher confidence in the identified language.
        public let score: Double?

        /// Memberwise initializer; all fields default to `nil`.
        @inlinable
        public init(languageCode: CallAnalyticsLanguageCode? = nil, score: Double? = nil) {
            self.languageCode = languageCode
            self.score = score
        }

        // Maps lowerCamelCase Swift properties to the service's UpperCamelCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case languageCode = "LanguageCode"
            case score = "Score"
        }
    }

    /// Event listing Call Analytics categories matched in an audio segment, keyed by category name.
    public struct CategoryEvent: AWSDecodableShape {
        /// Lists the categories that were matched in your audio segment.
        public let matchedCategories: [String]?
        /// Contains information about the matched categories, including category names and timestamps.
        public let matchedDetails: [String: PointsOfInterest]?

        /// Memberwise initializer; all fields default to `nil`.
        @inlinable
        public init(matchedCategories: [String]? = nil, matchedDetails: [String: PointsOfInterest]? = nil) {
            self.matchedCategories = matchedCategories
            self.matchedDetails = matchedDetails
        }

        // Maps lowerCamelCase Swift properties to the service's UpperCamelCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case matchedCategories = "MatchedCategories"
            case matchedDetails = "MatchedDetails"
        }
    }

    /// Maps one audio channel to a call participant for Call Analytics streaming.
    public struct ChannelDefinition: AWSEncodableShape {
        /// Specify the audio channel you want to define.
        /// `validate(name:)` below constrains this to the range 0...1.
        public let channelId: Int
        /// Specify the speaker you want to define. Omitting this parameter is equivalent to specifying both participants.
        /// NOTE(review): this property is non-optional in the Swift shape, so it cannot actually be omitted here — confirm against the service model.
        public let participantRole: ParticipantRole

        @inlinable
        public init(channelId: Int = 0, participantRole: ParticipantRole) {
            self.channelId = channelId
            self.participantRole = participantRole
        }

        /// Client-side validation run before the request is sent.
        /// - Throws: a validation error when `channelId` is outside 0...1.
        public func validate(name: String) throws {
            try self.validate(self.channelId, name: "channelId", parent: name, max: 1)
            try self.validate(self.channelId, name: "channelId", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case channelId = "ChannelId"
            case participantRole = "ParticipantRole"
        }
    }

    public struct CharacterOffsets: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case begin = "Begin"
            case end = "End"
        }

        /// Character count of the first character where a match is identified — for example, the first character of an issue or category match in a segment transcript.
        public let begin: Int?
        /// Character count of the last character where a match is identified — for example, the last character of an issue or category match in a segment transcript.
        public let end: Int?

        @inlinable
        public init(begin: Int? = nil, end: Int? = nil) {
            self.begin = begin
            self.end = end
        }
    }

    public struct ClinicalNoteGenerationResult: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case clinicalNoteOutputLocation = "ClinicalNoteOutputLocation"
            case failureReason = "FailureReason"
            case status = "Status"
            case transcriptOutputLocation = "TranscriptOutputLocation"
        }

        /// Amazon S3 URI of the output clinical note.
        public let clinicalNoteOutputLocation: String?
        /// When the result is FAILED, describes why it failed.
        public let failureReason: String?
        /// Status of the clinical note generation. After audio streaming finishes and you send a
        /// MedicalScribeSessionControlEvent with Type END_OF_SESSION, the status is IN_PROGRESS.
        /// COMPLETED means the analytics succeeded and results are at the locations in
        /// ClinicalNoteOutputLocation and TranscriptOutputLocation; FAILED means FailureReason
        /// has details about the failure.
        public let status: ClinicalNoteGenerationStatus?
        /// Amazon S3 URI of the output transcript.
        public let transcriptOutputLocation: String?

        @inlinable
        public init(clinicalNoteOutputLocation: String? = nil, failureReason: String? = nil, status: ClinicalNoteGenerationStatus? = nil, transcriptOutputLocation: String? = nil) {
            self.clinicalNoteOutputLocation = clinicalNoteOutputLocation
            self.failureReason = failureReason
            self.status = status
            self.transcriptOutputLocation = transcriptOutputLocation
        }
    }

    public struct ClinicalNoteGenerationSettings: AWSEncodableShape & AWSDecodableShape {
        /// Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.   HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.    GIRPP: Provides summaries based on the patients progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.   BIRP: Focuses on the patient's behavioral patterns and responses. Examples of sections include Behavior, Intervention, Response, and Plan.   SIRP: Emphasizes the situational context of therapy. Examples of sections include Situation, Intervention, Response, and Plan.   DAP: Provides a simplified format for clinical documentation. Examples of sections include Data, Assessment, and Plan.   BEHAVIORAL_SOAP: Behavioral health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.   PHYSICAL_SOAP: Physical health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.
        public let noteTemplate: MedicalScribeNoteTemplate?
        /// The name of the Amazon S3 bucket where you want the output of Amazon Web Services HealthScribe post-stream analytics stored. Don't include the S3:// prefix of the specified bucket.  HealthScribe outputs transcript and clinical note files under the prefix: S3://$output-bucket-name/healthscribe-streaming/session-id/post-stream-analytics/clinical-notes  The role ResourceAccessRoleArn specified in the MedicalScribeConfigurationEvent must have permission to use the specified location. You can change Amazon S3 permissions using the  Amazon Web Services Management Console . See also Permissions Required for IAM User Roles  .
        public let outputBucketName: String

        @inlinable
        public init(noteTemplate: MedicalScribeNoteTemplate? = nil, outputBucketName: String) {
            self.noteTemplate = noteTemplate
            self.outputBucketName = outputBucketName
        }

        /// Client-side validation of the output bucket name before the request is sent.
        /// The pattern enforces S3 bucket naming (lowercase alphanumerics, dots, hyphens;
        /// 3-63 characters starting and ending with an alphanumeric), so the max-64 length
        /// check is effectively subsumed by the pattern.
        public func validate(name: String) throws {
            try self.validate(self.outputBucketName, name: "outputBucketName", parent: name, max: 64)
            try self.validate(self.outputBucketName, name: "outputBucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$")
        }

        private enum CodingKeys: String, CodingKey {
            case noteTemplate = "NoteTemplate"
            case outputBucketName = "OutputBucketName"
        }
    }

    /// First event sent on a Call Analytics stream to configure channel roles and post-call analytics.
    public struct ConfigurationEvent: AWSEncodableShape {
        /// Indicates which speaker is on which audio channel.
        public let channelDefinitions: [ChannelDefinition]?
        /// Provides additional optional settings for your Call Analytics post-call request, including  encryption and output locations for your redacted transcript.  PostCallAnalyticsSettings provides you with the same insights as a  Call Analytics post-call transcription. Refer to Post-call analytics for more information  on this feature.
        public let postCallAnalyticsSettings: PostCallAnalyticsSettings?

        @inlinable
        public init(channelDefinitions: [ChannelDefinition]? = nil, postCallAnalyticsSettings: PostCallAnalyticsSettings? = nil) {
            self.channelDefinitions = channelDefinitions
            self.postCallAnalyticsSettings = postCallAnalyticsSettings
        }

        /// Client-side validation run before the event is sent.
        /// When `channelDefinitions` is provided it must contain exactly two entries
        /// (min and max are both 2), and each entry is validated individually.
        public func validate(name: String) throws {
            try self.channelDefinitions?.forEach {
                try $0.validate(name: "\(name).channelDefinitions[]")
            }
            try self.validate(self.channelDefinitions, name: "channelDefinitions", parent: name, max: 2)
            try self.validate(self.channelDefinitions, name: "channelDefinitions", parent: name, min: 2)
        }

        private enum CodingKeys: String, CodingKey {
            case channelDefinitions = "ChannelDefinitions"
            case postCallAnalyticsSettings = "PostCallAnalyticsSettings"
        }
    }

    /// Error shape decoded from the service's "ConflictException" response.
    public struct ConflictException: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }

        /// The error message returned by the service.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }
    }

    public struct Entity: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case category = "Category"
            case confidence = "Confidence"
            case content = "Content"
            case endTime = "EndTime"
            case startTime = "StartTime"
            case type = "Type"
        }

        /// Category of the identified information; the only category is PII.
        public let category: String?
        /// Confidence for the identified PII entity, between 0 and 1; larger values indicate a higher probability that the identified entity matches the entity spoken in your media.
        public let confidence: Double?
        /// The word or words that were identified as PII.
        public let content: String?
        /// End time of the utterance identified as PII, in seconds with millisecond precision (for example, 1.056).
        public let endTime: Double?
        /// Start time of the utterance identified as PII, in seconds with millisecond precision (for example, 1.056).
        public let startTime: Double?
        /// The kind of PII identified — for example, NAME or CREDIT_DEBIT_NUMBER.
        public let type: String?

        @inlinable
        public init(category: String? = nil, confidence: Double? = nil, content: String? = nil, endTime: Double? = nil, startTime: Double? = nil, type: String? = nil) {
            self.category = category
            self.confidence = confidence
            self.content = content
            self.endTime = endTime
            self.startTime = startTime
            self.type = type
        }
    }

    public struct GetMedicalScribeStreamRequest: AWSEncodableShape {
        /// The identifier of the HealthScribe streaming session you want information about.
        public let sessionId: String

        @inlinable
        public init(sessionId: String) {
            self.sessionId = sessionId
        }

        /// Custom encoding: the session ID is written into the request URL path, not the body.
        public func encode(to encoder: Encoder) throws {
            // Soto places the request-encoding container in the encoder's userInfo; the
            // force-unwrap/cast is a generated-code invariant of SotoCore, not user input.
            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
            // Result intentionally discarded — presumably creates the keyed container for
            // its side effect on the encoder (CodingKeys is empty, so nothing is written).
            _ = encoder.container(keyedBy: CodingKeys.self)
            request.encodePath(self.sessionId, key: "SessionId")
        }

        /// Client-side validation: the session ID must be exactly 36 characters and match
        /// the canonical hyphenated UUID format.
        public func validate(name: String) throws {
            try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
        }

        // Empty on purpose: no members are encoded into the request body.
        private enum CodingKeys: CodingKey {}
    }

    public struct GetMedicalScribeStreamResponse: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case medicalScribeStreamDetails = "MedicalScribeStreamDetails"
        }

        /// Details about the requested HealthScribe streaming session.
        public let medicalScribeStreamDetails: MedicalScribeStreamDetails?

        @inlinable
        public init(medicalScribeStreamDetails: MedicalScribeStreamDetails? = nil) {
            self.medicalScribeStreamDetails = medicalScribeStreamDetails
        }
    }

    /// Error shape decoded from the service's "InternalFailureException" response.
    public struct InternalFailureException: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }

        /// The error message returned by the service.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }
    }

    public struct IssueDetected: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case characterOffsets = "CharacterOffsets"
        }

        /// Identifies where in an audio segment the detected issue occurs.
        public let characterOffsets: CharacterOffsets?

        @inlinable
        public init(characterOffsets: CharacterOffsets? = nil) {
            self.characterOffsets = characterOffsets
        }
    }

    public struct Item: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case confidence = "Confidence"
            case content = "Content"
            case endTime = "EndTime"
            case speaker = "Speaker"
            case stable = "Stable"
            case startTime = "StartTime"
            case type = "Type"
            case vocabularyFilterMatch = "VocabularyFilterMatch"
        }

        /// Confidence for this word or phrase, between 0 and 1; larger values indicate a higher probability that the identified item matches what was spoken in your media.
        public let confidence: Double?
        /// The transcribed word or punctuation mark.
        public let content: String?
        /// End time of the transcribed item, in seconds with millisecond precision (for example, 1.056).
        public let endTime: Double?
        /// When speaker partitioning is enabled, labels which speaker produced this item.
        public let speaker: String?
        /// When partial result stabilization is enabled, indicates whether the item is stable (true) or may still change before the segment completes (false).
        public let stable: Bool?
        /// Start time of the transcribed item, in seconds with millisecond precision (for example, 1.056).
        public let startTime: Double?
        /// Kind of item identified: PRONUNCIATION (spoken words) or PUNCTUATION.
        public let type: ItemType?
        /// True when the item matches a word in the vocabulary filter included in your request.
        public let vocabularyFilterMatch: Bool?

        @inlinable
        public init(confidence: Double? = nil, content: String? = nil, endTime: Double? = nil, speaker: String? = nil, stable: Bool? = nil, startTime: Double? = nil, type: ItemType? = nil, vocabularyFilterMatch: Bool? = nil) {
            self.confidence = confidence
            self.content = content
            self.endTime = endTime
            self.speaker = speaker
            self.stable = stable
            self.startTime = startTime
            self.type = type
            self.vocabularyFilterMatch = vocabularyFilterMatch
        }
    }

    public struct LanguageWithScore: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case languageCode = "LanguageCode"
            case score = "Score"
        }

        /// The language code that was identified for the audio.
        public let languageCode: LanguageCode?
        /// Confidence in the identified language, between zero and one; larger values indicate higher confidence.
        public let score: Double?

        @inlinable
        public init(languageCode: LanguageCode? = nil, score: Double? = nil) {
            self.languageCode = languageCode
            self.score = score
        }
    }

    /// Error shape decoded from the service's "LimitExceededException" response.
    public struct LimitExceededException: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }

        /// The error message returned by the service.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }
    }

    public struct MedicalAlternative: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case entities = "Entities"
            case items = "Items"
            case transcript = "Transcript"
        }

        /// Entities identified as personal health information (PHI) in the transcription output.
        public let entities: [MedicalEntity]?
        /// Words, phrases, and punctuation marks in the transcription output.
        public let items: [MedicalItem]?
        /// The transcribed text.
        public let transcript: String?

        @inlinable
        public init(entities: [MedicalEntity]? = nil, items: [MedicalItem]? = nil, transcript: String? = nil) {
            self.entities = entities
            self.items = items
            self.transcript = transcript
        }
    }

    public struct MedicalEntity: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case category = "Category"
            case confidence = "Confidence"
            case content = "Content"
            case endTime = "EndTime"
            case startTime = "StartTime"
        }

        /// Category of the identified information; the only category is PHI.
        public let category: String?
        /// Confidence for the identified PHI entity, between 0 and 1; larger values indicate a higher probability that the identified entity matches the entity spoken in your media.
        public let confidence: Double?
        /// The word or words that were identified as PHI.
        public let content: String?
        /// End time, in seconds, of the utterance identified as PHI.
        public let endTime: Double?
        /// Start time, in seconds, of the utterance identified as PHI.
        public let startTime: Double?

        @inlinable
        public init(category: String? = nil, confidence: Double? = nil, content: String? = nil, endTime: Double? = nil, startTime: Double? = nil) {
            self.category = category
            self.confidence = confidence
            self.content = content
            self.endTime = endTime
            self.startTime = startTime
        }
    }

    public struct MedicalItem: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case confidence = "Confidence"
            case content = "Content"
            case endTime = "EndTime"
            case speaker = "Speaker"
            case startTime = "StartTime"
            case type = "Type"
        }

        /// Confidence for this word or phrase, between 0 and 1; larger values indicate a higher probability that the identified item matches what was spoken in your media.
        public let confidence: Double?
        /// The transcribed word or punctuation mark.
        public let content: String?
        /// End time, in seconds, of the transcribed item.
        public let endTime: Double?
        /// When speaker partitioning is enabled, labels which speaker produced this item.
        public let speaker: String?
        /// Start time, in seconds, of the transcribed item.
        public let startTime: Double?
        /// Kind of item identified: PRONUNCIATION (spoken words) or PUNCTUATION.
        public let type: ItemType?

        @inlinable
        public init(confidence: Double? = nil, content: String? = nil, endTime: Double? = nil, speaker: String? = nil, startTime: Double? = nil, type: ItemType? = nil) {
            self.confidence = confidence
            self.content = content
            self.endTime = endTime
            self.speaker = speaker
            self.startTime = startTime
            self.type = type
        }
    }

    public struct MedicalResult: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case alternatives = "Alternatives"
            case channelId = "ChannelId"
            case endTime = "EndTime"
            case isPartial = "IsPartial"
            case resultId = "ResultId"
            case startTime = "StartTime"
        }

        /// Possible alternative transcriptions for the input audio; each alternative may contain Items, Entities, and/or a Transcript.
        public let alternatives: [MedicalAlternative]?
        /// The channel identified for this result.
        public let channelId: String?
        /// End time, in seconds, of the result.
        public let endTime: Double?
        /// Whether the segment is still in progress: true means the segment is not complete, false means it is.
        public let isPartial: Bool?
        /// Unique identifier for this result.
        public let resultId: String?
        /// Start time, in seconds, of the result.
        public let startTime: Double?

        @inlinable
        public init(alternatives: [MedicalAlternative]? = nil, channelId: String? = nil, endTime: Double? = nil, isPartial: Bool? = nil, resultId: String? = nil, startTime: Double? = nil) {
            self.alternatives = alternatives
            self.channelId = channelId
            self.endTime = endTime
            self.isPartial = isPartial
            self.resultId = resultId
            self.startTime = startTime
        }
    }

    public struct MedicalScribeAudioEvent: AWSEncodableShape {
        // Empty on purpose: the audio chunk is carried as the event payload
        // (AWSEventPayload), not through coded keys.
        private enum CodingKeys: CodingKey {}

        /// The next segment of audio from your application, with a maximum duration of 1 second.
        /// The maximum size in bytes depends on the audio properties; see "Transcribing streaming
        /// best practices" for recommendations. Size = duration (s) * sample rate (Hz) * channels * 2
        /// (bytes per 16-bit sample). For example, 1 second of 16 kHz, 2-channel, 16-bit audio is
        /// 1 * 16000 * 2 * 2 = 64000 bytes; 1 second of 8 kHz, 1-channel, 16-bit audio is
        /// 1 * 8000 * 1 * 2 = 16000 bytes.
        public let audioChunk: AWSEventPayload

        @inlinable
        public init(audioChunk: AWSEventPayload) {
            self.audioChunk = audioChunk
        }
    }

    /// Maps one audio channel to a participant (CLINICIAN or PATIENT) for a HealthScribe stream.
    public struct MedicalScribeChannelDefinition: AWSEncodableShape & AWSDecodableShape {
        /// Specify the audio channel you want to define.
        /// `validate(name:)` below constrains this to the range 0...1.
        public let channelId: Int
        /// Specify the participant that you want to flag. The allowed options are CLINICIAN and PATIENT.
        public let participantRole: MedicalScribeParticipantRole

        @inlinable
        public init(channelId: Int, participantRole: MedicalScribeParticipantRole) {
            self.channelId = channelId
            self.participantRole = participantRole
        }

        /// Client-side validation run before the request is sent.
        /// - Throws: a validation error when `channelId` is outside 0...1.
        public func validate(name: String) throws {
            try self.validate(self.channelId, name: "channelId", parent: name, max: 1)
            try self.validate(self.channelId, name: "channelId", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case channelId = "ChannelId"
            case participantRole = "ParticipantRole"
        }
    }

    /// Configuration event for a HealthScribe streaming session: channel roles, encryption,
    /// contextual information, post-stream analytics, access role, and vocabulary settings.
    public struct MedicalScribeConfigurationEvent: AWSEncodableShape {
        /// Specify which speaker is on which audio channel.
        public let channelDefinitions: [MedicalScribeChannelDefinition]?
        /// Specify the encryption settings for your streaming session.
        public let encryptionSettings: MedicalScribeEncryptionSettings?
        /// The MedicalScribeContext object that contains contextual information used to generate customized clinical notes.
        public let medicalScribeContext: MedicalScribeContext?
        /// Specify settings for post-stream analytics.
        public let postStreamAnalyticsSettings: MedicalScribePostStreamAnalyticsSettings
        /// The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 output bucket you specified, and use your KMS key if supplied. If the role that you specify doesn’t have the appropriate permissions, your request fails.   IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path. For example: arn:aws:iam::111122223333:role/Admin.  For more information, see Amazon Web Services HealthScribe.
        public let resourceAccessRoleArn: String
        /// Specify how you want your custom vocabulary filter applied to the streaming session. To replace words with ***, specify mask.  To delete words, specify remove.  To flag words without changing them, specify tag.
        public let vocabularyFilterMethod: MedicalScribeVocabularyFilterMethod?
        /// Specify the name of the custom vocabulary filter you want to include in your streaming session. Custom vocabulary filter names are case-sensitive.  If you include VocabularyFilterName in the MedicalScribeConfigurationEvent, you must also include VocabularyFilterMethod.
        public let vocabularyFilterName: String?
        /// Specify the name of the custom vocabulary you want to use for your streaming session. Custom vocabulary names are case-sensitive.
        public let vocabularyName: String?

        @inlinable
        public init(channelDefinitions: [MedicalScribeChannelDefinition]? = nil, encryptionSettings: MedicalScribeEncryptionSettings? = nil, medicalScribeContext: MedicalScribeContext? = nil, postStreamAnalyticsSettings: MedicalScribePostStreamAnalyticsSettings, resourceAccessRoleArn: String, vocabularyFilterMethod: MedicalScribeVocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyName: String? = nil) {
            self.channelDefinitions = channelDefinitions
            self.encryptionSettings = encryptionSettings
            self.medicalScribeContext = medicalScribeContext
            self.postStreamAnalyticsSettings = postStreamAnalyticsSettings
            self.resourceAccessRoleArn = resourceAccessRoleArn
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyName = vocabularyName
        }

        /// Client-side validation run before the event is sent. Checks are ordered;
        /// the first violated constraint throws.
        public func validate(name: String) throws {
            // Each channel definition validates itself; when provided, the list must
            // contain exactly two entries (min and max are both 2).
            try self.channelDefinitions?.forEach {
                try $0.validate(name: "\(name).channelDefinitions[]")
            }
            try self.validate(self.channelDefinitions, name: "channelDefinitions", parent: name, max: 2)
            try self.validate(self.channelDefinitions, name: "channelDefinitions", parent: name, min: 2)
            try self.encryptionSettings?.validate(name: "\(name).encryptionSettings")
            try self.postStreamAnalyticsSettings.validate(name: "\(name).postStreamAnalyticsSettings")
            // IAM role ARN: 20-2048 characters matching the partition-aware role-ARN pattern.
            try self.validate(self.resourceAccessRoleArn, name: "resourceAccessRoleArn", parent: name, max: 2048)
            try self.validate(self.resourceAccessRoleArn, name: "resourceAccessRoleArn", parent: name, min: 20)
            try self.validate(self.resourceAccessRoleArn, name: "resourceAccessRoleArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|aws-iso-{0,1}[a-z]{0,1}):iam::[0-9]{0,63}:role/[A-Za-z0-9:_/+=,@.-]{0,1024}$")
            // Vocabulary filter and vocabulary names: 1-200 characters of [0-9a-zA-Z._-].
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, max: 200)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, min: 1)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, max: 200)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, min: 1)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
        }

        private enum CodingKeys: String, CodingKey {
            case channelDefinitions = "ChannelDefinitions"
            case encryptionSettings = "EncryptionSettings"
            case medicalScribeContext = "MedicalScribeContext"
            case postStreamAnalyticsSettings = "PostStreamAnalyticsSettings"
            case resourceAccessRoleArn = "ResourceAccessRoleArn"
            case vocabularyFilterMethod = "VocabularyFilterMethod"
            case vocabularyFilterName = "VocabularyFilterName"
            case vocabularyName = "VocabularyName"
        }
    }

    public struct MedicalScribeContext: AWSEncodableShape {
        private enum CodingKeys: String, CodingKey {
            case patientContext = "PatientContext"
        }

        /// Patient-specific information used to customize clinical note generation.
        public let patientContext: MedicalScribePatientContext?

        @inlinable
        public init(patientContext: MedicalScribePatientContext? = nil) {
            self.patientContext = patientContext
        }
    }

    /// KMS encryption settings for a HealthScribe streaming session.
    public struct MedicalScribeEncryptionSettings: AWSEncodableShape & AWSDecodableShape {
        /// A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added layer of security for your data. For more information, see KMSencryption context  and Asymmetric keys in KMS .
        public let kmsEncryptionContext: [String: String]?
        /// The ID of the KMS key you want to use for your streaming session. You can specify its KMS key ID, key Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with "alias/".  To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN. For example:   Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab   Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab    Alias name: alias/ExampleAlias    Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias       To get the key ID and key ARN for a KMS key, use the ListKeys or DescribeKey KMS API operations.  To get the alias name and alias ARN, use ListKeys API operation.
        public let kmsKeyId: String

        @inlinable
        public init(kmsEncryptionContext: [String: String]? = nil, kmsKeyId: String) {
            self.kmsEncryptionContext = kmsEncryptionContext
            self.kmsKeyId = kmsKeyId
        }

        /// Client-side validation run before the request is sent.
        /// Each encryption-context key and value must be 1-2000 characters and contain at
        /// least one non-whitespace character; the map itself, when provided, must hold
        /// 1-10 entries; the KMS key ID must be 1-2048 characters matching the key/ARN/alias pattern.
        public func validate(name: String) throws {
            try self.kmsEncryptionContext?.forEach {
                try validate($0.key, name: "kmsEncryptionContext.key", parent: name, max: 2000)
                try validate($0.key, name: "kmsEncryptionContext.key", parent: name, min: 1)
                try validate($0.key, name: "kmsEncryptionContext.key", parent: name, pattern: "\\S")
                try validate($0.value, name: "kmsEncryptionContext[\"\($0.key)\"]", parent: name, max: 2000)
                try validate($0.value, name: "kmsEncryptionContext[\"\($0.key)\"]", parent: name, min: 1)
                try validate($0.value, name: "kmsEncryptionContext[\"\($0.key)\"]", parent: name, pattern: "\\S")
            }
            try self.validate(self.kmsEncryptionContext, name: "kmsEncryptionContext", parent: name, max: 10)
            try self.validate(self.kmsEncryptionContext, name: "kmsEncryptionContext", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$")
        }

        private enum CodingKeys: String, CodingKey {
            case kmsEncryptionContext = "KmsEncryptionContext"
            case kmsKeyId = "KmsKeyId"
        }
    }

    public struct MedicalScribePatientContext: AWSEncodableShape {
        private enum CodingKeys: String, CodingKey {
            case pronouns = "Pronouns"
        }

        /// The patient's preferred pronouns, supplied as context for clinical note generation.
        public let pronouns: Pronouns?

        @inlinable
        public init(pronouns: Pronouns? = nil) {
            self.pronouns = pronouns
        }
    }

    public struct MedicalScribePostStreamAnalyticsResult: AWSDecodableShape {
        private enum CodingKeys: String, CodingKey {
            case clinicalNoteGenerationResult = "ClinicalNoteGenerationResult"
        }

        /// The clinical note generation result of the post-stream analytics.
        public let clinicalNoteGenerationResult: ClinicalNoteGenerationResult?

        @inlinable
        public init(clinicalNoteGenerationResult: ClinicalNoteGenerationResult? = nil) {
            self.clinicalNoteGenerationResult = clinicalNoteGenerationResult
        }
    }

    public struct MedicalScribePostStreamAnalyticsSettings: AWSEncodableShape & AWSDecodableShape {
        /// Specify settings for the post-stream clinical note generation.
        public let clinicalNoteGenerationSettings: ClinicalNoteGenerationSettings

        /// Creates post-stream analytics settings.
        /// - Parameter clinicalNoteGenerationSettings: Settings controlling post-stream clinical note generation.
        @inlinable
        public init(clinicalNoteGenerationSettings: ClinicalNoteGenerationSettings) {
            self.clinicalNoteGenerationSettings = clinicalNoteGenerationSettings
        }

        /// Delegates client-side validation to the nested clinical note generation settings.
        /// - Parameter name: Prefix used to build the field path reported in any validation error.
        public func validate(name: String) throws {
            try self.clinicalNoteGenerationSettings.validate(name: "\(name).clinicalNoteGenerationSettings")
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case clinicalNoteGenerationSettings = "ClinicalNoteGenerationSettings"
        }
    }

    public struct MedicalScribeSessionControlEvent: AWSEncodableShape {
        /// The type of MedicalScribeSessionControlEvent.  Possible Values:    END_OF_SESSION - Indicates the audio streaming is complete. After you send an END_OF_SESSION event, Amazon Web Services HealthScribe starts the post-stream analytics. The session can't be resumed after this event is sent. After Amazon Web Services HealthScribe processes the event, the real-time StreamStatus is COMPLETED. You get the StreamStatus and other stream details with the GetMedicalScribeStream API operation. For more information about different streaming statuses, see the StreamStatus description in the MedicalScribeStreamDetails.
        public let type: MedicalScribeSessionControlEventType

        /// Creates a session control event.
        /// - Parameter type: The control event type (for example, END_OF_SESSION).
        @inlinable
        public init(type: MedicalScribeSessionControlEventType) {
            self.type = type
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case type = "Type"
        }
    }

    public struct MedicalScribeStreamDetails: AWSDecodableShape {
        /// Channel definitions configured for the HealthScribe streaming session.
        public let channelDefinitions: [MedicalScribeChannelDefinition]?
        /// Encryption settings configured for the HealthScribe streaming session.
        public let encryptionSettings: MedicalScribeEncryptionSettings?
        /// Language code of the HealthScribe streaming session.
        public let languageCode: MedicalScribeLanguageCode?
        /// Media encoding of the HealthScribe streaming session.
        public let mediaEncoding: MedicalScribeMediaEncoding?
        /// Sample rate, in hertz, of the HealthScribe streaming session.
        public let mediaSampleRateHertz: Int?
        /// Indicates whether a MedicalScribeContext object was provided when the stream was started.
        public let medicalScribeContextProvided: Bool?
        /// Result of post-stream analytics for the HealthScribe streaming session.
        public let postStreamAnalyticsResult: MedicalScribePostStreamAnalyticsResult?
        /// Post-stream analytics settings of the HealthScribe streaming session.
        public let postStreamAnalyticsSettings: MedicalScribePostStreamAnalyticsSettings?
        /// Amazon Resource Name (ARN) of the role used in the HealthScribe streaming session.
        public let resourceAccessRoleArn: String?
        /// Identifier of the HealthScribe streaming session.
        public let sessionId: String?
        /// Date and time at which the HealthScribe streaming session was created.
        public let streamCreatedAt: Date?
        /// Date and time at which the HealthScribe streaming session was ended.
        public let streamEndedAt: Date?
        /// Real-time streaming status of the session. Possible values: IN_PROGRESS, PAUSED, FAILED, COMPLETED. This status is specific to real-time streaming; a COMPLETED status does not mean post-stream analytics is complete. To get the status of an analytics result, check the Status field of that result inside MedicalScribePostStreamAnalyticsResult (for example, the ClinicalNoteGenerationResult).
        public let streamStatus: MedicalScribeStreamStatus?
        /// Vocabulary filter method of the HealthScribe streaming session.
        public let vocabularyFilterMethod: MedicalScribeVocabularyFilterMethod?
        /// Name of the vocabulary filter used for the HealthScribe streaming session.
        public let vocabularyFilterName: String?
        /// Name of the vocabulary used for the HealthScribe streaming session.
        public let vocabularyName: String?

        /// Memberwise initializer; every field defaults to nil.
        @inlinable
        public init(channelDefinitions: [MedicalScribeChannelDefinition]? = nil, encryptionSettings: MedicalScribeEncryptionSettings? = nil, languageCode: MedicalScribeLanguageCode? = nil, mediaEncoding: MedicalScribeMediaEncoding? = nil, mediaSampleRateHertz: Int? = nil, medicalScribeContextProvided: Bool? = nil, postStreamAnalyticsResult: MedicalScribePostStreamAnalyticsResult? = nil, postStreamAnalyticsSettings: MedicalScribePostStreamAnalyticsSettings? = nil, resourceAccessRoleArn: String? = nil, sessionId: String? = nil, streamCreatedAt: Date? = nil, streamEndedAt: Date? = nil, streamStatus: MedicalScribeStreamStatus? = nil, vocabularyFilterMethod: MedicalScribeVocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyName: String? = nil) {
            // Plain field copies; assignment order is independent of declaration order.
            self.vocabularyName = vocabularyName
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.streamStatus = streamStatus
            self.streamEndedAt = streamEndedAt
            self.streamCreatedAt = streamCreatedAt
            self.sessionId = sessionId
            self.resourceAccessRoleArn = resourceAccessRoleArn
            self.postStreamAnalyticsSettings = postStreamAnalyticsSettings
            self.postStreamAnalyticsResult = postStreamAnalyticsResult
            self.medicalScribeContextProvided = medicalScribeContextProvided
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.mediaEncoding = mediaEncoding
            self.languageCode = languageCode
            self.encryptionSettings = encryptionSettings
            self.channelDefinitions = channelDefinitions
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case channelDefinitions = "ChannelDefinitions"
            case encryptionSettings = "EncryptionSettings"
            case languageCode = "LanguageCode"
            case mediaEncoding = "MediaEncoding"
            case mediaSampleRateHertz = "MediaSampleRateHertz"
            case medicalScribeContextProvided = "MedicalScribeContextProvided"
            case postStreamAnalyticsResult = "PostStreamAnalyticsResult"
            case postStreamAnalyticsSettings = "PostStreamAnalyticsSettings"
            case resourceAccessRoleArn = "ResourceAccessRoleArn"
            case sessionId = "SessionId"
            case streamCreatedAt = "StreamCreatedAt"
            case streamEndedAt = "StreamEndedAt"
            case streamStatus = "StreamStatus"
            case vocabularyFilterMethod = "VocabularyFilterMethod"
            case vocabularyFilterName = "VocabularyFilterName"
            case vocabularyName = "VocabularyName"
        }
    }

    public struct MedicalScribeTranscriptEvent: AWSDecodableShape {
        /// The TranscriptSegment associated with a MedicalScribeTranscriptEvent.
        public let transcriptSegment: MedicalScribeTranscriptSegment?

        /// Creates a transcript event.
        /// - Parameter transcriptSegment: The transcript segment carried by this event, if any.
        @inlinable
        public init(transcriptSegment: MedicalScribeTranscriptSegment? = nil) {
            self.transcriptSegment = transcriptSegment
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case transcriptSegment = "TranscriptSegment"
        }
    }

    public struct MedicalScribeTranscriptItem: AWSDecodableShape {
        /// Start time, in milliseconds, of the transcribed item.
        public let beginAudioTime: Double?
        /// Confidence score for a word or phrase in the transcript, between 0 and 1. A larger value indicates a higher probability that the identified item correctly matches what was spoken in the media.
        public let confidence: Double?
        /// The word, phrase, or punctuation mark that was transcribed.
        public let content: String?
        /// End time, in milliseconds, of the transcribed item.
        public let endAudioTime: Double?
        /// The type of item identified: PRONUNCIATION (spoken words) or PUNCTUATION.
        public let type: MedicalScribeTranscriptItemType?
        /// Indicates whether this item matches a word in the vocabulary filter included in the configuration event; true means there is a vocabulary filter match.
        public let vocabularyFilterMatch: Bool?

        /// Memberwise initializer; every field defaults to nil.
        @inlinable
        public init(beginAudioTime: Double? = nil, confidence: Double? = nil, content: String? = nil, endAudioTime: Double? = nil, type: MedicalScribeTranscriptItemType? = nil, vocabularyFilterMatch: Bool? = nil) {
            // Plain field copies; assignment order is independent of declaration order.
            self.vocabularyFilterMatch = vocabularyFilterMatch
            self.type = type
            self.endAudioTime = endAudioTime
            self.content = content
            self.confidence = confidence
            self.beginAudioTime = beginAudioTime
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case beginAudioTime = "BeginAudioTime"
            case confidence = "Confidence"
            case content = "Content"
            case endAudioTime = "EndAudioTime"
            case type = "Type"
            case vocabularyFilterMatch = "VocabularyFilterMatch"
        }
    }

    public struct MedicalScribeTranscriptSegment: AWSDecodableShape {
        /// Start time, in milliseconds, of the segment.
        public let beginAudioTime: Double?
        /// The audio channel associated with this segment. This field is not included when MedicalScribeChannelDefinition is not provided in the MedicalScribeConfigurationEvent.
        public let channelId: String?
        /// Transcribed text of the segment.
        public let content: String?
        /// End time, in milliseconds, of the segment.
        public let endAudioTime: Double?
        /// Indicates whether the segment is complete: true means the segment is partial; false means it is complete.
        public let isPartial: Bool?
        /// Words, phrases, or punctuation marks contained in the segment.
        public let items: [MedicalScribeTranscriptItem]?
        /// Identifier of the segment.
        public let segmentId: String?

        /// Memberwise initializer; every field defaults to nil.
        @inlinable
        public init(beginAudioTime: Double? = nil, channelId: String? = nil, content: String? = nil, endAudioTime: Double? = nil, isPartial: Bool? = nil, items: [MedicalScribeTranscriptItem]? = nil, segmentId: String? = nil) {
            // Plain field copies; assignment order is independent of declaration order.
            self.segmentId = segmentId
            self.items = items
            self.isPartial = isPartial
            self.endAudioTime = endAudioTime
            self.content = content
            self.channelId = channelId
            self.beginAudioTime = beginAudioTime
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case beginAudioTime = "BeginAudioTime"
            case channelId = "ChannelId"
            case content = "Content"
            case endAudioTime = "EndAudioTime"
            case isPartial = "IsPartial"
            case items = "Items"
            case segmentId = "SegmentId"
        }
    }

    public struct MedicalTranscript: AWSDecodableShape {
        /// Contains a set of transcription results from one or more audio segments, along with  additional information per your request parameters. This can include information relating to  alternative transcriptions, channel identification, partial result stabilization, language  identification, and other transcription-related data.
        public let results: [MedicalResult]?

        /// Creates a medical transcript.
        /// - Parameter results: Transcription results for one or more audio segments, if any.
        @inlinable
        public init(results: [MedicalResult]? = nil) {
            self.results = results
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case results = "Results"
        }
    }

    public struct MedicalTranscriptEvent: AWSDecodableShape {
        /// Contains Results, which contains a set of transcription results from one or  more audio segments, along with additional information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result  stabilization, language identification, and other transcription-related data.
        public let transcript: MedicalTranscript?

        /// Creates a medical transcript event.
        /// - Parameter transcript: The transcript carried by this event, if any.
        @inlinable
        public init(transcript: MedicalTranscript? = nil) {
            self.transcript = transcript
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case transcript = "Transcript"
        }
    }

    public struct PointsOfInterest: AWSDecodableShape {
        /// Contains the timestamp ranges (start time through end time) of matched categories and rules.
        public let timestampRanges: [TimestampRange]?

        /// Creates a points-of-interest result.
        /// - Parameter timestampRanges: Timestamp ranges of matched categories and rules, if any.
        @inlinable
        public init(timestampRanges: [TimestampRange]? = nil) {
            self.timestampRanges = timestampRanges
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case timestampRanges = "TimestampRanges"
        }
    }

    public struct PostCallAnalyticsSettings: AWSEncodableShape {
        /// Whether you want only a redacted transcript, or both a redacted and an unredacted transcript. If you choose redacted and unredacted, two JSON files are generated and stored in the Amazon S3 output location you specify. To include ContentRedactionOutput in your request, you must enable content redaction (ContentRedactionType).
        public let contentRedactionOutput: ContentRedactionOutput?
        /// Amazon Resource Name (ARN) of an IAM role with permission to access the Amazon S3 bucket that contains your input files. The request fails if the specified role lacks appropriate permissions for the given Amazon S3 location. IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path, for example arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs.
        public let dataAccessRoleArn: String
        /// The KMS key used to encrypt your Call Analytics post-call output. For a key in the current Amazon Web Services account, specify it in one of four ways: the KMS key ID itself (for example, 1234abcd-12ab-34cd-56ef-1234567890ab); an alias for the key ID (for example, alias/ExampleAlias); the ARN of the key ID (for example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab); or the ARN of the key alias (for example, arn:aws:kms:region:account-ID:alias/ExampleAlias). For a key in a different Amazon Web Services account, use either the key-ID ARN or the key-alias ARN form. The role making the request must have permission to use the specified KMS key.
        public let outputEncryptionKMSKeyId: String?
        /// The Amazon S3 location where your Call Analytics post-call transcription output is stored, in any of the following formats: s3://DOC-EXAMPLE-BUCKET, s3://DOC-EXAMPLE-BUCKET/my-output-folder/, or s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json.
        public let outputLocation: String

        /// Memberwise initializer; optional parameters default to nil.
        @inlinable
        public init(contentRedactionOutput: ContentRedactionOutput? = nil, dataAccessRoleArn: String, outputEncryptionKMSKeyId: String? = nil, outputLocation: String) {
            // Plain field copies; assignment order is independent of declaration order.
            self.outputLocation = outputLocation
            self.outputEncryptionKMSKeyId = outputEncryptionKMSKeyId
            self.dataAccessRoleArn = dataAccessRoleArn
            self.contentRedactionOutput = contentRedactionOutput
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case contentRedactionOutput = "ContentRedactionOutput"
            case dataAccessRoleArn = "DataAccessRoleArn"
            case outputEncryptionKMSKeyId = "OutputEncryptionKMSKeyId"
            case outputLocation = "OutputLocation"
        }
    }

    public struct Result: AWSDecodableShape {
        /// Possible alternative transcriptions for the input audio. Each alternative may contain one or more of Items, Entities, or Transcript.
        public let alternatives: [Alternative]?
        /// The audio channel associated with this Result.
        public let channelId: String?
        /// End time of the Result, in seconds with millisecond precision (for example, 1.056).
        public let endTime: Double?
        /// Indicates whether the segment is complete: true means the segment is partial; false means it is complete.
        public let isPartial: Bool?
        /// Language code representing the language spoken in the audio stream.
        public let languageCode: LanguageCode?
        /// Language code of the dominant language identified in the stream. With channel identification enabled and a different language on each channel, there may be more than one result.
        public let languageIdentification: [LanguageWithScore]?
        /// Unique identifier for this Result.
        public let resultId: String?
        /// Start time of the Result, in seconds with millisecond precision (for example, 1.056).
        public let startTime: Double?

        /// Memberwise initializer; every field defaults to nil.
        @inlinable
        public init(alternatives: [Alternative]? = nil, channelId: String? = nil, endTime: Double? = nil, isPartial: Bool? = nil, languageCode: LanguageCode? = nil, languageIdentification: [LanguageWithScore]? = nil, resultId: String? = nil, startTime: Double? = nil) {
            // Plain field copies; assignment order is independent of declaration order.
            self.startTime = startTime
            self.resultId = resultId
            self.languageIdentification = languageIdentification
            self.languageCode = languageCode
            self.isPartial = isPartial
            self.endTime = endTime
            self.channelId = channelId
            self.alternatives = alternatives
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case alternatives = "Alternatives"
            case channelId = "ChannelId"
            case endTime = "EndTime"
            case isPartial = "IsPartial"
            case languageCode = "LanguageCode"
            case languageIdentification = "LanguageIdentification"
            case resultId = "ResultId"
            case startTime = "StartTime"
        }
    }

    /// Decodable error shape returned by the service; carries an optional message.
    public struct ServiceUnavailableException: AWSDecodableShape {
        // Human-readable description supplied by the service, if any.
        public let message: String?

        /// Creates the error shape.
        /// - Parameter message: Human-readable error description, if any.
        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        // Maps Swift property names to the JSON keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    public struct StartCallAnalyticsStreamTranscriptionRequest: AWSEncodableShape {
        /// An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket  data frames. For more information, see Transcribing streaming audio.
        public let audioStream: AWSEventStream<AudioStream>
        /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in  PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information.
        public let contentIdentificationType: ContentIdentificationType?
        /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in  PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information.
        public let contentRedactionType: ContentRedactionType?
        /// Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see  Partial-result  stabilization.
        public let enablePartialResultsStabilization: Bool?
        /// Enables automatic language identification for your Call Analytics transcription. If you include IdentifyLanguage, you must include a list of language codes, using LanguageOptions, that you think may be present in  your audio stream. You must provide a minimum of two language selections. You can also include a preferred language using PreferredLanguage. Adding a  preferred language can help Amazon Transcribe identify the language faster than if you omit this  parameter. Note that you must include either LanguageCode or  IdentifyLanguage in your request. If you include both parameters, your transcription job fails.
        public let identifyLanguage: Bool?
        /// Specify the language code that represents the language spoken in your audio. For a list of languages supported with real-time Call Analytics, refer to the  Supported  languages table.
        public let languageCode: CallAnalyticsLanguageCode?
        /// Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied.  There are no errors or warnings associated with a language mismatch. For more information, see Custom language models.
        public let languageModelName: String?
        /// Specify two or more language codes that represent the languages you think may be present  in your media. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include  IdentifyLanguage. For a list of languages supported with Call Analytics streaming, refer to the  Supported  languages table.  You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request.
        public let languageOptions: String?
        /// Specify the encoding of your input audio. Supported formats are:   FLAC   OPUS-encoded audio in an Ogg container   PCM (only signed 16-bit little-endian audio formats, which does not include WAV)   For more information, see Media formats.
        public let mediaEncoding: MediaEncoding
        /// The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.
        public let mediaSampleRateHertz: Int
        /// Specify the level of stability to use when you enable partial results stabilization  (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result  stabilization.
        public let partialResultsStability: PartialResultsStability?
        /// Specify which types of personally identifiable information (PII) you want to redact in your  transcript. You can include as many types as you'd like, or you can select  ALL. Values must be comma-separated and can include: ADDRESS,  BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL,  NAME, PHONE, PIN,  SSN, or ALL. Note that if you include PiiEntityTypes in your request, you must also include  ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or  ContentIdentificationType in your request, but do not include  PiiEntityTypes, all PII is redacted or identified.
        public let piiEntityTypes: String?
        /// Specify a preferred language from the subset of languages codes you specified in  LanguageOptions. You can only use this parameter if you've included IdentifyLanguage and LanguageOptions in your request.
        public let preferredLanguage: CallAnalyticsLanguageCode?
        /// Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response.
        public let sessionId: String?
        /// Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag.
        public let vocabularyFilterMethod: VocabularyFilterMethod?
        /// Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription. For more information, see Using vocabulary filtering with unwanted  words.
        public let vocabularyFilterName: String?
        /// Specify the names of the custom vocabulary filters that you want to use when processing your Call Analytics transcription. Note that vocabulary filter names are case sensitive. These filters serve to customize the transcript output.  This parameter is only intended for use with  the IdentifyLanguage parameter. If you're not  including IdentifyLanguage in your request and want to use a custom vocabulary filter  with your transcription, use the VocabularyFilterName parameter instead.  For more information, see Using vocabulary filtering with unwanted  words.
        public let vocabularyFilterNames: String?
        /// Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription. For more information, see Custom vocabularies.
        public let vocabularyName: String?
        /// Specify the names of the custom vocabularies that you want to use when processing your Call Analytics transcription. Note that vocabulary names are case sensitive. If the custom vocabulary's language doesn't match the identified media language, it won't be applied to the transcription.  This parameter is only intended for use with the IdentifyLanguage parameter. If you're not including IdentifyLanguage in your request and want to use a custom vocabulary with your transcription, use the VocabularyName parameter instead.  For more information, see Custom vocabularies.
        public let vocabularyNames: String?

        /// Memberwise initializer; optional parameters default to nil.
        @inlinable
        public init(audioStream: AWSEventStream<AudioStream>, contentIdentificationType: ContentIdentificationType? = nil, contentRedactionType: ContentRedactionType? = nil, enablePartialResultsStabilization: Bool? = nil, identifyLanguage: Bool? = nil, languageCode: CallAnalyticsLanguageCode? = nil, languageModelName: String? = nil, languageOptions: String? = nil, mediaEncoding: MediaEncoding, mediaSampleRateHertz: Int, partialResultsStability: PartialResultsStability? = nil, piiEntityTypes: String? = nil, preferredLanguage: CallAnalyticsLanguageCode? = nil, sessionId: String? = nil, vocabularyFilterMethod: VocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyFilterNames: String? = nil, vocabularyName: String? = nil, vocabularyNames: String? = nil) {
            // Plain field copies; assignment order is independent of declaration order.
            self.vocabularyNames = vocabularyNames
            self.vocabularyName = vocabularyName
            self.vocabularyFilterNames = vocabularyFilterNames
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.sessionId = sessionId
            self.preferredLanguage = preferredLanguage
            self.piiEntityTypes = piiEntityTypes
            self.partialResultsStability = partialResultsStability
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.mediaEncoding = mediaEncoding
            self.languageOptions = languageOptions
            self.languageModelName = languageModelName
            self.languageCode = languageCode
            self.identifyLanguage = identifyLanguage
            self.enablePartialResultsStabilization = enablePartialResultsStabilization
            self.contentRedactionType = contentRedactionType
            self.contentIdentificationType = contentIdentificationType
            self.audioStream = audioStream
        }

        /// Custom encoding: the audio event stream is encoded as the request payload,
        /// while every scalar parameter is written as an `x-amzn-transcribe-*` request
        /// header via the RequestEncodingContainer stashed in the encoder's userInfo.
        public func encode(to encoder: Encoder) throws {
            // The AWS request container is installed by SotoCore before encoding;
            // force-unwrap/cast mirrors that invariant.
            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
            var container = encoder.singleValueContainer()
            // The audio stream is the only body content.
            try container.encode(self.audioStream)
            // All remaining parameters travel as request headers.
            request.encodeHeader(self.contentIdentificationType, key: "x-amzn-transcribe-content-identification-type")
            request.encodeHeader(self.contentRedactionType, key: "x-amzn-transcribe-content-redaction-type")
            request.encodeHeader(self.enablePartialResultsStabilization, key: "x-amzn-transcribe-enable-partial-results-stabilization")
            request.encodeHeader(self.identifyLanguage, key: "x-amzn-transcribe-identify-language")
            request.encodeHeader(self.languageCode, key: "x-amzn-transcribe-language-code")
            request.encodeHeader(self.languageModelName, key: "x-amzn-transcribe-language-model-name")
            request.encodeHeader(self.languageOptions, key: "x-amzn-transcribe-language-options")
            request.encodeHeader(self.mediaEncoding, key: "x-amzn-transcribe-media-encoding")
            request.encodeHeader(self.mediaSampleRateHertz, key: "x-amzn-transcribe-sample-rate")
            request.encodeHeader(self.partialResultsStability, key: "x-amzn-transcribe-partial-results-stability")
            request.encodeHeader(self.piiEntityTypes, key: "x-amzn-transcribe-pii-entity-types")
            request.encodeHeader(self.preferredLanguage, key: "x-amzn-transcribe-preferred-language")
            request.encodeHeader(self.sessionId, key: "x-amzn-transcribe-session-id")
            request.encodeHeader(self.vocabularyFilterMethod, key: "x-amzn-transcribe-vocabulary-filter-method")
            request.encodeHeader(self.vocabularyFilterName, key: "x-amzn-transcribe-vocabulary-filter-name")
            request.encodeHeader(self.vocabularyFilterNames, key: "x-amzn-transcribe-vocabulary-filter-names")
            request.encodeHeader(self.vocabularyName, key: "x-amzn-transcribe-vocabulary-name")
            request.encodeHeader(self.vocabularyNames, key: "x-amzn-transcribe-vocabulary-names")
        }

        /// Applies the client-side length, range, and pattern constraints to the
        /// request members before the request is sent.
        /// - Throws: when any member violates its constraint. (Presumably the
        ///   underlying SotoCore `validate` helpers skip `nil` optionals — confirm
        ///   against SotoCore if relying on this.)
        public func validate(name: String) throws {
            // Custom model / vocabulary identifiers: 1-200 chars of [0-9a-zA-Z._-].
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, max: 200)
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, min: 1)
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, max: 200)
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, min: 1)
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, pattern: "^[a-zA-Z-,]+$")
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, max: 48000)
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, min: 8000)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, max: 300)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, min: 1)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, pattern: "^[A-Z_, ]+$")
            // Session ID must be exactly 36 characters in canonical UUID form.
            try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, max: 200)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, min: 1)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            // The plural *Names members are comma-separated lists, hence the larger
            // length bound and the comma in the pattern.
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, max: 3000)
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, min: 1)
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, pattern: "^[a-zA-Z0-9,-._]+$")
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, max: 200)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, min: 1)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, max: 3000)
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, min: 1)
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, pattern: "^[a-zA-Z0-9,-._]+$")
        }

        // Empty on purpose: every member is encoded as an HTTP header or as the raw
        // event-stream payload, so nothing goes through keyed Codable coding.
        private enum CodingKeys: CodingKey {}
    }

    /// Response for `StartCallAnalyticsStreamTranscription`. The transcript events
    /// arrive as a raw event-stream payload, while every other member is decoded
    /// from an HTTP response header.
    public struct StartCallAnalyticsStreamTranscriptionResponse: AWSDecodableShape {
        // Raw payload: the response body is the event stream itself, not a JSON document.
        public static let _options: AWSShapeOptions = [.rawPayload]
        /// Provides detailed information about your real-time Call Analytics session.
        public let callAnalyticsTranscriptResultStream: AWSEventStream<CallAnalyticsTranscriptResultStream>
        /// Shows whether content identification was enabled for your Call Analytics transcription.
        public let contentIdentificationType: ContentIdentificationType?
        /// Shows whether content redaction was enabled for your Call Analytics transcription.
        public let contentRedactionType: ContentRedactionType?
        /// Shows whether partial results stabilization was enabled for your Call Analytics transcription.
        public let enablePartialResultsStabilization: Bool?
        /// Shows whether automatic language identification was enabled for your Call Analytics transcription.
        public let identifyLanguage: Bool?
        /// Provides the language code that you specified in your Call Analytics request.
        public let languageCode: CallAnalyticsLanguageCode?
        /// Provides the name of the custom language model that you specified in your Call Analytics  request.
        public let languageModelName: String?
        /// Provides the language codes that you specified in your Call Analytics request.
        public let languageOptions: String?
        /// Provides the media encoding you specified in your Call Analytics request.
        public let mediaEncoding: MediaEncoding?
        /// Provides the sample rate that you specified in your Call Analytics request.
        public let mediaSampleRateHertz: Int?
        /// Provides the stabilization level used for your transcription.
        public let partialResultsStability: PartialResultsStability?
        /// Lists the PII entity types you specified in your Call Analytics request.
        public let piiEntityTypes: String?
        /// Provides the preferred language that you specified in your Call Analytics request.
        public let preferredLanguage: CallAnalyticsLanguageCode?
        /// Provides the identifier for your real-time Call Analytics request.
        public let requestId: String?
        /// Provides the identifier for your Call Analytics transcription session.
        public let sessionId: String?
        /// Provides the vocabulary filtering method used in your Call Analytics transcription.
        public let vocabularyFilterMethod: VocabularyFilterMethod?
        /// Provides the name of the custom vocabulary filter that you specified in your Call Analytics request.
        public let vocabularyFilterName: String?
        /// Provides the names of the custom vocabulary filters that you specified in your Call Analytics request.
        public let vocabularyFilterNames: String?
        /// Provides the name of the custom vocabulary that you specified in your Call Analytics request.
        public let vocabularyName: String?
        /// Provides the names of the custom vocabularies that you specified in your Call Analytics request.
        public let vocabularyNames: String?

        /// Memberwise initializer.
        @inlinable
        public init(callAnalyticsTranscriptResultStream: AWSEventStream<CallAnalyticsTranscriptResultStream>, contentIdentificationType: ContentIdentificationType? = nil, contentRedactionType: ContentRedactionType? = nil, enablePartialResultsStabilization: Bool? = nil, identifyLanguage: Bool? = nil, languageCode: CallAnalyticsLanguageCode? = nil, languageModelName: String? = nil, languageOptions: String? = nil, mediaEncoding: MediaEncoding? = nil, mediaSampleRateHertz: Int? = nil, partialResultsStability: PartialResultsStability? = nil, piiEntityTypes: String? = nil, preferredLanguage: CallAnalyticsLanguageCode? = nil, requestId: String? = nil, sessionId: String? = nil, vocabularyFilterMethod: VocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyFilterNames: String? = nil, vocabularyName: String? = nil, vocabularyNames: String? = nil) {
            self.callAnalyticsTranscriptResultStream = callAnalyticsTranscriptResultStream
            self.contentIdentificationType = contentIdentificationType
            self.contentRedactionType = contentRedactionType
            self.enablePartialResultsStabilization = enablePartialResultsStabilization
            self.identifyLanguage = identifyLanguage
            self.languageCode = languageCode
            self.languageModelName = languageModelName
            self.languageOptions = languageOptions
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.partialResultsStability = partialResultsStability
            self.piiEntityTypes = piiEntityTypes
            self.preferredLanguage = preferredLanguage
            self.requestId = requestId
            self.sessionId = sessionId
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyFilterNames = vocabularyFilterNames
            self.vocabularyName = vocabularyName
            self.vocabularyNames = vocabularyNames
        }

        /// Decodes the event stream from the raw payload and the remaining members
        /// from the HTTP response headers.
        public init(from decoder: Decoder) throws {
            // Generated-shape invariant: the Soto decoder is assumed to inject the
            // response container into userInfo, so the force unwrap/cast holds here.
            let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer
            let container = try decoder.singleValueContainer()
            self.callAnalyticsTranscriptResultStream = try container.decode(AWSEventStream<CallAnalyticsTranscriptResultStream>.self)
            self.contentIdentificationType = try response.decodeHeaderIfPresent(ContentIdentificationType.self, key: "x-amzn-transcribe-content-identification-type")
            self.contentRedactionType = try response.decodeHeaderIfPresent(ContentRedactionType.self, key: "x-amzn-transcribe-content-redaction-type")
            self.enablePartialResultsStabilization = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-enable-partial-results-stabilization")
            self.identifyLanguage = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-identify-language")
            self.languageCode = try response.decodeHeaderIfPresent(CallAnalyticsLanguageCode.self, key: "x-amzn-transcribe-language-code")
            self.languageModelName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-language-model-name")
            self.languageOptions = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-language-options")
            self.mediaEncoding = try response.decodeHeaderIfPresent(MediaEncoding.self, key: "x-amzn-transcribe-media-encoding")
            self.mediaSampleRateHertz = try response.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-sample-rate")
            self.partialResultsStability = try response.decodeHeaderIfPresent(PartialResultsStability.self, key: "x-amzn-transcribe-partial-results-stability")
            self.piiEntityTypes = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-pii-entity-types")
            self.preferredLanguage = try response.decodeHeaderIfPresent(CallAnalyticsLanguageCode.self, key: "x-amzn-transcribe-preferred-language")
            self.requestId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-request-id")
            self.sessionId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-session-id")
            self.vocabularyFilterMethod = try response.decodeHeaderIfPresent(VocabularyFilterMethod.self, key: "x-amzn-transcribe-vocabulary-filter-method")
            self.vocabularyFilterName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-filter-name")
            self.vocabularyFilterNames = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-filter-names")
            self.vocabularyName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-name")
            self.vocabularyNames = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-names")
        }

        // Empty on purpose: every member is carried by headers or the raw payload.
        private enum CodingKeys: CodingKey {}
    }

    /// Request for `StartMedicalScribeStream`. The input event stream is sent as the
    /// raw request payload; all other members travel as HTTP headers.
    public struct StartMedicalScribeStreamRequest: AWSEncodableShape {
        /// The real-time input event stream. Its first element must be a MedicalScribeConfigurationEvent.
        public let inputStream: AWSEventStream<MedicalScribeInputStream>
        /// The language code for the HealthScribe streaming session.
        public let languageCode: MedicalScribeLanguageCode
        /// The encoding of the input audio. Supported formats: FLAC, OPUS-encoded audio in an Ogg container, and PCM (signed 16-bit little-endian only, which excludes WAV). See Media formats for details.
        public let mediaEncoding: MedicalScribeMediaEncoding
        /// The sample rate of the input audio in hertz. Amazon Web Services HealthScribe accepts 16,000 Hz through 48,000 Hz; the value must match the audio's actual rate.
        public let mediaSampleRateHertz: Int
        /// An optional session identifier in UUID format. When omitted, Amazon Web Services HealthScribe generates one and returns it in the response.
        public let sessionId: String?

        /// Memberwise initializer.
        @inlinable
        public init(inputStream: AWSEventStream<MedicalScribeInputStream>, languageCode: MedicalScribeLanguageCode, mediaEncoding: MedicalScribeMediaEncoding, mediaSampleRateHertz: Int, sessionId: String? = nil) {
            self.inputStream = inputStream
            self.languageCode = languageCode
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.sessionId = sessionId
        }

        /// Writes the event stream as the raw payload and the scalar members as HTTP headers.
        public func encode(to encoder: Encoder) throws {
            // Generated-shape invariant: the Soto encoder is assumed to inject the
            // request container into userInfo, so the force unwrap/cast holds here.
            let requestContainer = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
            var payloadContainer = encoder.singleValueContainer()
            try payloadContainer.encode(self.inputStream)
            requestContainer.encodeHeader(self.languageCode, key: "x-amzn-transcribe-language-code")
            requestContainer.encodeHeader(self.mediaEncoding, key: "x-amzn-transcribe-media-encoding")
            requestContainer.encodeHeader(self.mediaSampleRateHertz, key: "x-amzn-transcribe-sample-rate")
            requestContainer.encodeHeader(self.sessionId, key: "x-amzn-transcribe-session-id")
        }

        /// Applies the client-side range and format constraints before the request is sent.
        public func validate(name: String) throws {
            // Sample rate is restricted to the 16 kHz - 48 kHz range HealthScribe accepts.
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, max: 48000)
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, min: 16000)
            // Session ID must be exactly 36 characters in canonical UUID form.
            try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
        }

        // Empty on purpose: every member is carried by headers or the raw payload.
        private enum CodingKeys: CodingKey {}
    }

    /// Response for `StartMedicalScribeStream`. The result events arrive as a raw
    /// event-stream payload; all other members are decoded from response headers.
    public struct StartMedicalScribeStreamResponse: AWSDecodableShape {
        // Raw payload: the response body is the event stream itself, not a JSON document.
        public static let _options: AWSShapeOptions = [.rawPayload]
        /// The language code you specified, echoed back from the StartMedicalScribeStreamRequest.
        public let languageCode: MedicalScribeLanguageCode?
        /// The media encoding you specified, echoed back from the StartMedicalScribeStreamRequest.
        public let mediaEncoding: MedicalScribeMediaEncoding?
        /// The sample rate (in hertz) you specified, echoed back from the StartMedicalScribeStreamRequest.
        public let mediaSampleRateHertz: Int?
        /// A unique identifier for this streaming request.
        public let requestId: String?
        /// The stream on which the output events are delivered.
        public let resultStream: AWSEventStream<MedicalScribeResultStream>
        /// The UUID of the streaming session; matches the one supplied in the initial StartMedicalScribeStreamRequest, if any.
        public let sessionId: String?

        /// Memberwise initializer.
        @inlinable
        public init(languageCode: MedicalScribeLanguageCode? = nil, mediaEncoding: MedicalScribeMediaEncoding? = nil, mediaSampleRateHertz: Int? = nil, requestId: String? = nil, resultStream: AWSEventStream<MedicalScribeResultStream>, sessionId: String? = nil) {
            self.languageCode = languageCode
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.requestId = requestId
            self.resultStream = resultStream
            self.sessionId = sessionId
        }

        /// Reads the event stream from the raw payload and the scalar members from the response headers.
        public init(from decoder: Decoder) throws {
            // Generated-shape invariant: the Soto decoder is assumed to inject the
            // response container into userInfo, so the force unwrap/cast holds here.
            let responseContainer = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer
            let payloadContainer = try decoder.singleValueContainer()
            self.languageCode = try responseContainer.decodeHeaderIfPresent(MedicalScribeLanguageCode.self, key: "x-amzn-transcribe-language-code")
            self.mediaEncoding = try responseContainer.decodeHeaderIfPresent(MedicalScribeMediaEncoding.self, key: "x-amzn-transcribe-media-encoding")
            self.mediaSampleRateHertz = try responseContainer.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-sample-rate")
            self.requestId = try responseContainer.decodeHeaderIfPresent(String.self, key: "x-amzn-request-id")
            self.resultStream = try payloadContainer.decode(AWSEventStream<MedicalScribeResultStream>.self)
            self.sessionId = try responseContainer.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-session-id")
        }

        // Empty on purpose: every member is carried by headers or the raw payload.
        private enum CodingKeys: CodingKey {}
    }

    /// Request for `StartMedicalStreamTranscription`. The audio event stream is sent
    /// as the raw request payload; all other members travel as HTTP headers.
    public struct StartMedicalStreamTranscriptionRequest: AWSEncodableShape {
        /// The audio event stream, sent as the raw request payload.
        public let audioStream: AWSEventStream<AudioStream>
        /// Labels all personal health information (PHI) identified in your transcript. Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment. For more information, see Identifying personal health information (PHI) in a transcription.
        public let contentIdentificationType: MedicalContentIdentificationType?
        /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is  transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also  include NumberOfChannels. For more information, see Transcribing multi-channel audio.
        public let enableChannelIdentification: Bool?
        /// Specify the language code that represents the language spoken in your audio.  Amazon Transcribe Medical only supports US English (en-US).
        public let languageCode: LanguageCode
        /// Specify the encoding used for the input audio. Supported formats are:   FLAC   OPUS-encoded audio in an Ogg container   PCM (only signed 16-bit little-endian audio formats, which does not include WAV)   For more information, see Media formats.
        public let mediaEncoding: MediaEncoding
        /// The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.
        public let mediaSampleRateHertz: Int
        /// Specify the number of channels in your audio stream. This value must be  2, as only two channels are supported. If your audio doesn't contain  multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also  include EnableChannelIdentification.
        public let numberOfChannels: Int?
        /// Specify a name for your transcription session. If you don't include this parameter in  your request, Amazon Transcribe Medical generates an ID and returns it in the response.
        public let sessionId: String?
        /// Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization).
        public let showSpeakerLabel: Bool?
        /// Specify the medical specialty contained in your audio.
        public let specialty: Specialty
        /// Specify the type of input audio. For example, choose DICTATION for a  provider dictating patient notes and CONVERSATION for a dialogue between a patient and a medical professional.
        public let type: `Type`
        /// Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive.
        public let vocabularyName: String?

        /// Memberwise initializer.
        @inlinable
        public init(audioStream: AWSEventStream<AudioStream>, contentIdentificationType: MedicalContentIdentificationType? = nil, enableChannelIdentification: Bool? = nil, languageCode: LanguageCode, mediaEncoding: MediaEncoding, mediaSampleRateHertz: Int, numberOfChannels: Int? = nil, sessionId: String? = nil, showSpeakerLabel: Bool? = nil, specialty: Specialty, type: `Type`, vocabularyName: String? = nil) {
            self.audioStream = audioStream
            self.contentIdentificationType = contentIdentificationType
            self.enableChannelIdentification = enableChannelIdentification
            self.languageCode = languageCode
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.numberOfChannels = numberOfChannels
            self.sessionId = sessionId
            self.showSpeakerLabel = showSpeakerLabel
            self.specialty = specialty
            self.type = type
            self.vocabularyName = vocabularyName
        }

        /// Encodes the audio event stream as the raw payload and maps every scalar
        /// member onto its corresponding `x-amzn-transcribe-*` HTTP header.
        public func encode(to encoder: Encoder) throws {
            // Generated-shape invariant: the Soto encoder is assumed to inject the
            // request container into userInfo, so the force unwrap/cast holds here.
            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
            var container = encoder.singleValueContainer()
            try container.encode(self.audioStream)
            request.encodeHeader(self.contentIdentificationType, key: "x-amzn-transcribe-content-identification-type")
            request.encodeHeader(self.enableChannelIdentification, key: "x-amzn-transcribe-enable-channel-identification")
            request.encodeHeader(self.languageCode, key: "x-amzn-transcribe-language-code")
            request.encodeHeader(self.mediaEncoding, key: "x-amzn-transcribe-media-encoding")
            request.encodeHeader(self.mediaSampleRateHertz, key: "x-amzn-transcribe-sample-rate")
            request.encodeHeader(self.numberOfChannels, key: "x-amzn-transcribe-number-of-channels")
            request.encodeHeader(self.sessionId, key: "x-amzn-transcribe-session-id")
            request.encodeHeader(self.showSpeakerLabel, key: "x-amzn-transcribe-show-speaker-label")
            request.encodeHeader(self.specialty, key: "x-amzn-transcribe-specialty")
            request.encodeHeader(self.type, key: "x-amzn-transcribe-type")
            request.encodeHeader(self.vocabularyName, key: "x-amzn-transcribe-vocabulary-name")
        }

        /// Applies the client-side range, length, and pattern constraints before the
        /// request is sent.
        public func validate(name: String) throws {
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, max: 48000)
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, min: 8000)
            try self.validate(self.numberOfChannels, name: "numberOfChannels", parent: name, min: 2)
            // Session ID must be exactly 36 characters in canonical UUID form.
            try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, max: 200)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, min: 1)
            try self.validate(self.vocabularyName, name: "vocabularyName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
        }

        // Empty on purpose: every member is carried by headers or the raw payload.
        private enum CodingKeys: CodingKey {}
    }

    /// Response for `StartMedicalStreamTranscription`. The transcript events arrive
    /// as a raw event-stream payload, while every other member is decoded from an
    /// HTTP response header.
    public struct StartMedicalStreamTranscriptionResponse: AWSDecodableShape {
        // Raw payload: the response body is the event stream itself, not a JSON document.
        public static let _options: AWSShapeOptions = [.rawPayload]
        /// Shows whether content identification was enabled for your transcription.
        public let contentIdentificationType: MedicalContentIdentificationType?
        /// Shows whether channel identification was enabled for your transcription.
        public let enableChannelIdentification: Bool?
        /// Provides the language code that you specified in your request. This must be en-US.
        public let languageCode: LanguageCode?
        /// Provides the media encoding you specified in your request.
        public let mediaEncoding: MediaEncoding?
        /// Provides the sample rate that you specified in your request.
        public let mediaSampleRateHertz: Int?
        /// Provides the number of channels that you specified in your request.
        public let numberOfChannels: Int?
        /// Provides the identifier for your streaming request.
        public let requestId: String?
        /// Provides the identifier for your transcription session.
        public let sessionId: String?
        /// Shows whether speaker partitioning was enabled for your transcription.
        public let showSpeakerLabel: Bool?
        /// Provides the medical specialty that you specified in your request.
        public let specialty: Specialty?
        /// Provides detailed information about your streaming session.
        public let transcriptResultStream: AWSEventStream<MedicalTranscriptResultStream>
        /// Provides the type of audio you specified in your request.
        public let type: `Type`?
        /// Provides the name of the custom vocabulary that you specified in your request.
        public let vocabularyName: String?

        /// Memberwise initializer.
        @inlinable
        public init(contentIdentificationType: MedicalContentIdentificationType? = nil, enableChannelIdentification: Bool? = nil, languageCode: LanguageCode? = nil, mediaEncoding: MediaEncoding? = nil, mediaSampleRateHertz: Int? = nil, numberOfChannels: Int? = nil, requestId: String? = nil, sessionId: String? = nil, showSpeakerLabel: Bool? = nil, specialty: Specialty? = nil, transcriptResultStream: AWSEventStream<MedicalTranscriptResultStream>, type: `Type`? = nil, vocabularyName: String? = nil) {
            self.contentIdentificationType = contentIdentificationType
            self.enableChannelIdentification = enableChannelIdentification
            self.languageCode = languageCode
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.numberOfChannels = numberOfChannels
            self.requestId = requestId
            self.sessionId = sessionId
            self.showSpeakerLabel = showSpeakerLabel
            self.specialty = specialty
            self.transcriptResultStream = transcriptResultStream
            self.type = type
            self.vocabularyName = vocabularyName
        }

        /// Decodes the event stream from the raw payload and the remaining members
        /// from the HTTP response headers.
        public init(from decoder: Decoder) throws {
            // Generated-shape invariant: the Soto decoder is assumed to inject the
            // response container into userInfo, so the force unwrap/cast holds here.
            let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer
            let container = try decoder.singleValueContainer()
            self.contentIdentificationType = try response.decodeHeaderIfPresent(MedicalContentIdentificationType.self, key: "x-amzn-transcribe-content-identification-type")
            self.enableChannelIdentification = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-enable-channel-identification")
            self.languageCode = try response.decodeHeaderIfPresent(LanguageCode.self, key: "x-amzn-transcribe-language-code")
            self.mediaEncoding = try response.decodeHeaderIfPresent(MediaEncoding.self, key: "x-amzn-transcribe-media-encoding")
            self.mediaSampleRateHertz = try response.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-sample-rate")
            self.numberOfChannels = try response.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-number-of-channels")
            self.requestId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-request-id")
            self.sessionId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-session-id")
            self.showSpeakerLabel = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-show-speaker-label")
            self.specialty = try response.decodeHeaderIfPresent(Specialty.self, key: "x-amzn-transcribe-specialty")
            self.transcriptResultStream = try container.decode(AWSEventStream<MedicalTranscriptResultStream>.self)
            self.type = try response.decodeHeaderIfPresent(`Type`.self, key: "x-amzn-transcribe-type")
            self.vocabularyName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-name")
        }

        // Empty on purpose: every member is carried by headers or the raw payload.
        private enum CodingKeys: CodingKey {}
    }

    public struct StartStreamTranscriptionRequest: AWSEncodableShape {
        /// An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket  data frames. For more information, see Transcribing streaming audio.
        public let audioStream: AWSEventStream<AudioStream>
        /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in  PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information.
        public let contentIdentificationType: ContentIdentificationType?
        /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in  PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information.
        public let contentRedactionType: ContentRedactionType?
        /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the  output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is  transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also  include NumberOfChannels. For more information, see Transcribing multi-channel audio.
        public let enableChannelIdentification: Bool?
        /// Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see  Partial-result  stabilization.
        public let enablePartialResultsStabilization: Bool?
        /// Enables automatic language identification for your transcription. If you include IdentifyLanguage, you must include a list of language codes, using LanguageOptions, that you think may be present in  your audio stream.  You can also include a preferred language using PreferredLanguage. Adding a  preferred language can help Amazon Transcribe identify the language faster than if you omit this  parameter. If you have multi-channel audio that contains different languages on each channel, and you've  enabled channel identification, automatic language identification identifies the dominant language on  each audio channel. Note that you must include either LanguageCode or  IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. Streaming language identification can't be combined with custom language models or  redaction.
        public let identifyLanguage: Bool?
        /// Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead. If you include IdentifyMultipleLanguages, you must include a list of language codes, using LanguageOptions, that you think may be present in your stream. If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames. Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails.
        public let identifyMultipleLanguages: Bool?
        /// Specify the language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using  IdentifyLanguage to enable automatic language identification. For a list of languages supported with Amazon Transcribe streaming, refer to the  Supported  languages table.
        public let languageCode: LanguageCode?
        /// Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied.  There are no errors or warnings associated with a language mismatch. For more information, see Custom language models.
        public let languageModelName: String?
        /// Specify two or more language codes that represent the languages you think may be present  in your media; including more than five is not recommended. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include  IdentifyLanguage or IdentifyMultipleLanguages. For a list of languages supported with Amazon Transcribe streaming, refer to the  Supported  languages table.  You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request.
        public let languageOptions: String?
        /// Specify the encoding of your input audio. Supported formats are:   FLAC   OPUS-encoded audio in an Ogg container   PCM (only signed 16-bit little-endian audio formats, which does not include WAV)   For more information, see Media formats.
        public let mediaEncoding: MediaEncoding
        /// The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio.
        public let mediaSampleRateHertz: Int
        /// Specify the number of channels in your audio stream. This value must be  2, as only two channels are supported. If your audio doesn't contain  multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also  include EnableChannelIdentification.
        public let numberOfChannels: Int?
        /// Specify the level of stability to use when you enable partial results stabilization  (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result  stabilization.
        public let partialResultsStability: PartialResultsStability?
        /// Specify which types of personally identifiable information (PII) you want to redact in your  transcript. You can include as many types as you'd like, or you can select  ALL. Values must be comma-separated and can include: ADDRESS,  BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL,  NAME, PHONE, PIN,  SSN, or ALL. Note that if you include PiiEntityTypes in your request, you must also include  ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or  ContentIdentificationType in your request, but do not include  PiiEntityTypes, all PII is redacted or identified.
        public let piiEntityTypes: String?
        /// Specify a preferred language from the subset of language codes you specified in  LanguageOptions. You can only use this parameter if you've included IdentifyLanguage and LanguageOptions in your request.
        public let preferredLanguage: LanguageCode?
        /// Specify a name for your transcription session. If you don't include this parameter in your request,  Amazon Transcribe generates an ID and returns it in the response.
        public let sessionId: String?
        /// Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning  labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization).
        public let showSpeakerLabel: Bool?
        /// Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag.
        public let vocabularyFilterMethod: VocabularyFilterMethod?
        /// Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription.  This parameter is not intended for use with the IdentifyLanguage parameter. If you're including IdentifyLanguage in your request and want to use one or more vocabulary filters with your transcription, use the VocabularyFilterNames parameter instead.  For more information, see Using vocabulary filtering with unwanted  words.
        public let vocabularyFilterName: String?
        /// Specify the names of the custom vocabulary filters that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If none of the languages of the specified custom vocabulary filters match the language identified in your media, your job fails.  This parameter is only intended for use with  the IdentifyLanguage parameter. If you're not  including IdentifyLanguage in your request and want to use a custom vocabulary filter  with your transcription, use the VocabularyFilterName parameter instead.  For more information, see Using vocabulary filtering with unwanted  words.
        public let vocabularyFilterNames: String?
        /// Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription.  This parameter is not intended for use with the IdentifyLanguage parameter. If you're including IdentifyLanguage in your request and want to use one or more custom vocabularies with your transcription, use the VocabularyNames parameter instead.  For more information, see Custom vocabularies.
        public let vocabularyName: String?
        /// Specify the names of the custom vocabularies that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If none of the languages of the specified custom vocabularies match the language identified in  your media, your job fails.  This parameter is only intended for use with the IdentifyLanguage parameter. If you're not including IdentifyLanguage in your request and want to use a custom vocabulary with your transcription, use the VocabularyName parameter instead.  For more information, see Custom vocabularies.
        public let vocabularyNames: String?

        /// Memberwise initializer for the streaming-transcription request shape.
        ///
        /// Only `audioStream`, `mediaEncoding`, and `mediaSampleRateHertz` are required;
        /// every other parameter defaults to `nil` and is simply left unset on the request.
        @inlinable
        public init(audioStream: AWSEventStream<AudioStream>, contentIdentificationType: ContentIdentificationType? = nil, contentRedactionType: ContentRedactionType? = nil, enableChannelIdentification: Bool? = nil, enablePartialResultsStabilization: Bool? = nil, identifyLanguage: Bool? = nil, identifyMultipleLanguages: Bool? = nil, languageCode: LanguageCode? = nil, languageModelName: String? = nil, languageOptions: String? = nil, mediaEncoding: MediaEncoding, mediaSampleRateHertz: Int, numberOfChannels: Int? = nil, partialResultsStability: PartialResultsStability? = nil, piiEntityTypes: String? = nil, preferredLanguage: LanguageCode? = nil, sessionId: String? = nil, showSpeakerLabel: Bool? = nil, vocabularyFilterMethod: VocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyFilterNames: String? = nil, vocabularyName: String? = nil, vocabularyNames: String? = nil) {
            self.audioStream = audioStream
            self.contentIdentificationType = contentIdentificationType
            self.contentRedactionType = contentRedactionType
            self.enableChannelIdentification = enableChannelIdentification
            self.enablePartialResultsStabilization = enablePartialResultsStabilization
            self.identifyLanguage = identifyLanguage
            self.identifyMultipleLanguages = identifyMultipleLanguages
            self.languageCode = languageCode
            self.languageModelName = languageModelName
            self.languageOptions = languageOptions
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.numberOfChannels = numberOfChannels
            self.partialResultsStability = partialResultsStability
            self.piiEntityTypes = piiEntityTypes
            self.preferredLanguage = preferredLanguage
            self.sessionId = sessionId
            self.showSpeakerLabel = showSpeakerLabel
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyFilterNames = vocabularyFilterNames
            self.vocabularyName = vocabularyName
            self.vocabularyNames = vocabularyNames
        }

        /// Encodes the request for transport: the audio event stream is written as the
        /// raw request payload, and every scalar option travels as an
        /// `x-amzn-transcribe-*` HTTP header rather than as a JSON body member.
        public func encode(to encoder: Encoder) throws {
            // NOTE(review): generated Soto shapes rely on SotoCore having placed a
            // RequestEncodingContainer under the `.awsRequest` userInfo key; the
            // force-unwrap/force-cast is part of that runtime contract.
            let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer
            var container = encoder.singleValueContainer()
            // The event stream is the entire payload (single-value container, no keys).
            try container.encode(self.audioStream)
            // Optional settings are nil when unset; presumably `encodeHeader` omits the
            // header in that case — verify against SotoCore.
            request.encodeHeader(self.contentIdentificationType, key: "x-amzn-transcribe-content-identification-type")
            request.encodeHeader(self.contentRedactionType, key: "x-amzn-transcribe-content-redaction-type")
            request.encodeHeader(self.enableChannelIdentification, key: "x-amzn-transcribe-enable-channel-identification")
            request.encodeHeader(self.enablePartialResultsStabilization, key: "x-amzn-transcribe-enable-partial-results-stabilization")
            request.encodeHeader(self.identifyLanguage, key: "x-amzn-transcribe-identify-language")
            request.encodeHeader(self.identifyMultipleLanguages, key: "x-amzn-transcribe-identify-multiple-languages")
            request.encodeHeader(self.languageCode, key: "x-amzn-transcribe-language-code")
            request.encodeHeader(self.languageModelName, key: "x-amzn-transcribe-language-model-name")
            request.encodeHeader(self.languageOptions, key: "x-amzn-transcribe-language-options")
            request.encodeHeader(self.mediaEncoding, key: "x-amzn-transcribe-media-encoding")
            request.encodeHeader(self.mediaSampleRateHertz, key: "x-amzn-transcribe-sample-rate")
            request.encodeHeader(self.numberOfChannels, key: "x-amzn-transcribe-number-of-channels")
            request.encodeHeader(self.partialResultsStability, key: "x-amzn-transcribe-partial-results-stability")
            request.encodeHeader(self.piiEntityTypes, key: "x-amzn-transcribe-pii-entity-types")
            request.encodeHeader(self.preferredLanguage, key: "x-amzn-transcribe-preferred-language")
            request.encodeHeader(self.sessionId, key: "x-amzn-transcribe-session-id")
            request.encodeHeader(self.showSpeakerLabel, key: "x-amzn-transcribe-show-speaker-label")
            request.encodeHeader(self.vocabularyFilterMethod, key: "x-amzn-transcribe-vocabulary-filter-method")
            request.encodeHeader(self.vocabularyFilterName, key: "x-amzn-transcribe-vocabulary-filter-name")
            request.encodeHeader(self.vocabularyFilterNames, key: "x-amzn-transcribe-vocabulary-filter-names")
            request.encodeHeader(self.vocabularyName, key: "x-amzn-transcribe-vocabulary-name")
            request.encodeHeader(self.vocabularyNames, key: "x-amzn-transcribe-vocabulary-names")
        }

        /// Performs client-side validation of the request parameters against the service
        /// model's length, range, and pattern constraints, throwing before any network
        /// call is attempted.
        public func validate(name: String) throws {
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, max: 200)
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, min: 1)
            try self.validate(self.languageModelName, name: "languageModelName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, max: 200)
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, min: 1)
            try self.validate(self.languageOptions, name: "languageOptions", parent: name, pattern: "^[a-zA-Z-,]+$")
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, max: 48000)
            try self.validate(self.mediaSampleRateHertz, name: "mediaSampleRateHertz", parent: name, min: 8000)
            // Only a lower bound is modeled here; per the property documentation the
            // service additionally requires the value, when present, to be exactly 2.
            try self.validate(self.numberOfChannels, name: "numberOfChannels", parent: name, min: 2)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, max: 300)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, min: 1)
            try self.validate(self.piiEntityTypes, name: "piiEntityTypes", parent: name, pattern: "^[A-Z_, ]+$")
            // min == max == 36 plus the pattern below: the session ID must be a UUID string.
            try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36)
            try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, max: 200)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, min: 1)
            try self.validate(self.vocabularyFilterName, name: "vocabularyFilterName", parent: name, pattern: "^[0-9a-zA-Z._-]+$")
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, max: 3000)
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, min: 1)
            // NOTE(review): in "[a-zA-Z0-9,-._]" the `,-.` sequence is the character
            // range ','...'.', which happens to be exactly `,`, `-`, `.` — correct but
            // fragile; this pattern comes from the service model, do not hand-edit.
            try self.validate(self.vocabularyFilterNames, name: "vocabularyFilterNames", parent: name, pattern: "^[a-zA-Z0-9,-._]+$")
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, max: 3000)
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, min: 1)
            // Same `,-.` character-range note as above applies here.
            try self.validate(self.vocabularyNames, name: "vocabularyNames", parent: name, pattern: "^[a-zA-Z0-9,-._]+$")
        }

        // No body-level coding keys: the payload is the raw audio event stream and all
        // other members are transported as HTTP headers (see `encode(to:)`), so the
        // Codable machinery has nothing to map.
        private enum CodingKeys: CodingKey {}
    }

    /// Response shape for the `StartStreamTranscription` operation: the service echoes
    /// your request settings back as HTTP headers, and `transcriptResultStream`
    /// carries the streamed transcription results as the raw payload.
    public struct StartStreamTranscriptionResponse: AWSDecodableShape {
        public static let _options: AWSShapeOptions = [.rawPayload]
        /// Shows whether content identification was enabled for your transcription.
        public let contentIdentificationType: ContentIdentificationType?
        /// Shows whether content redaction was enabled for your transcription.
        public let contentRedactionType: ContentRedactionType?
        /// Shows whether channel identification was enabled for your transcription.
        public let enableChannelIdentification: Bool?
        /// Shows whether partial results stabilization was enabled for your transcription.
        public let enablePartialResultsStabilization: Bool?
        /// Shows whether automatic language identification was enabled for your  transcription.
        public let identifyLanguage: Bool?
        /// Shows whether automatic multi-language identification was enabled for your transcription.
        public let identifyMultipleLanguages: Bool?
        /// Provides the language code that you specified in your request.
        public let languageCode: LanguageCode?
        /// Provides the name of the custom language model that you specified in your request.
        public let languageModelName: String?
        /// Provides the language codes that you specified in your request.
        public let languageOptions: String?
        /// Provides the media encoding you specified in your request.
        public let mediaEncoding: MediaEncoding?
        /// Provides the sample rate that you specified in your request.
        public let mediaSampleRateHertz: Int?
        /// Provides the number of channels that you specified in your request.
        public let numberOfChannels: Int?
        /// Provides the stabilization level used for your transcription.
        public let partialResultsStability: PartialResultsStability?
        /// Lists the PII entity types you specified in your request.
        public let piiEntityTypes: String?
        /// Provides the preferred language that you specified in your request.
        public let preferredLanguage: LanguageCode?
        /// Provides the identifier for your streaming request.
        public let requestId: String?
        /// Provides the identifier for your transcription session.
        public let sessionId: String?
        /// Shows whether speaker partitioning was enabled for your transcription.
        public let showSpeakerLabel: Bool?
        /// Provides detailed information about your streaming session.
        public let transcriptResultStream: AWSEventStream<TranscriptResultStream>
        /// Provides the vocabulary filtering method used in your transcription.
        public let vocabularyFilterMethod: VocabularyFilterMethod?
        /// Provides the name of the custom vocabulary filter that you specified in your request.
        public let vocabularyFilterName: String?
        /// Provides the names of the custom vocabulary filters that you specified in your request.
        public let vocabularyFilterNames: String?
        /// Provides the name of the custom vocabulary that you specified in your request.
        public let vocabularyName: String?
        /// Provides the names of the custom vocabularies that you specified in your request.
        public let vocabularyNames: String?

        /// Memberwise initializer. Responses are normally produced by `init(from:)`
        /// during decoding; this is available for constructing values directly.
        @inlinable
        public init(contentIdentificationType: ContentIdentificationType? = nil, contentRedactionType: ContentRedactionType? = nil, enableChannelIdentification: Bool? = nil, enablePartialResultsStabilization: Bool? = nil, identifyLanguage: Bool? = nil, identifyMultipleLanguages: Bool? = nil, languageCode: LanguageCode? = nil, languageModelName: String? = nil, languageOptions: String? = nil, mediaEncoding: MediaEncoding? = nil, mediaSampleRateHertz: Int? = nil, numberOfChannels: Int? = nil, partialResultsStability: PartialResultsStability? = nil, piiEntityTypes: String? = nil, preferredLanguage: LanguageCode? = nil, requestId: String? = nil, sessionId: String? = nil, showSpeakerLabel: Bool? = nil, transcriptResultStream: AWSEventStream<TranscriptResultStream>, vocabularyFilterMethod: VocabularyFilterMethod? = nil, vocabularyFilterName: String? = nil, vocabularyFilterNames: String? = nil, vocabularyName: String? = nil, vocabularyNames: String? = nil) {
            self.contentIdentificationType = contentIdentificationType
            self.contentRedactionType = contentRedactionType
            self.enableChannelIdentification = enableChannelIdentification
            self.enablePartialResultsStabilization = enablePartialResultsStabilization
            self.identifyLanguage = identifyLanguage
            self.identifyMultipleLanguages = identifyMultipleLanguages
            self.languageCode = languageCode
            self.languageModelName = languageModelName
            self.languageOptions = languageOptions
            self.mediaEncoding = mediaEncoding
            self.mediaSampleRateHertz = mediaSampleRateHertz
            self.numberOfChannels = numberOfChannels
            self.partialResultsStability = partialResultsStability
            self.piiEntityTypes = piiEntityTypes
            self.preferredLanguage = preferredLanguage
            self.requestId = requestId
            self.sessionId = sessionId
            self.showSpeakerLabel = showSpeakerLabel
            self.transcriptResultStream = transcriptResultStream
            self.vocabularyFilterMethod = vocabularyFilterMethod
            self.vocabularyFilterName = vocabularyFilterName
            self.vocabularyFilterNames = vocabularyFilterNames
            self.vocabularyName = vocabularyName
            self.vocabularyNames = vocabularyNames
        }

        /// Decodes the response: each echoed setting is read from its
        /// `x-amzn-transcribe-*` response header, and the body is decoded as the
        /// transcript event stream.
        public init(from decoder: Decoder) throws {
            // NOTE(review): SotoCore is expected to place a ResponseDecodingContainer
            // under the `.awsResponse` userInfo key for response shapes; the
            // force-unwrap/force-cast is part of that runtime contract.
            let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer
            let container = try decoder.singleValueContainer()
            self.contentIdentificationType = try response.decodeHeaderIfPresent(ContentIdentificationType.self, key: "x-amzn-transcribe-content-identification-type")
            self.contentRedactionType = try response.decodeHeaderIfPresent(ContentRedactionType.self, key: "x-amzn-transcribe-content-redaction-type")
            self.enableChannelIdentification = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-enable-channel-identification")
            self.enablePartialResultsStabilization = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-enable-partial-results-stabilization")
            self.identifyLanguage = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-identify-language")
            self.identifyMultipleLanguages = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-identify-multiple-languages")
            self.languageCode = try response.decodeHeaderIfPresent(LanguageCode.self, key: "x-amzn-transcribe-language-code")
            self.languageModelName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-language-model-name")
            self.languageOptions = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-language-options")
            self.mediaEncoding = try response.decodeHeaderIfPresent(MediaEncoding.self, key: "x-amzn-transcribe-media-encoding")
            self.mediaSampleRateHertz = try response.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-sample-rate")
            self.numberOfChannels = try response.decodeHeaderIfPresent(Int.self, key: "x-amzn-transcribe-number-of-channels")
            self.partialResultsStability = try response.decodeHeaderIfPresent(PartialResultsStability.self, key: "x-amzn-transcribe-partial-results-stability")
            self.piiEntityTypes = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-pii-entity-types")
            self.preferredLanguage = try response.decodeHeaderIfPresent(LanguageCode.self, key: "x-amzn-transcribe-preferred-language")
            self.requestId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-request-id")
            self.sessionId = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-session-id")
            self.showSpeakerLabel = try response.decodeHeaderIfPresent(Bool.self, key: "x-amzn-transcribe-show-speaker-label")
            // The event stream is the entire payload (single-value container, no keys).
            self.transcriptResultStream = try container.decode(AWSEventStream<TranscriptResultStream>.self)
            self.vocabularyFilterMethod = try response.decodeHeaderIfPresent(VocabularyFilterMethod.self, key: "x-amzn-transcribe-vocabulary-filter-method")
            self.vocabularyFilterName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-filter-name")
            self.vocabularyFilterNames = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-filter-names")
            self.vocabularyName = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-name")
            self.vocabularyNames = try response.decodeHeaderIfPresent(String.self, key: "x-amzn-transcribe-vocabulary-names")
        }

        // No body-level coding keys: everything is headers plus the raw event-stream
        // payload (`_options` includes `.rawPayload`).
        private enum CodingKeys: CodingKey {}
    }

    /// A millisecond time window within the audio stream over which a category match occurred.
    public struct TimestampRange: AWSDecodableShape {
        /// Offset, in milliseconds, from the start of the audio stream to where the category  match begins.
        public let beginOffsetMillis: Int64?
        /// Offset, in milliseconds, from the start of the audio stream to where the category  match ends.
        public let endOffsetMillis: Int64?

        private enum CodingKeys: String, CodingKey {
            case beginOffsetMillis = "BeginOffsetMillis"
            case endOffsetMillis = "EndOffsetMillis"
        }

        @inlinable
        public init(beginOffsetMillis: Int64? = nil, endOffsetMillis: Int64? = nil) {
            self.beginOffsetMillis = beginOffsetMillis
            self.endOffsetMillis = endOffsetMillis
        }
    }

    /// Container for transcription output: the set of segment `Result`s produced for your request.
    public struct Transcript: AWSDecodableShape {
        /// Contains a set of transcription results from one or more audio segments, along with additional  information per your request parameters. This can include information relating to alternative transcriptions, channel identification, partial result stabilization, language identification, and other transcription-related data.
        public let results: [Result]?

        @inlinable
        public init(results: [Result]? = nil) {
            self.results = results
        }

        private enum CodingKeys: String, CodingKey {
            case results = "Results"
        }
    }

    /// A streaming event that wraps a `Transcript` object.
    public struct TranscriptEvent: AWSDecodableShape {
        /// The wrapped `Transcript`, containing `Results`: a set of transcription results from
        /// one or more audio segments, plus any alternative-transcription, channel-identification,
        /// partial-result-stabilization, or language-identification data your request asked for.
        public let transcript: Transcript?

        private enum CodingKeys: String, CodingKey {
            case transcript = "Transcript"
        }

        @inlinable
        public init(transcript: Transcript? = nil) {
            self.transcript = transcript
        }
    }

    /// An utterance-level result segment (Call Analytics streams), pairing the transcript text
    /// with timing, speaker-role, sentiment, issue-detection, and PII-entity details.
    public struct UtteranceEvent: AWSDecodableShape {
        /// The time, in milliseconds, from the beginning of the audio stream to the start of the UtteranceEvent.
        public let beginOffsetMillis: Int64?
        /// The time, in milliseconds, from the beginning of the audio stream to the end of the  UtteranceEvent.
        public let endOffsetMillis: Int64?
        /// Contains entities identified as personally identifiable information (PII) in your transcription  output.
        public let entities: [CallAnalyticsEntity]?
        /// Indicates whether the segment in the UtteranceEvent is complete  (FALSE) or partial (TRUE).
        public let isPartial: Bool?
        /// Provides the issue that was detected in the specified segment.
        public let issuesDetected: [IssueDetected]?
        /// Contains words, phrases, or punctuation marks that are associated with the specified  UtteranceEvent.
        public let items: [CallAnalyticsItem]?
        /// The language code that represents the language spoken in your audio stream.
        public let languageCode: CallAnalyticsLanguageCode?
        /// The language code of the dominant language identified in your stream.
        public let languageIdentification: [CallAnalyticsLanguageWithScore]?
        /// Provides the role of the speaker for each audio channel, either CUSTOMER or  AGENT.
        public let participantRole: ParticipantRole?
        /// Provides the sentiment that was detected in the specified segment.
        public let sentiment: Sentiment?
        /// Contains transcribed text.
        public let transcript: String?
        /// The unique identifier that is associated with the specified UtteranceEvent.
        public let utteranceId: String?

        @inlinable
        public init(beginOffsetMillis: Int64? = nil, endOffsetMillis: Int64? = nil, entities: [CallAnalyticsEntity]? = nil, isPartial: Bool? = nil, issuesDetected: [IssueDetected]? = nil, items: [CallAnalyticsItem]? = nil, languageCode: CallAnalyticsLanguageCode? = nil, languageIdentification: [CallAnalyticsLanguageWithScore]? = nil, participantRole: ParticipantRole? = nil, sentiment: Sentiment? = nil, transcript: String? = nil, utteranceId: String? = nil) {
            self.beginOffsetMillis = beginOffsetMillis
            self.endOffsetMillis = endOffsetMillis
            self.entities = entities
            self.isPartial = isPartial
            self.issuesDetected = issuesDetected
            self.items = items
            self.languageCode = languageCode
            self.languageIdentification = languageIdentification
            self.participantRole = participantRole
            self.sentiment = sentiment
            self.transcript = transcript
            self.utteranceId = utteranceId
        }

        private enum CodingKeys: String, CodingKey {
            case beginOffsetMillis = "BeginOffsetMillis"
            case endOffsetMillis = "EndOffsetMillis"
            case entities = "Entities"
            case isPartial = "IsPartial"
            case issuesDetected = "IssuesDetected"
            case items = "Items"
            case languageCode = "LanguageCode"
            case languageIdentification = "LanguageIdentification"
            case participantRole = "ParticipantRole"
            case sentiment = "Sentiment"
            case transcript = "Transcript"
            case utteranceId = "UtteranceId"
        }
    }
}

// MARK: - Errors

/// Error enum for TranscribeStreaming
public struct TranscribeStreamingErrorType: AWSErrorType {
    /// Raw error-code strings returned by the TranscribeStreaming service.
    enum Code: String {
        case badRequestException = "BadRequestException"
        case conflictException = "ConflictException"
        case internalFailureException = "InternalFailureException"
        case limitExceededException = "LimitExceededException"
        case resourceNotFoundException = "ResourceNotFoundException"
        case serviceUnavailableException = "ServiceUnavailableException"
    }

    private let error: Code
    public let context: AWSErrorContext?

    /// Initialize TranscribeStreamingErrorType from a raw error-code string.
    /// Fails (returns `nil`) when the code is not one this service defines.
    public init?(errorCode: String, context: AWSErrorContext) {
        guard let code = Code(rawValue: errorCode) else { return nil }
        self.error = code
        self.context = context
    }

    /// Internal convenience initializer used by the static error accessors below.
    internal init(_ error: Code) {
        self.error = error
        self.context = nil
    }

    /// The raw error-code string for this error.
    public var errorCode: String { error.rawValue }

    /// One or more arguments to the StartStreamTranscription,  StartMedicalStreamTranscription, or StartCallAnalyticsStreamTranscription  operation was not valid. For example, MediaEncoding or LanguageCode  used unsupported values. Check the specified parameters and try your request again.
    public static var badRequestException: Self { .init(.badRequestException) }
    /// A new stream started with the same session ID. The current stream has been terminated.
    public static var conflictException: Self { .init(.conflictException) }
    /// A problem occurred while processing the audio. Amazon Transcribe terminated  processing.
    public static var internalFailureException: Self { .init(.internalFailureException) }
    /// Your client has exceeded one of the Amazon Transcribe limits. This is typically the audio length limit. Break your audio stream into smaller chunks and try your request again.
    public static var limitExceededException: Self { .init(.limitExceededException) }
    /// The request references a resource which doesn't exist.
    public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) }
    /// The service is currently unavailable. Try your request later.
    public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) }
}

// MARK: - Equatable

extension TranscribeStreamingErrorType: Equatable {
    /// Two errors are equal when they carry the same service error code;
    /// the associated `context` is deliberately not compared.
    public static func == (lhs: TranscribeStreamingErrorType, rhs: TranscribeStreamingErrorType) -> Bool {
        return lhs.error == rhs.error
    }
}

// MARK: - CustomStringConvertible

extension TranscribeStreamingErrorType: CustomStringConvertible {
    /// Human-readable form: "<ErrorCode>: <message>" (message may be empty).
    public var description: String {
        "\(self.error.rawValue): \(self.message ?? "")"
    }
}
