//===----------------------------------------------------------------------===//
//
// This source file is part of the Soto for AWS open source project
//
// Copyright (c) 2017-2024 the Soto project authors
// Licensed under Apache License v2.0
//
// See LICENSE.txt for license information
// See CONTRIBUTORS.txt for the list of Soto project authors
//
// SPDX-License-Identifier: Apache-2.0
//
//===----------------------------------------------------------------------===//

// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
// DO NOT EDIT.

#if canImport(FoundationEssentials)
import FoundationEssentials
#else
import Foundation
#endif
@_spi(SotoInternal) import SotoCore

extension FSx {
    // MARK: Enums

    /// Categories of Active Directory errors reported by the FSx API.
    public enum ActiveDirectoryErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case domainNotFound = "DOMAIN_NOT_FOUND"
        case incompatibleDomainMode = "INCOMPATIBLE_DOMAIN_MODE"
        case invalidDomainStage = "INVALID_DOMAIN_STAGE"
        case invalidNetworkType = "INVALID_NETWORK_TYPE"
        case wrongVpc = "WRONG_VPC"
        public var description: String { rawValue }
    }

    /// Kinds of administrative actions FSx performs on a resource.
    public enum AdministrativeActionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case downloadDataFromBackup = "DOWNLOAD_DATA_FROM_BACKUP"
        case fileSystemAliasAssociation = "FILE_SYSTEM_ALIAS_ASSOCIATION"
        case fileSystemAliasDisassociation = "FILE_SYSTEM_ALIAS_DISASSOCIATION"
        case fileSystemUpdate = "FILE_SYSTEM_UPDATE"
        case iopsOptimization = "IOPS_OPTIMIZATION"
        case misconfiguredStateRecovery = "MISCONFIGURED_STATE_RECOVERY"
        case releaseNfsV3Locks = "RELEASE_NFS_V3_LOCKS"
        case snapshotUpdate = "SNAPSHOT_UPDATE"
        case storageOptimization = "STORAGE_OPTIMIZATION"
        case storageTypeOptimization = "STORAGE_TYPE_OPTIMIZATION"
        case throughputOptimization = "THROUGHPUT_OPTIMIZATION"
        case volumeInitializeWithSnapshot = "VOLUME_INITIALIZE_WITH_SNAPSHOT"
        case volumeRestore = "VOLUME_RESTORE"
        case volumeUpdate = "VOLUME_UPDATE"
        case volumeUpdateWithSnapshot = "VOLUME_UPDATE_WITH_SNAPSHOT"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a file system DNS alias.
    public enum AliasLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case createFailed = "CREATE_FAILED"
        case creating = "CREATING"
        case deleteFailed = "DELETE_FAILED"
        case deleting = "DELETING"
        public var description: String { rawValue }
    }

    /// Automatic-import policy settings for a linked data repository.
    public enum AutoImportPolicyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case new = "NEW"
        case newChanged = "NEW_CHANGED"
        case newChangedDeleted = "NEW_CHANGED_DELETED"
        case none = "NONE"
        public var description: String { rawValue }
    }

    /// Time units available for an autocommit period.
    public enum AutocommitPeriodType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case days = "DAYS"
        case hours = "HOURS"
        case minutes = "MINUTES"
        case months = "MONTHS"
        case none = "NONE"
        case years = "YEARS"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a backup.
    public enum BackupLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case copying = "COPYING"
        case creating = "CREATING"
        case deleted = "DELETED"
        case failed = "FAILED"
        case pending = "PENDING"
        case transferring = "TRANSFERRING"
        public var description: String { rawValue }
    }

    /// How a backup was created.
    public enum BackupType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case automatic = "AUTOMATIC"
        case awsBackup = "AWS_BACKUP"
        case userInitiated = "USER_INITIATED"
        public var description: String { rawValue }
    }

    /// Supported data compression settings.
    public enum DataCompressionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case lz4 = "LZ4"
        case none = "NONE"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a data repository association.
    public enum DataRepositoryLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case misconfigured = "MISCONFIGURED"
        case updating = "UPDATING"
        public var description: String { rawValue }
    }

    /// Filter names accepted when listing data repository tasks.
    public enum DataRepositoryTaskFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case dataRepoAssociationId = "data-repository-association-id"
        case fileCacheId = "file-cache-id"
        case fileSystemId = "file-system-id"
        case taskLifecycle = "task-lifecycle"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a data repository task.
    public enum DataRepositoryTaskLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case canceled = "CANCELED"
        case canceling = "CANCELING"
        case executing = "EXECUTING"
        case failed = "FAILED"
        case pending = "PENDING"
        case succeeded = "SUCCEEDED"
        public var description: String { rawValue }
    }

    /// Types of data repository tasks.
    public enum DataRepositoryTaskType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case `import` = "IMPORT_METADATA_FROM_REPOSITORY"
        case autoTriggeredEviction = "AUTO_RELEASE_DATA"
        case eviction = "RELEASE_DATA_FROM_FILESYSTEM"
        case export = "EXPORT_TO_REPOSITORY"
        public var description: String { rawValue }
    }

    /// Options accepted when deleting an OpenZFS file system.
    public enum DeleteFileSystemOpenZFSOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case deleteChildVolumesAndSnapshots = "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"
        public var description: String { rawValue }
    }

    /// Options accepted when deleting an OpenZFS volume.
    public enum DeleteOpenZFSVolumeOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case deleteChildVolumesAndSnapshots = "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS"
        public var description: String { rawValue }
    }

    /// How disk IOPS are provisioned: automatically or by the user.
    public enum DiskIopsConfigurationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case automatic = "AUTOMATIC"
        case userProvisioned = "USER_PROVISIONED"
        public var description: String { rawValue }
    }

    /// Drive cache settings: none, or a read cache.
    public enum DriveCacheType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case none = "NONE"
        case read = "READ"
        public var description: String { rawValue }
    }

    /// Change-event types: new, changed, or deleted.
    public enum EventType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case changed = "CHANGED"
        case deleted = "DELETED"
        case new = "NEW"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a file cache.
    public enum FileCacheLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case updating = "UPDATING"
        public var description: String { rawValue }
    }

    /// Deployment types available for a Lustre file cache.
    public enum FileCacheLustreDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case cache1 = "CACHE_1"
        public var description: String { rawValue }
    }

    /// Supported file cache types.
    public enum FileCacheType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case lustre = "LUSTRE"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a file system.
    public enum FileSystemLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case misconfigured = "MISCONFIGURED"
        case misconfiguredUnavailable = "MISCONFIGURED_UNAVAILABLE"
        case updating = "UPDATING"
        public var description: String { rawValue }
    }

    /// Maintenance operations a file system can be undergoing.
    public enum FileSystemMaintenanceOperation: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case backingUp = "BACKING_UP"
        case patching = "PATCHING"
        public var description: String { rawValue }
    }

    /// Supported file system types.
    public enum FileSystemType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case lustre = "LUSTRE"
        case ontap = "ONTAP"
        case openzfs = "OPENZFS"
        case windows = "WINDOWS"
        public var description: String { rawValue }
    }

    /// Filter names accepted by describe/list operations.
    public enum FilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case backupType = "backup-type"
        case dataRepositoryType = "data-repository-type"
        case fileCacheId = "file-cache-id"
        case fileCacheType = "file-cache-type"
        case fileSystemId = "file-system-id"
        case fileSystemType = "file-system-type"
        case volumeId = "volume-id"
        public var description: String { rawValue }
    }

    /// FlexCache endpoint types.
    public enum FlexCacheEndpointType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case cache = "CACHE"
        case none = "NONE"
        case origin = "ORIGIN"
        public var description: String { rawValue }
    }

    /// ONTAP volume types accepted on create/update input.
    public enum InputOntapVolumeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case dp = "DP"
        case rw = "RW"
        public var description: String { rawValue }
    }

    /// Audit log levels for Lustre access events.
    public enum LustreAccessAuditLogLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case disabled = "DISABLED"
        case errorOnly = "ERROR_ONLY"
        case warnError = "WARN_ERROR"
        case warnOnly = "WARN_ONLY"
        public var description: String { rawValue }
    }

    /// Deployment types for a Lustre file system.
    public enum LustreDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case persistent1 = "PERSISTENT_1"
        case persistent2 = "PERSISTENT_2"
        case scratch1 = "SCRATCH_1"
        case scratch2 = "SCRATCH_2"
        public var description: String { rawValue }
    }

    /// How the Lustre read cache is sized.
    public enum LustreReadCacheSizingMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case noCache = "NO_CACHE"
        case proportionalToThroughputCapacity = "PROPORTIONAL_TO_THROUGHPUT_CAPACITY"
        case userProvisioned = "USER_PROVISIONED"
        public var description: String { rawValue }
    }

    /// How the metadata configuration is managed.
    public enum MetadataConfigurationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case automatic = "AUTOMATIC"
        case userProvisioned = "USER_PROVISIONED"
        public var description: String { rawValue }
    }

    /// Network type values (IPV4 or DUAL).
    public enum NetworkType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case dual = "DUAL"
        case ipv4 = "IPV4"
        public var description: String { rawValue }
    }

    /// Supported NFS protocol versions.
    public enum NfsVersion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case nfs3 = "NFS3"
        public var description: String { rawValue }
    }

    /// Deployment types for an ONTAP file system.
    public enum OntapDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case multiAz1 = "MULTI_AZ_1"
        case multiAz2 = "MULTI_AZ_2"
        case singleAz1 = "SINGLE_AZ_1"
        case singleAz2 = "SINGLE_AZ_2"
        public var description: String { rawValue }
    }

    /// ONTAP volume types.
    public enum OntapVolumeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case dp = "DP"
        case ls = "LS"
        case rw = "RW"
        public var description: String { rawValue }
    }

    /// Copy strategies for OpenZFS volumes and snapshots.
    public enum OpenZFSCopyStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case clone = "CLONE"
        case fullCopy = "FULL_COPY"
        case incrementalCopy = "INCREMENTAL_COPY"
        public var description: String { rawValue }
    }

    /// Data compression settings for OpenZFS.
    public enum OpenZFSDataCompressionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case lz4 = "LZ4"
        case none = "NONE"
        case zstd = "ZSTD"
        public var description: String { rawValue }
    }

    /// Deployment types for an OpenZFS file system.
    public enum OpenZFSDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case multiAz1 = "MULTI_AZ_1"
        case singleAz1 = "SINGLE_AZ_1"
        case singleAz2 = "SINGLE_AZ_2"
        case singleAzHa1 = "SINGLE_AZ_HA_1"
        case singleAzHa2 = "SINGLE_AZ_HA_2"
        public var description: String { rawValue }
    }

    /// User identity types for OpenZFS file systems.
    public enum OpenZFSFileSystemUserType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case posix = "POSIX"
        public var description: String { rawValue }
    }

    /// Quota subject types (user or group) for OpenZFS.
    public enum OpenZFSQuotaType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case group = "GROUP"
        case user = "USER"
        public var description: String { rawValue }
    }

    /// How the OpenZFS read cache is sized.
    public enum OpenZFSReadCacheSizingMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case noCache = "NO_CACHE"
        case proportionalToThroughputCapacity = "PROPORTIONAL_TO_THROUGHPUT_CAPACITY"
        case userProvisioned = "USER_PROVISIONED"
        public var description: String { rawValue }
    }

    /// Privileged-delete settings.
    public enum PrivilegedDelete: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case disabled = "DISABLED"
        case enabled = "ENABLED"
        case permanentlyDisabled = "PERMANENTLY_DISABLED"
        public var description: String { rawValue }
    }

    /// Supported report formats.
    public enum ReportFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case reportCsv20191124 = "REPORT_CSV_20191124"
        public var description: String { rawValue }
    }

    /// Supported report scopes.
    public enum ReportScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case failedFilesOnly = "FAILED_FILES_ONLY"
        public var description: String { rawValue }
    }

    /// Resource types (file system or volume).
    public enum ResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileSystem = "FILE_SYSTEM"
        case volume = "VOLUME"
        public var description: String { rawValue }
    }

    /// Options accepted when restoring an OpenZFS volume from a snapshot.
    public enum RestoreOpenZFSVolumeOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case deleteClonedVolumes = "DELETE_CLONED_VOLUMES"
        case deleteIntermediateSnapshots = "DELETE_INTERMEDIATE_SNAPSHOTS"
        public var description: String { rawValue }
    }

    /// Units (or special values) for a retention period.
    public enum RetentionPeriodType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case days = "DAYS"
        case hours = "HOURS"
        case infinite = "INFINITE"
        case minutes = "MINUTES"
        case months = "MONTHS"
        case seconds = "SECONDS"
        case unspecified = "UNSPECIFIED"
        case years = "YEARS"
        public var description: String { rawValue }
    }

    /// Lifecycle states of an S3 access point attachment.
    public enum S3AccessPointAttachmentLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case updating = "UPDATING"
        public var description: String { rawValue }
    }

    /// S3 access point attachment types.
    public enum S3AccessPointAttachmentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case openzfs = "OPENZFS"
        public var description: String { rawValue }
    }

    /// Filter names accepted when listing S3 access point attachments.
    public enum S3AccessPointAttachmentsFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileSystemId = "file-system-id"
        case type = "type"
        case volumeId = "volume-id"
        public var description: String { rawValue }
    }

    /// Security styles for a volume.
    public enum SecurityStyle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case mixed = "MIXED"
        case ntfs = "NTFS"
        case unix = "UNIX"
        public var description: String { rawValue }
    }

    /// Named service limits.
    public enum ServiceLimit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileCacheCount = "FILE_CACHE_COUNT"
        case fileSystemCount = "FILE_SYSTEM_COUNT"
        case storageVirtualMachinesPerFileSystem = "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM"
        case totalInProgressCopyBackups = "TOTAL_IN_PROGRESS_COPY_BACKUPS"
        case totalSsdIops = "TOTAL_SSD_IOPS"
        case totalStorage = "TOTAL_STORAGE"
        case totalThroughputCapacity = "TOTAL_THROUGHPUT_CAPACITY"
        case totalUserInitiatedBackups = "TOTAL_USER_INITIATED_BACKUPS"
        case totalUserTags = "TOTAL_USER_TAGS"
        case volumesPerFileSystem = "VOLUMES_PER_FILE_SYSTEM"
        public var description: String { rawValue }
    }

    /// SnapLock retention modes (compliance or enterprise).
    public enum SnaplockType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case compliance = "COMPLIANCE"
        case enterprise = "ENTERPRISE"
        public var description: String { rawValue }
    }

    /// Filter names accepted when listing snapshots.
    public enum SnapshotFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileSystemId = "file-system-id"
        case volumeId = "volume-id"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a snapshot.
    public enum SnapshotLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case creating = "CREATING"
        case deleting = "DELETING"
        case pending = "PENDING"
        public var description: String { rawValue }
    }

    /// Statuses of an administrative action.
    public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case cancelled = "CANCELLED"
        case completed = "COMPLETED"
        case failed = "FAILED"
        case inProgress = "IN_PROGRESS"
        case optimizing = "OPTIMIZING"
        case paused = "PAUSED"
        case pending = "PENDING"
        case updatedOptimizing = "UPDATED_OPTIMIZING"
        public var description: String { rawValue }
    }

    /// Storage classes for a file system.
    public enum StorageType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case hdd = "HDD"
        case intelligentTiering = "INTELLIGENT_TIERING"
        case ssd = "SSD"
        public var description: String { rawValue }
    }

    /// Filter names accepted when listing storage virtual machines.
    public enum StorageVirtualMachineFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileSystemId = "file-system-id"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a storage virtual machine.
    public enum StorageVirtualMachineLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case created = "CREATED"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case misconfigured = "MISCONFIGURED"
        case pending = "PENDING"
        public var description: String { rawValue }
    }

    /// Security styles for a storage virtual machine's root volume.
    public enum StorageVirtualMachineRootVolumeSecurityStyle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case mixed = "MIXED"
        case ntfs = "NTFS"
        case unix = "UNIX"
        public var description: String { rawValue }
    }

    /// Subtypes of a storage virtual machine.
    public enum StorageVirtualMachineSubtype: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case `default` = "DEFAULT"
        case dpDestination = "DP_DESTINATION"
        case syncDestination = "SYNC_DESTINATION"
        case syncSource = "SYNC_SOURCE"
        public var description: String { rawValue }
    }

    /// Data tiering policies.
    public enum TieringPolicyName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case all = "ALL"
        case auto = "AUTO"
        case none = "NONE"
        case snapshotOnly = "SNAPSHOT_ONLY"
        public var description: String { rawValue }
    }

    /// Supported duration units.
    public enum Unit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case days = "DAYS"
        public var description: String { rawValue }
    }

    /// Options accepted when updating an OpenZFS volume.
    public enum UpdateOpenZFSVolumeOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case deleteClonedVolumes = "DELETE_CLONED_VOLUMES"
        case deleteIntermediateData = "DELETE_INTERMEDIATE_DATA"
        case deleteIntermediateSnapshots = "DELETE_INTERMEDIATE_SNAPSHOTS"
        public var description: String { rawValue }
    }

    /// Filter names accepted when listing volumes.
    public enum VolumeFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case fileSystemId = "file-system-id"
        case storageVirtualMachineId = "storage-virtual-machine-id"
        public var description: String { rawValue }
    }

    /// Lifecycle states of a volume.
    public enum VolumeLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case available = "AVAILABLE"
        case created = "CREATED"
        case creating = "CREATING"
        case deleting = "DELETING"
        case failed = "FAILED"
        case misconfigured = "MISCONFIGURED"
        case pending = "PENDING"
        public var description: String { rawValue }
    }

    /// ONTAP volume styles (FlexVol or FlexGroup).
    public enum VolumeStyle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case flexgroup = "FLEXGROUP"
        case flexvol = "FLEXVOL"
        public var description: String { rawValue }
    }

    /// Supported volume types.
    public enum VolumeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case ontap = "ONTAP"
        case openzfs = "OPENZFS"
        public var description: String { rawValue }
    }

    /// Audit log levels for Windows file access events.
    public enum WindowsAccessAuditLogLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case disabled = "DISABLED"
        case failureOnly = "FAILURE_ONLY"
        case successAndFailure = "SUCCESS_AND_FAILURE"
        case successOnly = "SUCCESS_ONLY"
        public var description: String { rawValue }
    }

    /// Deployment types for a Windows file system.
    public enum WindowsDeploymentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
        case multiAz1 = "MULTI_AZ_1"
        case singleAz1 = "SINGLE_AZ_1"
        case singleAz2 = "SINGLE_AZ_2"
        public var description: String { rawValue }
    }

    // MARK: Shapes

    /// Error shape returned when an access point with the requested name already
    /// exists in this Amazon Web Services account and Region.
    public struct AccessPointAlreadyOwnedByYou: AWSErrorShape {
        /// An error code indicating that an access point with that name already exists in the Amazon Web Services Region in your Amazon Web Services account.
        public let errorCode: String?
        /// Error message, when provided by the service.
        public let message: String?

        @inlinable
        public init(errorCode: String? = nil, message: String? = nil) {
            self.errorCode = errorCode
            self.message = message
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case errorCode = "ErrorCode"
            case message = "Message"
        }
    }

    /// Active Directory attributes recorded with a backup (directory ID and domain name).
    public struct ActiveDirectoryBackupAttributes: AWSDecodableShape {
        /// The ID of the Amazon Web Services Managed Microsoft Active Directory instance to which the file system is joined.
        public let activeDirectoryId: String?
        /// The fully qualified domain name of the self-managed Active Directory directory.
        public let domainName: String?
        /// The Amazon Resource Name (ARN) associated with this resource.
        /// NOTE(review): confirm which resource the ARN refers to against the FSx API reference.
        public let resourceARN: String?

        @inlinable
        public init(activeDirectoryId: String? = nil, domainName: String? = nil, resourceARN: String? = nil) {
            self.activeDirectoryId = activeDirectoryId
            self.domainName = domainName
            self.resourceARN = resourceARN
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryId = "ActiveDirectoryId"
            case domainName = "DomainName"
            case resourceARN = "ResourceARN"
        }
    }

    /// Error shape describing an Active Directory failure, including the directory
    /// involved and the category of error (see `ActiveDirectoryErrorType`).
    public struct ActiveDirectoryError: AWSErrorShape {
        /// The directory ID of the directory that an error pertains to.
        public let activeDirectoryId: String?
        /// Error message, when provided by the service.
        public let message: String?
        /// The type of Active Directory error.
        public let type: ActiveDirectoryErrorType?

        @inlinable
        public init(activeDirectoryId: String? = nil, message: String? = nil, type: ActiveDirectoryErrorType? = nil) {
            self.activeDirectoryId = activeDirectoryId
            self.message = message
            self.type = type
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryId = "ActiveDirectoryId"
            case message = "Message"
            case type = "Type"
        }
    }

    /// Describes an administrative action that Amazon FSx is performing (or has
    /// performed) on a file system, volume, or snapshot, including its type,
    /// status, progress, and the target resource values.
    public struct AdministrativeAction: AWSDecodableShape {
        /// The type of administrative action (see `AdministrativeActionType`).
        public let administrativeActionType: AdministrativeActionType?
        /// Details of the failure, when the action did not succeed.
        public let failureDetails: AdministrativeActionFailureDetails?
        /// Message associated with the action, when provided by the service.
        public let message: String?
        /// The percentage-complete status of a STORAGE_OPTIMIZATION  or DOWNLOAD_DATA_FROM_BACKUP administrative action. Does not apply to any other administrative action type.
        public let progressPercent: Int?
        /// The remaining bytes to transfer for the FSx for OpenZFS snapshot that you're copying.
        public let remainingTransferBytes: Int64?
        /// The time that the administrative action request was received.
        public let requestTime: Date?
        /// The status of the administrative action, as follows:    FAILED - Amazon FSx failed to process the administrative action successfully.    IN_PROGRESS - Amazon FSx is processing the administrative action.    PENDING - Amazon FSx is waiting to process the administrative action.    COMPLETED - Amazon FSx has finished processing the administrative task. For a backup restore to a second-generation FSx for ONTAP file system,  indicates that all data has been downloaded to the volume, and clients now have read-write access to volume.    UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.    PENDING - For a backup restore to a second-generation FSx for ONTAP file system,  indicates that the file metadata is being downloaded onto the volume. The volume's Lifecycle state is CREATING.    IN_PROGRESS - For a backup restore to a second-generation FSx for ONTAP file system,  indicates that all metadata has been downloaded to the new volume and client can access data with read-only access  while Amazon FSx downloads the file data to the volume. Track the progress of this process with the ProgressPercent element.
        public let status: Status?
        /// The target value for the administration action, provided in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative actions.
        public let targetFileSystemValues: FileSystem?
        /// Target snapshot values for the action.
        /// NOTE(review): presumably returned for snapshot-related action types, analogous to `targetFileSystemValues`; confirm against the FSx API.
        public let targetSnapshotValues: Snapshot?
        /// Target volume values for the action.
        /// NOTE(review): presumably returned for volume-related action types, analogous to `targetFileSystemValues`; confirm against the FSx API.
        public let targetVolumeValues: Volume?
        /// The number of bytes that have transferred for the FSx for OpenZFS snapshot that you're copying.
        public let totalTransferBytes: Int64?

        @inlinable
        public init(administrativeActionType: AdministrativeActionType? = nil, failureDetails: AdministrativeActionFailureDetails? = nil, message: String? = nil, progressPercent: Int? = nil, remainingTransferBytes: Int64? = nil, requestTime: Date? = nil, status: Status? = nil, targetFileSystemValues: FileSystem? = nil, targetSnapshotValues: Snapshot? = nil, targetVolumeValues: Volume? = nil, totalTransferBytes: Int64? = nil) {
            self.administrativeActionType = administrativeActionType
            self.failureDetails = failureDetails
            self.message = message
            self.progressPercent = progressPercent
            self.remainingTransferBytes = remainingTransferBytes
            self.requestTime = requestTime
            self.status = status
            self.targetFileSystemValues = targetFileSystemValues
            self.targetSnapshotValues = targetSnapshotValues
            self.targetVolumeValues = targetVolumeValues
            self.totalTransferBytes = totalTransferBytes
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case administrativeActionType = "AdministrativeActionType"
            case failureDetails = "FailureDetails"
            case message = "Message"
            case progressPercent = "ProgressPercent"
            case remainingTransferBytes = "RemainingTransferBytes"
            case requestTime = "RequestTime"
            case status = "Status"
            case targetFileSystemValues = "TargetFileSystemValues"
            case targetSnapshotValues = "TargetSnapshotValues"
            case targetVolumeValues = "TargetVolumeValues"
            case totalTransferBytes = "TotalTransferBytes"
        }
    }

    /// Failure details for an administrative action (see `AdministrativeAction.failureDetails`).
    public struct AdministrativeActionFailureDetails: AWSDecodableShape {
        /// Error message providing details about the failed administrative action.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    /// Aggregate configuration of an ONTAP volume: which aggregates it resides on
    /// and, for FlexGroups, how many constituents it has.
    public struct AggregateConfiguration: AWSDecodableShape {
        /// The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The strings in the value of Aggregates are not are not formatted as aggrX, where X is a number between 1 and 12.   The value of Aggregates contains aggregates that are not present.   One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
        public let aggregates: [String]?
        /// The total number of constituents this FlexGroup volume has. Not applicable for FlexVols.
        public let totalConstituents: Int?

        @inlinable
        public init(aggregates: [String]? = nil, totalConstituents: Int? = nil) {
            self.aggregates = aggregates
            self.totalConstituents = totalConstituents
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case aggregates = "Aggregates"
            case totalConstituents = "TotalConstituents"
        }
    }

    /// A DNS alias associated with an Amazon FSx file system, with its name and
    /// current lifecycle state.
    public struct Alias: AWSDecodableShape {
        /// Describes the state of the DNS alias.   AVAILABLE - The DNS alias is associated with an Amazon FSx file system.   CREATING - Amazon FSx is creating the DNS alias and associating it with the file system.   CREATE_FAILED - Amazon FSx was unable to associate the DNS alias with the file system.   DELETING - Amazon FSx is disassociating the DNS alias from the file system and deleting it.   DELETE_FAILED - Amazon FSx was unable to disassociate the DNS alias from the file system.
        public let lifecycle: AliasLifecycle?
        /// The name of the DNS alias. The alias name has to meet the following requirements:   Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example, accounting.example.com.   Can contain alphanumeric characters, the underscore (_), and the hyphen (-).   Cannot start or end with a hyphen.   Can start with a numeric.   For DNS names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them:  as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
        public let name: String?

        @inlinable
        public init(lifecycle: AliasLifecycle? = nil, name: String? = nil) {
            self.lifecycle = lifecycle
            self.name = name
        }

        // Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case name = "Name"
        }
    }

    public struct AssociateFileSystemAliasesRequest: AWSEncodableShape {
        /// One or more DNS alias names to associate with the file system. Each alias must be a fully-qualified domain name (FQDN) such as accounting.corp.example.com; may contain alphanumerics and hyphens (-); cannot start or end with a hyphen; and may start with a digit. Amazon FSx stores alphabetic characters as lowercase (a-z) regardless of how they are specified.
        public let aliases: [String]?
        public let clientRequestToken: String?
        /// The file system with which to associate the DNS aliases.
        public let fileSystemId: String?

        @inlinable
        public init(aliases: [String]? = nil, clientRequestToken: String? = AssociateFileSystemAliasesRequest.idempotencyToken(), fileSystemId: String? = nil) {
            self.aliases = aliases
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
        }

        public func validate(name: String) throws {
            // Per-element checks run first so the first invalid alias is reported.
            if let aliases = self.aliases {
                for alias in aliases {
                    try validate(alias, name: "aliases[]", parent: name, max: 253)
                    try validate(alias, name: "aliases[]", parent: name, min: 4)
                    try validate(alias, name: "aliases[]", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,253}$")
                }
            }
            try self.validate(self.aliases, name: "aliases", parent: name, max: 50)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case aliases = "Aliases"
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
        }
    }

    public struct AssociateFileSystemAliasesResponse: AWSDecodableShape {
        /// The DNS aliases that Amazon FSx is associating with the file system.
        public let aliases: [Alias]?

        @inlinable
        public init(aliases: [Alias]? = nil) {
            self.aliases = aliases
        }

        private enum CodingKeys: String, CodingKey {
            case aliases = "Aliases"
        }
    }

    public struct AutoExportPolicy: AWSEncodableShape & AWSDecodableShape {
        /// Event types that trigger automatic export, in any combination: NEW (new files and directories are exported to the data repository as they are added to the file system), CHANGED (changes to files and directories on the file system are exported), and DELETED (files and directories are deleted on the data repository when deleted on the file system).
        public let events: [EventType]?

        @inlinable
        public init(events: [EventType]? = nil) {
            self.events = events
        }

        public func validate(name: String) throws {
            // At most one of each of the three event types.
            try self.validate(self.events, name: "events", parent: name, max: 3)
        }

        private enum CodingKeys: String, CodingKey {
            case events = "Events"
        }
    }

    public struct AutoImportPolicy: AWSEncodableShape & AWSDecodableShape {
        /// Event types that trigger automatic import, in any combination: NEW (Amazon FSx imports metadata of files added to the linked S3 bucket that do not exist in the FSx file system), CHANGED (FSx updates file metadata and invalidates existing file content as files change in the data repository), and DELETED (FSx deletes files on the file system as corresponding files are deleted in the data repository).
        public let events: [EventType]?

        @inlinable
        public init(events: [EventType]? = nil) {
            self.events = events
        }

        public func validate(name: String) throws {
            // At most one of each of the three event types.
            try self.validate(self.events, name: "events", parent: name, max: 3)
        }

        private enum CodingKeys: String, CodingKey {
            case events = "Events"
        }
    }

    public struct AutocommitPeriod: AWSEncodableShape & AWSDecodableShape {
        /// The time unit for the autocommit period of a file in an FSx for ONTAP SnapLock volume. NONE disables autocommit and is the default.
        public let type: AutocommitPeriodType?
        /// The length of the autocommit period, interpreted in the unit given by type. Valid ranges: Minutes 5-65,535; Hours 1-65,535; Days 1-3,650; Months 1-120; Years 1-10.
        public let value: Int?

        @inlinable
        public init(type: AutocommitPeriodType? = nil, value: Int? = nil) {
            self.type = type
            self.value = value
        }

        public func validate(name: String) throws {
            // Broadest bound across all units; unit-specific ranges are enforced server-side.
            try self.validate(self.value, name: "value", parent: name, max: 65535)
            try self.validate(self.value, name: "value", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case type = "Type"
            case value = "Value"
        }
    }

    public struct Backup: AWSDecodableShape {
        /// The backup's ID.
        public let backupId: String?
        /// When the backup was created.
        public let creationTime: Date?
        /// Configuration of the self-managed Microsoft Active Directory directory to which the Windows File Server instance is joined.
        public let directoryInformation: ActiveDirectoryBackupAttributes?
        /// Details explaining any failures that occurred while creating the backup.
        public let failureDetails: BackupFailureDetails?
        /// Metadata of the file system associated with the backup; persisted even if the file system is deleted.
        public let fileSystem: FileSystem?
        /// ID of the Key Management Service (KMS) key used to encrypt the backup of the file system's data at rest.
        public let kmsKeyId: String?
        /// The backup's lifecycle status: AVAILABLE (fully available), PENDING (user-initiated, Lustre only; not yet started), CREATING, TRANSFERRING (user-initiated, Lustre only; being transferred to Amazon S3), COPYING, DELETED (no longer available), or FAILED.
        public let lifecycle: BackupLifecycle?
        public let ownerId: String?
        public let progressPercent: Int?
        /// The Amazon Resource Name (ARN) of the backup resource.
        public let resourceARN: String?
        /// The resource type that is backed up.
        public let resourceType: ResourceType?
        /// Backup size in bytes — the amount of data the file system would contain if this backup were restored.
        public let sizeInBytes: Int64?
        public let sourceBackupId: String?
        /// The Region this backup was copied from.
        public let sourceBackupRegion: String?
        /// Tags associated with the file system.
        public let tags: [Tag]?
        /// The file-system backup's type.
        public let type: BackupType?
        public let volume: Volume?

        @inlinable
        public init(backupId: String? = nil, creationTime: Date? = nil, directoryInformation: ActiveDirectoryBackupAttributes? = nil, failureDetails: BackupFailureDetails? = nil, fileSystem: FileSystem? = nil, kmsKeyId: String? = nil, lifecycle: BackupLifecycle? = nil, ownerId: String? = nil, progressPercent: Int? = nil, resourceARN: String? = nil, resourceType: ResourceType? = nil, sizeInBytes: Int64? = nil, sourceBackupId: String? = nil, sourceBackupRegion: String? = nil, tags: [Tag]? = nil, type: BackupType? = nil, volume: Volume? = nil) {
            self.backupId = backupId
            self.creationTime = creationTime
            self.directoryInformation = directoryInformation
            self.failureDetails = failureDetails
            self.fileSystem = fileSystem
            self.kmsKeyId = kmsKeyId
            self.lifecycle = lifecycle
            self.ownerId = ownerId
            self.progressPercent = progressPercent
            self.resourceARN = resourceARN
            self.resourceType = resourceType
            self.sizeInBytes = sizeInBytes
            self.sourceBackupId = sourceBackupId
            self.sourceBackupRegion = sourceBackupRegion
            self.tags = tags
            self.type = type
            self.volume = volume
        }

        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case creationTime = "CreationTime"
            case directoryInformation = "DirectoryInformation"
            case failureDetails = "FailureDetails"
            case fileSystem = "FileSystem"
            case kmsKeyId = "KmsKeyId"
            case lifecycle = "Lifecycle"
            case ownerId = "OwnerId"
            case progressPercent = "ProgressPercent"
            case resourceARN = "ResourceARN"
            case resourceType = "ResourceType"
            case sizeInBytes = "SizeInBytes"
            case sourceBackupId = "SourceBackupId"
            case sourceBackupRegion = "SourceBackupRegion"
            case tags = "Tags"
            case type = "Type"
            case volume = "Volume"
        }
    }

    public struct BackupBeingCopied: AWSErrorShape {
        /// The ID of the backup involved in the copy.
        public let backupId: String?
        /// A message describing the error.
        public let message: String?

        @inlinable
        public init(backupId: String? = nil, message: String? = nil) {
            self.backupId = backupId
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case message = "Message"
        }
    }

    public struct BackupFailureDetails: AWSDecodableShape {
        /// Describes why backup creation failed.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    public struct BackupRestoring: AWSErrorShape {
        /// ID of the file system being restored from the backup.
        public let fileSystemId: String?
        /// A message describing the error.
        public let message: String?

        @inlinable
        public init(fileSystemId: String? = nil, message: String? = nil) {
            self.fileSystemId = fileSystemId
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case fileSystemId = "FileSystemId"
            case message = "Message"
        }
    }

    public struct CancelDataRepositoryTaskRequest: AWSEncodableShape {
        /// The data repository task to cancel.
        public let taskId: String?

        @inlinable
        public init(taskId: String? = nil) {
            self.taskId = taskId
        }

        public func validate(name: String) throws {
            try self.validate(self.taskId, name: "taskId", parent: name, max: 128)
            try self.validate(self.taskId, name: "taskId", parent: name, min: 12)
            try self.validate(self.taskId, name: "taskId", parent: name, pattern: "^(task-[0-9a-f]{17,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case taskId = "TaskId"
        }
    }

    public struct CancelDataRepositoryTaskResponse: AWSDecodableShape {
        /// The task's lifecycle status: PENDING (Amazon FSx has not started the task), EXECUTING (the task is being processed), FAILED (the task could not be completed — for example, some files may have failed to process; DataRepositoryTaskFailureDetails has more information), SUCCEEDED, CANCELED (the task was canceled and did not complete), or CANCELING (cancellation is in progress).
        public let lifecycle: DataRepositoryTaskLifecycle?
        /// ID of the task being canceled.
        public let taskId: String?

        @inlinable
        public init(lifecycle: DataRepositoryTaskLifecycle? = nil, taskId: String? = nil) {
            self.lifecycle = lifecycle
            self.taskId = taskId
        }

        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case taskId = "TaskId"
        }
    }

    public struct CompletionReport: AWSEncodableShape & AWSDecodableShape {
        /// Whether a CompletionReport is generated when the task completes. When true, scope, path, and format are required.
        public let enabled: Bool?
        /// Required when enabled is true. The report format; REPORT_CSV_20191124 is the only supported value, and produces a CSV report delivered to {path}/task-{id}/failures.csv.
        public let format: ReportFormat?
        /// Required when enabled is true. Absolute location of the report in the file system's linked S3 data repository; it must lie within the file system's ExportPath, e.g. "s3://amzn-s3-demo-bucket/myExportPath/optionalPrefix". The report lists FilePath, FileStatus, and ErrorCode per file.
        public let path: String?
        /// Required when enabled is true. The report's scope; FAILED_FILES_ONLY is the only supported value, and restricts the report to files the task failed to process.
        public let scope: ReportScope?

        @inlinable
        public init(enabled: Bool? = nil, format: ReportFormat? = nil, path: String? = nil, scope: ReportScope? = nil) {
            self.enabled = enabled
            self.format = format
            self.path = path
            self.scope = scope
        }

        public func validate(name: String) throws {
            try self.validate(self.path, name: "path", parent: name, max: 4357)
            try self.validate(self.path, name: "path", parent: name, min: 3)
            try self.validate(self.path, name: "path", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$")
        }

        private enum CodingKeys: String, CodingKey {
            case enabled = "Enabled"
            case format = "Format"
            case path = "Path"
            case scope = "Scope"
        }
    }

    public struct CopyBackupRequest: AWSEncodableShape {
        public let clientRequestToken: String?
        /// Whether tags from the source backup are copied to the backup copy (defaults to false). With copyTags true, tags supplied via tags are merged with the source backup's tags, provided their sum does not exceed 50; on a key conflict the tags parameter wins.
        public let copyTags: Bool?
        public let kmsKeyId: String?
        /// ID of the source backup being copied.
        public let sourceBackupId: String?
        /// The Amazon Web Services Region the backup is copied from. Source and destination must be in the same Amazon Web Services partition. When omitted, defaults to the Region the request is sent from (in-Region copy).
        public let sourceRegion: String?
        public let tags: [Tag]?

        @inlinable
        public init(clientRequestToken: String? = CopyBackupRequest.idempotencyToken(), copyTags: Bool? = nil, kmsKeyId: String? = nil, sourceBackupId: String? = nil, sourceRegion: String? = nil, tags: [Tag]? = nil) {
            self.clientRequestToken = clientRequestToken
            self.copyTags = copyTags
            self.kmsKeyId = kmsKeyId
            self.sourceBackupId = sourceBackupId
            self.sourceRegion = sourceRegion
            self.tags = tags
        }

        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^.{1,2048}$")
            try self.validate(self.sourceBackupId, name: "sourceBackupId", parent: name, max: 128)
            try self.validate(self.sourceBackupId, name: "sourceBackupId", parent: name, min: 12)
            try self.validate(self.sourceBackupId, name: "sourceBackupId", parent: name, pattern: "^(backup-[0-9a-f]{8,})$")
            try self.validate(self.sourceRegion, name: "sourceRegion", parent: name, max: 20)
            try self.validate(self.sourceRegion, name: "sourceRegion", parent: name, min: 1)
            try self.validate(self.sourceRegion, name: "sourceRegion", parent: name, pattern: "^[a-z0-9-]{1,20}$")
            // Validate each tag before the collection-level bounds.
            if let tags = self.tags {
                for tag in tags {
                    try tag.validate(name: "\(name).tags[]")
                }
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case copyTags = "CopyTags"
            case kmsKeyId = "KmsKeyId"
            case sourceBackupId = "SourceBackupId"
            case sourceRegion = "SourceRegion"
            case tags = "Tags"
        }
    }

    public struct CopyBackupResponse: AWSDecodableShape {
        /// A description of the backup copy that was created.
        public let backup: Backup?

        @inlinable
        public init(backup: Backup? = nil) {
            self.backup = backup
        }

        private enum CodingKeys: String, CodingKey {
            case backup = "Backup"
        }
    }

    public struct CopySnapshotAndUpdateVolumeRequest: AWSEncodableShape {
        public let clientRequestToken: String?
        /// Strategy for copying snapshot data to the volume: FULL_COPY copies all data from the snapshot; INCREMENTAL_COPY copies only data changed since the previous replication. CLONE is not valid for CopySnapshotAndUpdateVolume.
        public let copyStrategy: OpenZFSCopyStrategy?
        /// Confirms deletion of data on the destination volume that was not present at the previous snapshot replication; replication fails if an option for a data type present on the destination is omitted (for example, intermediate snapshots exist but DELETE_INTERMEDIATE_SNAPSHOTS is not included). DELETE_INTERMEDIATE_SNAPSHOTS deletes destination snapshots absent from the source; DELETE_CLONED_VOLUMES deletes destination snapshot clones absent from the source; DELETE_INTERMEDIATE_DATA overwrites destination snapshots that do not match the source snapshot being copied.
        public let options: [UpdateOpenZFSVolumeOption]?
        public let sourceSnapshotARN: String?
        /// ID of the volume the snapshot is copied to.
        public let volumeId: String?

        @inlinable
        public init(clientRequestToken: String? = CopySnapshotAndUpdateVolumeRequest.idempotencyToken(), copyStrategy: OpenZFSCopyStrategy? = nil, options: [UpdateOpenZFSVolumeOption]? = nil, sourceSnapshotARN: String? = nil, volumeId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.copyStrategy = copyStrategy
            self.options = options
            self.sourceSnapshotARN = sourceSnapshotARN
            self.volumeId = volumeId
        }

        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.sourceSnapshotARN, name: "sourceSnapshotARN", parent: name, max: 512)
            try self.validate(self.sourceSnapshotARN, name: "sourceSnapshotARN", parent: name, min: 8)
            try self.validate(self.sourceSnapshotARN, name: "sourceSnapshotARN", parent: name, pattern: "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?<!-)):){4}(?!/).{0,1024}$")
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case copyStrategy = "CopyStrategy"
            case options = "Options"
            case sourceSnapshotARN = "SourceSnapshotARN"
            case volumeId = "VolumeId"
        }
    }

    public struct CopySnapshotAndUpdateVolumeResponse: AWSDecodableShape {
        /// Administrative actions for the file system that are in process or waiting to be processed; these describe changes to the Amazon FSx system.
        public let administrativeActions: [AdministrativeAction]?
        /// Lifecycle state of the destination volume.
        public let lifecycle: VolumeLifecycle?
        /// ID of the volume the snapshot was copied to.
        public let volumeId: String?

        @inlinable
        public init(administrativeActions: [AdministrativeAction]? = nil, lifecycle: VolumeLifecycle? = nil, volumeId: String? = nil) {
            self.administrativeActions = administrativeActions
            self.lifecycle = lifecycle
            self.volumeId = volumeId
        }

        private enum CodingKeys: String, CodingKey {
            case administrativeActions = "AdministrativeActions"
            case lifecycle = "Lifecycle"
            case volumeId = "VolumeId"
        }
    }

    public struct CreateAggregateConfiguration: AWSEncodableShape {
        /// Names of the aggregates on which the volume will be created.
        public let aggregates: [String]?
        /// Explicit number of constituents within the FlexGroup per storage aggregate. Optional when creating a FlexGroup volume (defaults to 8 when unspecified); must not be provided when creating a FlexVol volume.
        public let constituentsPerAggregate: Int?

        @inlinable
        public init(aggregates: [String]? = nil, constituentsPerAggregate: Int? = nil) {
            self.aggregates = aggregates
            self.constituentsPerAggregate = constituentsPerAggregate
        }

        public func validate(name: String) throws {
            // Each entry must look like "aggrN" (N = 1-2 digits); at most 6 aggregates.
            if let aggregates = self.aggregates {
                for aggregate in aggregates {
                    try validate(aggregate, name: "aggregates[]", parent: name, max: 6)
                    try validate(aggregate, name: "aggregates[]", parent: name, min: 5)
                    try validate(aggregate, name: "aggregates[]", parent: name, pattern: "^(aggr[0-9]{1,2})$")
                }
            }
            try self.validate(self.aggregates, name: "aggregates", parent: name, max: 6)
            try self.validate(self.constituentsPerAggregate, name: "constituentsPerAggregate", parent: name, max: 200)
            try self.validate(self.constituentsPerAggregate, name: "constituentsPerAggregate", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case aggregates = "Aggregates"
            case constituentsPerAggregate = "ConstituentsPerAggregate"
        }
    }

    public struct CreateAndAttachS3AccessPointOpenZFSConfiguration: AWSEncodableShape {
        /// File system user identity used to authorize file read and write requests made through this S3 access point.
        public let fileSystemIdentity: OpenZFSFileSystemIdentity?
        /// ID of the FSx for OpenZFS volume the S3 access point should be attached to.
        public let volumeId: String?

        @inlinable
        public init(fileSystemIdentity: OpenZFSFileSystemIdentity? = nil, volumeId: String? = nil) {
            self.fileSystemIdentity = fileSystemIdentity
            self.volumeId = volumeId
        }

        public func validate(name: String) throws {
            try self.fileSystemIdentity?.validate(name: "\(name).fileSystemIdentity")
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case fileSystemIdentity = "FileSystemIdentity"
            case volumeId = "VolumeId"
        }
    }

    public struct CreateAndAttachS3AccessPointRequest: AWSEncodableShape {
        public let clientRequestToken: String?
        /// The name to assign to this S3 access point.
        public let name: String?
        /// Configuration used when creating and attaching an S3 access point to an FSx for OpenZFS volume.
        public let openZFSConfiguration: CreateAndAttachS3AccessPointOpenZFSConfiguration?
        /// Virtual private cloud (VPC) configuration for an access point restricted to a VPC. See "Creating access points restricted to a virtual private cloud" for details.
        public let s3AccessPoint: CreateAndAttachS3AccessPointS3Configuration?
        /// The kind of S3 access point to create; only OpenZFS is supported.
        public let type: S3AccessPointAttachmentType?

        @inlinable
        public init(clientRequestToken: String? = CreateAndAttachS3AccessPointRequest.idempotencyToken(), name: String? = nil, openZFSConfiguration: CreateAndAttachS3AccessPointOpenZFSConfiguration? = nil, s3AccessPoint: CreateAndAttachS3AccessPointS3Configuration? = nil, type: S3AccessPointAttachmentType? = nil) {
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.openZFSConfiguration = openZFSConfiguration
            self.s3AccessPoint = s3AccessPoint
            self.type = type
        }

        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 50)
            try self.validate(self.name, name: "name", parent: name, min: 3)
            try self.validate(self.name, name: "name", parent: name, pattern: "^(?=[a-z0-9])[a-z0-9-]{1,48}[a-z0-9]$")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            try self.s3AccessPoint?.validate(name: "\(name).s3AccessPoint")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case s3AccessPoint = "S3AccessPoint"
            case type = "Type"
        }
    }

    public struct CreateAndAttachS3AccessPointResponse: AWSDecodableShape {
        /// Configuration of the S3 access point that was created.
        public let s3AccessPointAttachment: S3AccessPointAttachment?

        @inlinable
        public init(s3AccessPointAttachment: S3AccessPointAttachment? = nil) {
            self.s3AccessPointAttachment = s3AccessPointAttachment
        }

        private enum CodingKeys: String, CodingKey {
            case s3AccessPointAttachment = "S3AccessPointAttachment"
        }
    }

    public struct CreateAndAttachS3AccessPointS3Configuration: AWSEncodableShape {
        /// An access policy to associate with the S3 access point configuration. For more information, see  Configuring IAM policies for using access points  in the Amazon Simple Storage Service User Guide.
        public let policy: String?
        /// When present, Amazon S3 restricts access to this S3 access point to requests made from the specified virtual private cloud (VPC).
        public let vpcConfiguration: S3AccessPointVpcConfiguration?

        /// Creates a configuration; both fields are optional and default to `nil`.
        @inlinable
        public init(policy: String? = nil, vpcConfiguration: S3AccessPointVpcConfiguration? = nil) {
            self.vpcConfiguration = vpcConfiguration
            self.policy = policy
        }

        /// Client-side validation mirroring the AWS service model constraints.
        public func validate(name: String) throws {
            // The policy document, when provided, must be 1...200,000 characters.
            try self.validate(self.policy, name: "policy", parent: name, max: 200000)
            try self.validate(self.policy, name: "policy", parent: name, min: 1)
            // Delegate validation of the nested VPC configuration, if any.
            try self.vpcConfiguration?.validate(name: "\(name).vpcConfiguration")
        }

        private enum CodingKeys: String, CodingKey {
            case policy = "Policy"
            case vpcConfiguration = "VpcConfiguration"
        }
    }

    public struct CreateBackupRequest: AWSEncodableShape {
        /// (Optional) A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
        public let clientRequestToken: String?
        /// The ID of the file system to back up.
        public let fileSystemId: String?
        /// (Optional) The tags to apply to the backup at backup creation. The key value of the Name tag appears in the console as the backup name. If you have set CopyTagsToBackups to true, and you specify one or more tags using the CreateBackup operation, no existing file system tags are copied from the file system to the backup.
        public let tags: [Tag]?
        /// (Optional) The ID of the FSx for ONTAP volume to back up.
        public let volumeId: String?

        /// Creates a `CreateBackupRequest`.
        ///
        /// `clientRequestToken` defaults to a freshly generated idempotency token, so retrying
        /// with the same request value does not create a duplicate backup.
        @inlinable
        public init(clientRequestToken: String? = CreateBackupRequest.idempotencyToken(), fileSystemId: String? = nil, tags: [Tag]? = nil, volumeId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.tags = tags
            self.volumeId = volumeId
        }

        /// Client-side validation of field lengths and formats, generated from the AWS service model.
        /// Throws on the first field that fails its constraint; checks run in the order listed.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" (lowercase z after the first hyphen) is copied verbatim from the
            // AWS service model and presumably a model typo for "A-Za-z"; it must not be "fixed"
            // locally in generated code, or client validation would diverge from the model.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // File system IDs are "fs-" followed by at least 8 hex digits (11-21 chars total).
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
            // Volume IDs are exactly 23 characters: "fsvol-" plus 17 hex digits.
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case tags = "Tags"
            case volumeId = "VolumeId"
        }
    }

    public struct CreateBackupResponse: AWSDecodableShape {
        /// The backup that was created, if returned by the service.
        public let backup: Backup?

        /// Wraps an optional backup description; defaults to `nil` when absent from the response.
        @inlinable
        public init(backup: Backup? = nil) { self.backup = backup }

        private enum CodingKeys: String, CodingKey {
            // Upper-camel-case wire name as emitted by the FSx API.
            case backup = "Backup"
        }
    }

    public struct CreateDataRepositoryAssociationRequest: AWSEncodableShape {
        /// Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Default is false.
        public let batchImportMetaDataOnCreate: Bool?
        /// (Optional) An idempotency token for resource creation, up to 63 ASCII characters. Defaults to a generated token in `init` so retries do not create duplicate associations.
        public let clientRequestToken: String?
        /// The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). This path specifies where in the S3 data repository files will be imported from or exported to.
        public let dataRepositoryPath: String?
        /// The ID of the file system to link with the data repository (format `fs-` + hex digits; see `validate`).
        public let fileSystemId: String?
        /// A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.  If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.
        public let fileSystemPath: String?
        /// For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
        public let importedFileChunkSize: Int?
        /// The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
        public let s3: S3DataRepositoryConfiguration?
        /// (Optional) Tags to apply to the association at creation (1-50 entries; see `validate`).
        public let tags: [Tag]?

        /// Creates a `CreateDataRepositoryAssociationRequest`; all fields are optional and
        /// `clientRequestToken` defaults to a freshly generated idempotency token.
        @inlinable
        public init(batchImportMetaDataOnCreate: Bool? = nil, clientRequestToken: String? = CreateDataRepositoryAssociationRequest.idempotencyToken(), dataRepositoryPath: String? = nil, fileSystemId: String? = nil, fileSystemPath: String? = nil, importedFileChunkSize: Int? = nil, s3: S3DataRepositoryConfiguration? = nil, tags: [Tag]? = nil) {
            self.batchImportMetaDataOnCreate = batchImportMetaDataOnCreate
            self.clientRequestToken = clientRequestToken
            self.dataRepositoryPath = dataRepositoryPath
            self.fileSystemId = fileSystemId
            self.fileSystemPath = fileSystemPath
            self.importedFileChunkSize = importedFileChunkSize
            self.s3 = s3
            self.tags = tags
        }

        /// Client-side validation of field lengths and formats, generated from the AWS service model.
        /// Throws on the first field that fails its constraint; checks run in the order listed.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is verbatim from the AWS service model (presumably a model
            // typo for "A-Za-z"); do not "fix" it locally in generated code.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // Paths may contain any characters except NUL, NEL, LS, PS, CR, and LF.
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, max: 4357)
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, min: 3)
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.validate(self.fileSystemPath, name: "fileSystemPath", parent: name, max: 4096)
            try self.validate(self.fileSystemPath, name: "fileSystemPath", parent: name, min: 1)
            try self.validate(self.fileSystemPath, name: "fileSystemPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$")
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, max: 512000)
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, min: 1)
            try self.s3?.validate(name: "\(name).s3")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case batchImportMetaDataOnCreate = "BatchImportMetaDataOnCreate"
            case clientRequestToken = "ClientRequestToken"
            case dataRepositoryPath = "DataRepositoryPath"
            case fileSystemId = "FileSystemId"
            case fileSystemPath = "FileSystemPath"
            case importedFileChunkSize = "ImportedFileChunkSize"
            case s3 = "S3"
            case tags = "Tags"
        }
    }

    public struct CreateDataRepositoryAssociationResponse: AWSDecodableShape {
        /// The data repository association that was created, as described by the service.
        public let association: DataRepositoryAssociation?

        /// Wraps an optional association description; defaults to `nil` when absent from the response.
        @inlinable
        public init(association: DataRepositoryAssociation? = nil) { self.association = association }

        private enum CodingKeys: String, CodingKey {
            // Upper-camel-case wire name as emitted by the FSx API.
            case association = "Association"
        }
    }

    public struct CreateDataRepositoryTaskRequest: AWSEncodableShape {
        /// Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.
        public let capacityToRelease: Int64?
        /// (Optional) An idempotency token for task creation, up to 63 ASCII characters. Defaults to a generated token in `init` so retries do not create duplicate tasks.
        public let clientRequestToken: String?
        /// The ID of the file system (or cache) the task operates on (format `fs-` + hex digits; see `validate`).
        public let fileSystemId: String?
        /// A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks).   For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1.   For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix (where prefix is optional).   For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path.  A file must also meet the last accessed time criteria specified in  for the file to be released.
        public let paths: [String]?
        /// The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.
        public let releaseConfiguration: ReleaseConfiguration?
        /// Defines whether or not Amazon FSx provides a CompletionReport once the task has completed.  A CompletionReport provides a detailed  report on the files that Amazon FSx processed that meet the criteria specified by the  Scope parameter. For more information, see  Working with Task Completion Reports.
        public let report: CompletionReport?
        /// (Optional) Tags to apply to the task at creation (1-50 entries; see `validate`).
        public let tags: [Tag]?
        /// Specifies the type of data repository task to create.    EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.    IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.    RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.    AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.
        public let type: DataRepositoryTaskType?

        /// Creates a `CreateDataRepositoryTaskRequest`; all fields are optional and
        /// `clientRequestToken` defaults to a freshly generated idempotency token.
        @inlinable
        public init(capacityToRelease: Int64? = nil, clientRequestToken: String? = CreateDataRepositoryTaskRequest.idempotencyToken(), fileSystemId: String? = nil, paths: [String]? = nil, releaseConfiguration: ReleaseConfiguration? = nil, report: CompletionReport? = nil, tags: [Tag]? = nil, type: DataRepositoryTaskType? = nil) {
            self.capacityToRelease = capacityToRelease
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.paths = paths
            self.releaseConfiguration = releaseConfiguration
            self.report = report
            self.tags = tags
            self.type = type
        }

        /// Client-side validation of field lengths and formats, generated from the AWS service model.
        /// Throws on the first field that fails its constraint; checks run in the order listed.
        public func validate(name: String) throws {
            // capacityToRelease is constrained to 1...Int32.max GiB by the model.
            try self.validate(self.capacityToRelease, name: "capacityToRelease", parent: name, max: 2147483647)
            try self.validate(self.capacityToRelease, name: "capacityToRelease", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is verbatim from the AWS service model (presumably a model
            // typo for "A-Za-z"); do not "fix" it locally in generated code.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            // Each path may contain any characters except NUL, NEL, LS, PS, CR, and LF;
            // the list itself is limited to 100 entries.
            try self.paths?.forEach {
                try validate($0, name: "paths[]", parent: name, max: 4096)
                try validate($0, name: "paths[]", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{0,4096}$")
            }
            try self.validate(self.paths, name: "paths", parent: name, max: 100)
            try self.releaseConfiguration?.validate(name: "\(name).releaseConfiguration")
            try self.report?.validate(name: "\(name).report")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case capacityToRelease = "CapacityToRelease"
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case paths = "Paths"
            case releaseConfiguration = "ReleaseConfiguration"
            case report = "Report"
            case tags = "Tags"
            case type = "Type"
        }
    }

    public struct CreateDataRepositoryTaskResponse: AWSDecodableShape {
        /// The data repository task that was just created, as described by the service.
        public let dataRepositoryTask: DataRepositoryTask?

        /// Wraps an optional task description; defaults to `nil` when absent from the response.
        @inlinable
        public init(dataRepositoryTask: DataRepositoryTask? = nil) { self.dataRepositoryTask = dataRepositoryTask }

        private enum CodingKeys: String, CodingKey {
            // Upper-camel-case wire name as emitted by the FSx API.
            case dataRepositoryTask = "DataRepositoryTask"
        }
    }

    public struct CreateFileCacheLustreConfiguration: AWSEncodableShape {
        /// Specifies the cache deployment type, which must be CACHE_1.
        public let deploymentType: FileCacheLustreDeploymentType?
        /// The configuration for a Lustre MDT (Metadata Target) storage volume.
        public let metadataConfiguration: FileCacheLustreMetadataConfiguration?
        /// Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is 1000.
        public let perUnitStorageThroughput: Int?
        /// The preferred weekly maintenance start time, matching `^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$` —
        /// a day-of-week digit (1-7) followed by a 24-hour time (see `validate`).
        /// NOTE(review): presumably "d:HH:MM" with d = Monday...Sunday, as in other FSx shapes — confirm against the API reference.
        public let weeklyMaintenanceStartTime: String?

        /// Creates a `CreateFileCacheLustreConfiguration`; all fields are optional.
        @inlinable
        public init(deploymentType: FileCacheLustreDeploymentType? = nil, metadataConfiguration: FileCacheLustreMetadataConfiguration? = nil, perUnitStorageThroughput: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.deploymentType = deploymentType
            self.metadataConfiguration = metadataConfiguration
            self.perUnitStorageThroughput = perUnitStorageThroughput
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Client-side validation of field lengths and formats, generated from the AWS service model.
        /// Throws on the first field that fails its constraint; checks run in the order listed.
        public func validate(name: String) throws {
            try self.metadataConfiguration?.validate(name: "\(name).metadataConfiguration")
            // NOTE(review): the model allows 12...1000 here even though the property doc above
            // says the only supported value is 1000; the service enforces the effective rule.
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, max: 1000)
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, min: 12)
            // Exactly 7 characters, e.g. "1:05:00".
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        private enum CodingKeys: String, CodingKey {
            case deploymentType = "DeploymentType"
            case metadataConfiguration = "MetadataConfiguration"
            case perUnitStorageThroughput = "PerUnitStorageThroughput"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct CreateFileCacheRequest: AWSEncodableShape {
        /// An idempotency token for resource creation, in a string of up to 63 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. By using the idempotent operation, you can retry a CreateFileCache operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.
        public let clientRequestToken: String?
        /// A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false.
        public let copyTagsToDataRepositoryAssociations: Bool?
        /// A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements:   All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time.   An NFS DRA must link to an NFS file system that supports the NFSv3 protocol.   DRA automatic import and automatic export is not supported.
        public let dataRepositoryAssociations: [FileCacheDataRepositoryAssociation]?
        /// The type of cache that you're creating, which must be LUSTRE.
        public let fileCacheType: FileCacheType?
        /// Sets the Lustre version for the cache that you're creating, which must be 2.12.
        public let fileCacheTypeVersion: String?
        /// Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.
        public let kmsKeyId: String?
        /// The configuration for the Amazon File Cache resource being created.
        public let lustreConfiguration: CreateFileCacheLustreConfiguration?
        /// A list of IDs specifying the security groups to apply to all network interfaces created for Amazon File Cache access. This list isn't returned in later requests to describe the cache.
        public let securityGroupIds: [String]?
        /// The storage capacity of the cache in gibibytes (GiB). Valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.
        public let storageCapacity: Int?
        /// IDs of the subnets the cache will be accessible from (format `subnet-` + hex digits, up to 50 entries; see `validate`).
        public let subnetIds: [String]?
        /// (Optional) Tags to apply to the cache at creation (1-50 entries; see `validate`).
        public let tags: [Tag]?

        /// Creates a `CreateFileCacheRequest`; all fields are optional and
        /// `clientRequestToken` defaults to a freshly generated idempotency token.
        @inlinable
        public init(clientRequestToken: String? = CreateFileCacheRequest.idempotencyToken(), copyTagsToDataRepositoryAssociations: Bool? = nil, dataRepositoryAssociations: [FileCacheDataRepositoryAssociation]? = nil, fileCacheType: FileCacheType? = nil, fileCacheTypeVersion: String? = nil, kmsKeyId: String? = nil, lustreConfiguration: CreateFileCacheLustreConfiguration? = nil, securityGroupIds: [String]? = nil, storageCapacity: Int? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil) {
            self.clientRequestToken = clientRequestToken
            self.copyTagsToDataRepositoryAssociations = copyTagsToDataRepositoryAssociations
            self.dataRepositoryAssociations = dataRepositoryAssociations
            self.fileCacheType = fileCacheType
            self.fileCacheTypeVersion = fileCacheTypeVersion
            self.kmsKeyId = kmsKeyId
            self.lustreConfiguration = lustreConfiguration
            self.securityGroupIds = securityGroupIds
            self.storageCapacity = storageCapacity
            self.subnetIds = subnetIds
            self.tags = tags
        }

        /// Client-side validation of field lengths and formats, generated from the AWS service model.
        /// Throws on the first field that fails its constraint; checks run in the order listed.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is verbatim from the AWS service model (presumably a model
            // typo for "A-Za-z"); do not "fix" it locally in generated code.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.dataRepositoryAssociations?.forEach {
                try $0.validate(name: "\(name).dataRepositoryAssociations[]")
            }
            try self.validate(self.dataRepositoryAssociations, name: "dataRepositoryAssociations", parent: name, max: 8)
            try self.validate(self.fileCacheTypeVersion, name: "fileCacheTypeVersion", parent: name, max: 20)
            try self.validate(self.fileCacheTypeVersion, name: "fileCacheTypeVersion", parent: name, min: 1)
            try self.validate(self.fileCacheTypeVersion, name: "fileCacheTypeVersion", parent: name, pattern: "^[0-9](.[0-9]*)*$")
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^.{1,2048}$")
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
            // Security group IDs are "sg-" followed by at least 8 hex digits.
            try self.securityGroupIds?.forEach {
                try validate($0, name: "securityGroupIds[]", parent: name, max: 20)
                try validate($0, name: "securityGroupIds[]", parent: name, min: 11)
                try validate($0, name: "securityGroupIds[]", parent: name, pattern: "^(sg-[0-9a-f]{8,})$")
            }
            try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, max: 50)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, max: 2147483647)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, min: 0)
            // Subnet IDs are "subnet-" followed by at least 8 hex digits.
            try self.subnetIds?.forEach {
                try validate($0, name: "subnetIds[]", parent: name, max: 24)
                try validate($0, name: "subnetIds[]", parent: name, min: 15)
                try validate($0, name: "subnetIds[]", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            }
            try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 50)
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case copyTagsToDataRepositoryAssociations = "CopyTagsToDataRepositoryAssociations"
            case dataRepositoryAssociations = "DataRepositoryAssociations"
            case fileCacheType = "FileCacheType"
            case fileCacheTypeVersion = "FileCacheTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lustreConfiguration = "LustreConfiguration"
            case securityGroupIds = "SecurityGroupIds"
            case storageCapacity = "StorageCapacity"
            case subnetIds = "SubnetIds"
            case tags = "Tags"
        }
    }

    public struct CreateFileCacheResponse: AWSDecodableShape {
        /// The cache that was created, as described by the service.
        public let fileCache: FileCacheCreating?

        /// Wraps an optional cache description; defaults to `nil` when absent from the response.
        @inlinable
        public init(fileCache: FileCacheCreating? = nil) { self.fileCache = fileCache }

        private enum CodingKeys: String, CodingKey {
            // Upper-camel-case wire name as emitted by the FSx API.
            case fileCache = "FileCache"
        }
    }

    public struct CreateFileSystemFromBackupRequest: AWSEncodableShape {
        public let backupId: String?
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
        public let clientRequestToken: String?
        /// Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10, 2.12, and 2.15. You can enter a Lustre version that is newer than the backup's FileSystemTypeVersion setting. If you don't enter a newer Lustre version, it defaults to the backup's setting.
        public let fileSystemTypeVersion: String?
        public let kmsKeyId: String?
        public let lustreConfiguration: CreateFileSystemLustreConfiguration?
        /// Sets the network type for the Amazon FSx for OpenZFS file system that you're creating from a backup.
        public let networkType: NetworkType?
        /// The OpenZFS configuration for the file system that's being created.
        public let openZFSConfiguration: CreateFileSystemOpenZFSConfiguration?
        /// A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups apply to all network interfaces. This value isn't returned in later DescribeFileSystem requests.
        public let securityGroupIds: [String]?
        /// Sets the storage capacity of the OpenZFS file system that you're creating from a backup, in gibibytes (GiB). Valid values are from 64 GiB up to 524,288 GiB (512 TiB). However, the value that you specify must be equal to or greater than the backup's storage capacity value. If you don't use the StorageCapacity parameter, the default is the backup's StorageCapacity value. If used to create a file system other than OpenZFS, you must provide a value that matches the backup's StorageCapacity value. If you provide any other value, Amazon FSx responds with an HTTP status code 400 Bad Request.
        public let storageCapacity: Int?
        /// Sets the storage type for the Windows, OpenZFS, or Lustre file system that you're creating from a backup. Valid values are SSD, HDD, and INTELLIGENT_TIERING.   Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.   Set to HDD to use hard disk drive storage.  HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.   Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the Persistent_2 deployment type.   The default value is SSD.   HDD and SSD storage types have different minimum storage capacity requirements.  A restored file system's storage capacity is tied to the file system that was backed up.  You can create a file system that uses HDD storage from a backup of a file system that  used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.
        public let storageType: StorageType?
        /// Specifies the IDs of the subnets that the file system will be accessible from. For Windows MULTI_AZ_1  file system deployment types, provide exactly two subnet IDs, one for the preferred file server  and one for the standby file server. You specify one of these subnets as the preferred subnet  using the WindowsConfiguration > PreferredSubnetID property. Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment types, Lustre file systems, and OpenZFS file systems provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.
        public let subnetIds: [String]?
        /// The tags to be applied to the file system at file system creation. The key value of the Name tag appears in the console as the file system name.
        public let tags: [Tag]?
        /// The configuration for this Microsoft Windows file system.
        public let windowsConfiguration: CreateFileSystemWindowsConfiguration?

        /// Creates a `CreateFileSystemFromBackupRequest`.
        ///
        /// Every parameter is optional and defaults to `nil`, except
        /// `clientRequestToken`, which defaults to a newly generated token from
        /// `idempotencyToken()`.
        @inlinable
        public init(backupId: String? = nil, clientRequestToken: String? = CreateFileSystemFromBackupRequest.idempotencyToken(), fileSystemTypeVersion: String? = nil, kmsKeyId: String? = nil, lustreConfiguration: CreateFileSystemLustreConfiguration? = nil, networkType: NetworkType? = nil, openZFSConfiguration: CreateFileSystemOpenZFSConfiguration? = nil, securityGroupIds: [String]? = nil, storageCapacity: Int? = nil, storageType: StorageType? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, windowsConfiguration: CreateFileSystemWindowsConfiguration? = nil) {
            // Each stored property is populated from its matching parameter; the
            // assignments are mutually independent, so their order is arbitrary.
            self.windowsConfiguration = windowsConfiguration
            self.tags = tags
            self.subnetIds = subnetIds
            self.storageType = storageType
            self.storageCapacity = storageCapacity
            self.securityGroupIds = securityGroupIds
            self.openZFSConfiguration = openZFSConfiguration
            self.networkType = networkType
            self.lustreConfiguration = lustreConfiguration
            self.kmsKeyId = kmsKeyId
            self.fileSystemTypeVersion = fileSystemTypeVersion
            self.clientRequestToken = clientRequestToken
            self.backupId = backupId
        }

        /// Validates the request's client-side constraints before it is sent:
        /// length bounds, numeric ranges, and regex patterns generated from the
        /// AWS FSx service model, plus the nested configuration shapes.
        /// - Parameter name: The dot-separated path of this shape, used to build
        ///   the field names reported in validation errors.
        /// - Throws: An error describing the first constraint that fails.
        public func validate(name: String) throws {
            try self.validate(self.backupId, name: "backupId", parent: name, max: 128)
            try self.validate(self.backupId, name: "backupId", parent: name, min: 12)
            try self.validate(self.backupId, name: "backupId", parent: name, pattern: "^(backup-[0-9a-f]{8,})$")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): the character class `A-za-z` below comes from the
            // service model; the `A-z` range also admits `[`, `\`, `]`, `^`, `_`
            // and backtick. Confirm against the upstream FSx model before tightening.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, max: 20)
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, min: 1)
            // NOTE(review): the `.` in `(.[0-9]*)*` is unescaped, so it matches any
            // character rather than a literal dot — presumably inherited from the
            // service model; verify before changing.
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, pattern: "^[0-9](.[0-9]*)*$")
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^.{1,2048}$")
            // Nested shapes validate themselves under a qualified path.
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            // Per-element checks for list members, followed by the list-size cap.
            try self.securityGroupIds?.forEach {
                try validate($0, name: "securityGroupIds[]", parent: name, max: 20)
                try validate($0, name: "securityGroupIds[]", parent: name, min: 11)
                try validate($0, name: "securityGroupIds[]", parent: name, pattern: "^(sg-[0-9a-f]{8,})$")
            }
            try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, max: 50)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, max: 2147483647)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, min: 0)
            try self.subnetIds?.forEach {
                try validate($0, name: "subnetIds[]", parent: name, max: 24)
                try validate($0, name: "subnetIds[]", parent: name, min: 15)
                try validate($0, name: "subnetIds[]", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            }
            try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 50)
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
            try self.windowsConfiguration?.validate(name: "\(name).windowsConfiguration")
        }

        /// Maps each Swift property to the PascalCase member name used in the
        /// FSx API's JSON representation of this shape.
        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case clientRequestToken = "ClientRequestToken"
            case fileSystemTypeVersion = "FileSystemTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lustreConfiguration = "LustreConfiguration"
            case networkType = "NetworkType"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case securityGroupIds = "SecurityGroupIds"
            case storageCapacity = "StorageCapacity"
            case storageType = "StorageType"
            case subnetIds = "SubnetIds"
            case tags = "Tags"
            case windowsConfiguration = "WindowsConfiguration"
        }
    }

    /// The response shape returned by the `CreateFileSystemFromBackup` operation.
    public struct CreateFileSystemFromBackupResponse: AWSDecodableShape {
        /// A description of the file system.
        public let fileSystem: FileSystem?

        /// Creates a `CreateFileSystemFromBackupResponse`.
        /// - Parameter fileSystem: A description of the file system.
        @inlinable
        public init(fileSystem: FileSystem? = nil) {
            self.fileSystem = fileSystem
        }

        /// Maps the Swift property to the PascalCase JSON member name.
        private enum CodingKeys: String, CodingKey {
            case fileSystem = "FileSystem"
        }
    }

    /// The Lustre configuration for the file system being created.
    public struct CreateFileSystemLustreConfiguration: AWSEncodableShape {
        ///  (Optional) When you create your file system, your existing S3 objects appear as file and directory listings.  Use this parameter to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:    NONE - (Default) AutoImport is off. Amazon FSx only updates  file and directory listings from the linked S3 bucket  when the file system is created. FSx does not update file and directory  listings for any new or changed objects after choosing this option.    NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that  do not currently exist in the FSx file system.     NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports  file and directory listings of any new objects added to the S3 bucket and any  existing objects that are changed in the S3 bucket after you choose this option.    NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any  existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.   For more information, see  Automatically import updates from your S3 bucket.  This parameter is not supported for file systems with a data repository association.
        public let autoImportPolicy: AutoImportPolicyType?
        /// The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
        public let automaticBackupRetentionDays: Int?
        /// (Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false) For more information, see  Working with backups in the Amazon FSx for Lustre User Guide.
        public let copyTagsToBackups: Bool?
        /// The preferred time to take daily automatic backups, in the 24-hour
        /// `HH:MM` format enforced by `validate(name:)`.
        public let dailyAutomaticBackupStartTime: String?
        /// Sets the data compression configuration for the file system. DataCompressionType can have the following values:    NONE - (Default) Data compression is turned off when the file system is created.    LZ4 - Data compression is turned on with the LZ4 algorithm.   For more information, see Lustre data compression  in the Amazon FSx for Lustre User Guide.
        public let dataCompressionType: DataCompressionType?
        /// Specifies the optional provisioned SSD read cache on FSx for Lustre file systems that use the Intelligent-Tiering storage class. Required when StorageType is set to INTELLIGENT_TIERING.
        public let dataReadCacheConfiguration: LustreReadCacheConfiguration?
        /// (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment  types when you need temporary storage and shorter-term processing of data.  The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst  throughput capacity than SCRATCH_1. Choose PERSISTENT_1 for longer-term storage and for throughput-focused  workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all  Amazon Web Services Regions in which FSx for Lustre is available. Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads  that require the highest levels of IOPS/throughput. PERSISTENT_2 supports  the SSD and Intelligent-Tiering storage classes. You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see  Deployment and storage class options for FSx for Lustre file systems in the Amazon FSx for Lustre User Guide.  If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.  Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide. (Default = SCRATCH_1)
        public let deploymentType: LustreDeploymentType?
        /// The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set this property to READ to improve the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file system. This parameter is required when StorageType is set to HDD.
        public let driveCacheType: DriveCacheType?
        /// (Optional) Specifies whether Elastic Fabric Adapter (EFA) and GPUDirect Storage (GDS) support is enabled for the Amazon FSx for Lustre file system. (Default = false)
        public let efaEnabled: Bool?
        /// (Optional) Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file  system to that export prefix in the Amazon S3 bucket.  This parameter is not supported for file systems with a data repository association.
        public let exportPath: String?
        /// (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3  objects have a maximum size of 5 TB.  This parameter is not supported for file systems with a data repository association.
        public let importedFileChunkSize: Int?
        /// (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.  This parameter is not supported for file systems with a data repository association.
        public let importPath: String?
        /// The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs.
        public let logConfiguration: LustreLogCreateConfiguration?
        /// The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a PERSISTENT_2 deployment type.
        public let metadataConfiguration: CreateFileSystemLustreMetadataConfiguration?
        /// Required with PERSISTENT_1 and PERSISTENT_2 deployment types using an SSD or HDD storage class, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying ﬁle system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB ﬁle system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of ﬁle system throughput. You pay for the amount of throughput that you provision.  Valid values:   For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.   For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.   For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.
        public let perUnitStorageThroughput: Int?
        /// The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.
        public let rootSquashConfiguration: LustreRootSquashConfiguration?
        /// Specifies the throughput of an FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps). Valid values are 4000 MBps or multiples of 4000 MBps. You pay for the amount of throughput that you provision.
        public let throughputCapacity: Int?
        /// (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
        public let weeklyMaintenanceStartTime: String?

        /// Creates a `CreateFileSystemLustreConfiguration`. Every parameter is
        /// optional and defaults to `nil`.
        @inlinable
        public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, dataReadCacheConfiguration: LustreReadCacheConfiguration? = nil, deploymentType: LustreDeploymentType? = nil, driveCacheType: DriveCacheType? = nil, efaEnabled: Bool? = nil, exportPath: String? = nil, importedFileChunkSize: Int? = nil, importPath: String? = nil, logConfiguration: LustreLogCreateConfiguration? = nil, metadataConfiguration: CreateFileSystemLustreMetadataConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.autoImportPolicy = autoImportPolicy
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.dataCompressionType = dataCompressionType
            self.dataReadCacheConfiguration = dataReadCacheConfiguration
            self.deploymentType = deploymentType
            self.driveCacheType = driveCacheType
            self.efaEnabled = efaEnabled
            self.exportPath = exportPath
            self.importedFileChunkSize = importedFileChunkSize
            self.importPath = importPath
            self.logConfiguration = logConfiguration
            self.metadataConfiguration = metadataConfiguration
            self.perUnitStorageThroughput = perUnitStorageThroughput
            self.rootSquashConfiguration = rootSquashConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Validates this shape's client-side constraints (length bounds, numeric
        /// ranges, and regex patterns from the AWS FSx service model) and the
        /// nested cache, log, metadata, and root-squash shapes.
        /// - Parameter name: The dot-separated path of this shape, used to build
        ///   the field names reported in validation errors.
        /// - Throws: An error describing the first constraint that fails.
        public func validate(name: String) throws {
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.dataReadCacheConfiguration?.validate(name: "\(name).dataReadCacheConfiguration")
            try self.validate(self.exportPath, name: "exportPath", parent: name, max: 4357)
            try self.validate(self.exportPath, name: "exportPath", parent: name, min: 3)
            try self.validate(self.exportPath, name: "exportPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$")
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, max: 512000)
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, min: 1)
            try self.validate(self.importPath, name: "importPath", parent: name, max: 4357)
            try self.validate(self.importPath, name: "importPath", parent: name, min: 3)
            try self.validate(self.importPath, name: "importPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$")
            try self.logConfiguration?.validate(name: "\(name).logConfiguration")
            try self.metadataConfiguration?.validate(name: "\(name).metadataConfiguration")
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, max: 1000)
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, min: 12)
            try self.rootSquashConfiguration?.validate(name: "\(name).rootSquashConfiguration")
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 2000000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 4000)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps each Swift property to the PascalCase member name used in the
        /// FSx API's JSON representation of this shape.
        private enum CodingKeys: String, CodingKey {
            case autoImportPolicy = "AutoImportPolicy"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case dataCompressionType = "DataCompressionType"
            case dataReadCacheConfiguration = "DataReadCacheConfiguration"
            case deploymentType = "DeploymentType"
            case driveCacheType = "DriveCacheType"
            case efaEnabled = "EfaEnabled"
            case exportPath = "ExportPath"
            case importedFileChunkSize = "ImportedFileChunkSize"
            case importPath = "ImportPath"
            case logConfiguration = "LogConfiguration"
            case metadataConfiguration = "MetadataConfiguration"
            case perUnitStorageThroughput = "PerUnitStorageThroughput"
            case rootSquashConfiguration = "RootSquashConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    /// The Lustre metadata IOPS configuration used when creating an FSx for
    /// Lustre file system with a PERSISTENT_2 deployment type.
    public struct CreateFileSystemLustreMetadataConfiguration: AWSEncodableShape {
        /// (USER_PROVISIONED mode only) The maximum rate of metadata disk IOPS to
        /// provision for the file system. For SSD file systems the valid values
        /// are 1500, 3000, 6000, 12000, and multiples of 12000 up to 192000; for
        /// Intelligent-Tiering file systems the valid values are 6000 and 12000.
        /// There is no default: in AUTOMATIC mode a value must not be specified,
        /// because FSx for Lustre derives it from the storage capacity.
        public let iops: Int?
        /// How metadata IOPS are provisioned. In AUTOMATIC mode (SSD file systems
        /// only) FSx for Lustre scales metadata IOPS with storage capacity; in
        /// USER_PROVISIONED mode the caller supplies `iops` explicitly.
        public let mode: MetadataConfigurationMode?

        /// Creates a metadata configuration; both parameters default to `nil`.
        @inlinable
        public init(iops: Int? = nil, mode: MetadataConfigurationMode? = nil) {
            // Independent assignments; order is arbitrary.
            self.mode = mode
            self.iops = iops
        }

        /// Client-side validation of the modeled bounds for `iops`. The two
        /// bounds are mutually exclusive, so at most one can throw.
        public func validate(name: String) throws {
            try self.validate(self.iops, name: "iops", parent: name, min: 1500)
            try self.validate(self.iops, name: "iops", parent: name, max: 192000)
        }

        /// Maps each Swift property to its PascalCase JSON member name.
        private enum CodingKeys: String, CodingKey {
            case iops = "Iops"
            case mode = "Mode"
        }
    }

    public struct CreateFileSystemOntapConfiguration: AWSEncodableShape {
        /// The number of days to retain automatic backups; `validate(name:)`
        /// enforces a range of 0–90.
        public let automaticBackupRetentionDays: Int?
        /// The preferred time to take daily automatic backups, in the 24-hour
        /// `HH:MM` format enforced by `validate(name:)`.
        public let dailyAutomaticBackupStartTime: String?
        /// Specifies the FSx for ONTAP file system deployment type to use in creating the file system.      MULTI_AZ_1 - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.    MULTI_AZ_2 - A high availability file system configured for Multi-AZ redundancy to tolerate  temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.    SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.    SINGLE_AZ_2 - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.  This is a second-generation FSx for ONTAP file system.   For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.
        public let deploymentType: OntapDeploymentType?
        /// The SSD IOPS configuration for the FSx for ONTAP file system.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// (Multi-AZ only) Specifies the IPv4 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx  API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the  Amazon FSx  console, Amazon FSx  chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpAddressRange: String?
        /// (Multi-AZ only) Specifies the IPv6 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /118 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpv6AddressRange: String?
        /// The ONTAP administrative password for the fsxadmin user with which you administer your file system using the NetApp ONTAP CLI and REST API.
        public let fsxAdminPassword: String?
        /// Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1.  The value of this property affects the values of StorageCapacity,  Iops, and ThroughputCapacity. For more information, see  High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support  (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see  Using block storage protocols.  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The value of HAPairs is less than 1 or greater than 12.   The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.
        public let haPairs: Int?
        /// Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies the subnet in which you want the preferred file server to be located.
        public let preferredSubnetId: String?
        /// (Multi-AZ only) Specifies the route tables in which Amazon FSx  creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx  selects your VPC's default route table.  Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication.  These route tables are tagged with Key: AmazonFSx; Value: ManagedByAmazonFSx.  When creating FSx for ONTAP Multi-AZ file systems using CloudFormation we recommend that you add the  Key: AmazonFSx; Value: ManagedByAmazonFSx tag manually.
        public let routeTableIds: [String]?
        /// Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see  Managing throughput capacity  in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.   The value of ThroughputCapacity when divided by the value of HAPairs is outside of the valid range for ThroughputCapacity.
        public let throughputCapacity: Int?
        /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.  You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity when creating a file system, but not both. This field and ThroughputCapacity are the same for file systems powered by one HA pair.   For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.   For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.   For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.   Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.   The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).   The value of ThroughputCapacityPerHAPair is not a valid value.
        public let throughputCapacityPerHAPair: Int?
        /// The preferred weekly maintenance start time, in the `d:HH:MM` format
        /// enforced by `validate(name:)`, where `d` is a weekday digit 1–7.
        public let weeklyMaintenanceStartTime: String?

        /// Creates a `CreateFileSystemOntapConfiguration`. Every parameter is
        /// optional and defaults to `nil`.
        @inlinable
        public init(automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, deploymentType: OntapDeploymentType? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, endpointIpAddressRange: String? = nil, endpointIpv6AddressRange: String? = nil, fsxAdminPassword: String? = nil, haPairs: Int? = nil, preferredSubnetId: String? = nil, routeTableIds: [String]? = nil, throughputCapacity: Int? = nil, throughputCapacityPerHAPair: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            // Each stored property is populated from its matching parameter; the
            // assignments are mutually independent, so their order is arbitrary.
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
            self.throughputCapacityPerHAPair = throughputCapacityPerHAPair
            self.throughputCapacity = throughputCapacity
            self.routeTableIds = routeTableIds
            self.preferredSubnetId = preferredSubnetId
            self.haPairs = haPairs
            self.fsxAdminPassword = fsxAdminPassword
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.endpointIpAddressRange = endpointIpAddressRange
            self.diskIopsConfiguration = diskIopsConfiguration
            self.deploymentType = deploymentType
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
        }

        /// Validates the ONTAP configuration's client-side constraints (length
        /// bounds, numeric ranges, and regex patterns from the AWS FSx service
        /// model) together with the nested disk-IOPS shape.
        /// - Parameter name: The dot-separated path of this shape, used to build
        ///   the field names reported in validation errors.
        /// - Throws: An error describing the first constraint that fails.
        public func validate(name: String) throws {
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            // Nested shape validates itself under a qualified path.
            try self.diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, max: 17)
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, min: 9)
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{9,17}$")
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, max: 43)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, min: 4)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,43}$")
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, max: 50)
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, min: 8)
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{8,50}$")
            try self.validate(self.haPairs, name: "haPairs", parent: name, max: 12)
            try self.validate(self.haPairs, name: "haPairs", parent: name, min: 1)
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, max: 24)
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, min: 15)
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            // Per-element checks for the route-table list, then the list-size cap.
            try self.routeTableIds?.forEach {
                try validate($0, name: "routeTableIds[]", parent: name, max: 21)
                try validate($0, name: "routeTableIds[]", parent: name, min: 12)
                try validate($0, name: "routeTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.routeTableIds, name: "routeTableIds", parent: name, max: 50)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            try self.validate(self.throughputCapacityPerHAPair, name: "throughputCapacityPerHAPair", parent: name, max: 6144)
            try self.validate(self.throughputCapacityPerHAPair, name: "throughputCapacityPerHAPair", parent: name, min: 128)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps each Swift property to its PascalCase wire name in the AWS JSON payload.
        private enum CodingKeys: String, CodingKey {
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpAddressRange = "EndpointIpAddressRange"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case fsxAdminPassword = "FsxAdminPassword"
            case haPairs = "HAPairs"
            case preferredSubnetId = "PreferredSubnetId"
            case routeTableIds = "RouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case throughputCapacityPerHAPair = "ThroughputCapacityPerHAPair"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    /// The Amazon FSx for OpenZFS configuration properties for the file system that's being created.
    public struct CreateFileSystemOpenZFSConfiguration: AWSEncodableShape {
        /// The number of days to retain automatic backups. Client-side validation accepts 0-90 (see validate(name:)).
        public let automaticBackupRetentionDays: Int?
        /// A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to false. If it's set to true, all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.
        public let copyTagsToVolumes: Bool?
        /// The preferred time to take daily automatic backups, formatted HH:MM on a 24-hour clock (enforced by the pattern in validate(name:)); presumably UTC, matching the documented sibling shapes — confirm against the AWS FSx API reference.
        public let dailyAutomaticBackupStartTime: String?
        /// Specifies the file system deployment type. Valid values are the following:    MULTI_AZ_1- Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same Amazon Web Services Region.    SINGLE_AZ_HA_2- Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.    SINGLE_AZ_HA_1- Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.    SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.    SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - 4,096 MBs that automatically recover within a single Availability Zone.   For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.
        public let deploymentType: OpenZFSDeploymentType?
        /// The disk IOPS configuration for the file system; only its own nested validate(name:) is applied client-side.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// (Multi-AZ only) Specifies the IPv4 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpAddressRange: String?
        /// (Multi-AZ only) Specifies the IPv6 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /118 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpv6AddressRange: String?
        /// Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.
        public let preferredSubnetId: String?
        ///  Specifies the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
        public let readCacheConfiguration: OpenZFSReadCacheConfiguration?
        /// The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume.
        public let rootVolumeConfiguration: OpenZFSCreateRootVolumeConfiguration?
        /// (Multi-AZ only) Specifies the route tables in which Amazon FSx  creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx  selects your VPC's default route table.
        public let routeTableIds: [String]?
        /// Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType that you choose, as follows:   For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.   For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.   You pay for additional throughput capacity that you provision.
        public let throughputCapacity: Int?
        /// The preferred start time to perform weekly maintenance, formatted d:HH:MM where d is the weekday number 1-7 (enforced by the pattern in validate(name:)).
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer; every parameter is optional and defaults to `nil`.
        @inlinable
        public init(automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, copyTagsToVolumes: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, deploymentType: OpenZFSDeploymentType? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, endpointIpAddressRange: String? = nil, endpointIpv6AddressRange: String? = nil, preferredSubnetId: String? = nil, readCacheConfiguration: OpenZFSReadCacheConfiguration? = nil, rootVolumeConfiguration: OpenZFSCreateRootVolumeConfiguration? = nil, routeTableIds: [String]? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.copyTagsToVolumes = copyTagsToVolumes
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.deploymentType = deploymentType
            self.diskIopsConfiguration = diskIopsConfiguration
            self.endpointIpAddressRange = endpointIpAddressRange
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.preferredSubnetId = preferredSubnetId
            self.readCacheConfiguration = readCacheConfiguration
            self.rootVolumeConfiguration = rootVolumeConfiguration
            self.routeTableIds = routeTableIds
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Validates fields against the client-side constraints from the AWS service
        /// model, throwing on the first violated constraint.
        public func validate(name: String) throws {
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, max: 17)
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, min: 9)
            try self.validate(self.endpointIpAddressRange, name: "endpointIpAddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{9,17}$")
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, max: 43)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, min: 4)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,43}$")
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, max: 24)
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, min: 15)
            try self.validate(self.preferredSubnetId, name: "preferredSubnetId", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            try self.readCacheConfiguration?.validate(name: "\(name).readCacheConfiguration")
            try self.rootVolumeConfiguration?.validate(name: "\(name).rootVolumeConfiguration")
            // Per-element constraints first, then the overall list-length cap.
            try self.routeTableIds?.forEach {
                try validate($0, name: "routeTableIds[]", parent: name, max: 21)
                try validate($0, name: "routeTableIds[]", parent: name, min: 12)
                try validate($0, name: "routeTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.routeTableIds, name: "routeTableIds", parent: name, max: 50)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps each Swift property to its PascalCase wire name in the AWS JSON payload.
        private enum CodingKeys: String, CodingKey {
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case copyTagsToVolumes = "CopyTagsToVolumes"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpAddressRange = "EndpointIpAddressRange"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case preferredSubnetId = "PreferredSubnetId"
            case readCacheConfiguration = "ReadCacheConfiguration"
            case rootVolumeConfiguration = "RootVolumeConfiguration"
            case routeTableIds = "RouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    /// The request object used to create a new Amazon FSx file system.
    public struct CreateFileSystemRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
        public let clientRequestToken: String?
        /// The type of Amazon FSx file system to create. Valid values are WINDOWS, LUSTRE, ONTAP, and OPENZFS.
        public let fileSystemType: FileSystemType?
        /// For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15:    2.10 is supported by the Scratch and Persistent_1 Lustre  deployment types.    2.12 is supported by all Lustre deployment types, except for PERSISTENT_2 with a metadata configuration mode.    2.15 is supported by all Lustre deployment types and is recommended for all new file systems.   Default value is 2.10, except for the following deployments:   Default value is 2.12 when DeploymentType is set to  PERSISTENT_2 without a metadata configuration mode.   Default value is 2.15 when DeploymentType is set to  PERSISTENT_2 with a metadata configuration mode.
        public let fileSystemTypeVersion: String?
        /// The KMS key ID. Only its length (1-2048 characters) is validated client-side; see the AWS FSx API reference for its semantics.
        public let kmsKeyId: String?
        /// The Lustre configuration for the file system that's being created.
        public let lustreConfiguration: CreateFileSystemLustreConfiguration?
        /// The network type of the Amazon FSx file system that you are creating. Valid values are IPV4 (which supports IPv4 only) and DUAL (for dual-stack mode, which supports both IPv4 and IPv6). The default is IPV4. Supported for FSx for OpenZFS, FSx for ONTAP, and FSx for Windows File Server file systems.
        public let networkType: NetworkType?
        /// The ONTAP configuration for the file system that's being created.
        public let ontapConfiguration: CreateFileSystemOntapConfiguration?
        /// The OpenZFS configuration for the file system that's being created.
        public let openZFSConfiguration: CreateFileSystemOpenZFSConfiguration?
        /// A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later requests to describe the file system.  You must specify a security group if you are creating a Multi-AZ  FSx for ONTAP file system in a VPC subnet that has been shared with you.
        public let securityGroupIds: [String]?
        /// Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).  FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows:   For SCRATCH_2, PERSISTENT_2, and PERSISTENT_1 deployment types  using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.   For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for  12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.   For SCRATCH_1 deployment type, valid values are  1200 GiB, 2400 GiB, and increments of 3600 GiB.    FSx for ONTAP file systems - The amount of storage capacity  that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs.   FSx for OpenZFS file systems - The amount of storage capacity that  you can configure is from 64 GiB up to 524,288 GiB (512 TiB).  FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows:   For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).   For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).
        public let storageCapacity: Int?
        /// Sets the storage class for the file system that you're creating. Valid values are SSD, HDD, and INTELLIGENT_TIERING.   Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.   Set to HDD to use hard disk drive storage, which is supported on  SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.   Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the Persistent_2 deployment type.   Default value is SSD. For more information, see  Storage type options in the FSx for Windows File Server User Guide, FSx for Lustre storage classes in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.
        public let storageType: StorageType?
        /// Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP MULTI_AZ_1 deployment types,provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID or OntapConfiguration > PreferredSubnetID properties. For more information about Multi-AZ file system configuration, see  Availability and durability: Single-AZ and Multi-AZ file systems in the Amazon FSx for Windows User Guide and  Availability and durability in the Amazon FSx for ONTAP User Guide. For Windows SINGLE_AZ_1 and SINGLE_AZ_2 and all Lustre  deployment types, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.
        public let subnetIds: [String]?
        /// The tags to apply to the file system that's being created. The key value of the Name tag appears in the console as the file system name.
        public let tags: [Tag]?
        /// The Microsoft Windows configuration for the file system that's being created.
        public let windowsConfiguration: CreateFileSystemWindowsConfiguration?

        /// Memberwise initializer. `clientRequestToken` defaults to a freshly generated
        /// idempotency token; every other parameter defaults to `nil`.
        @inlinable
        public init(clientRequestToken: String? = CreateFileSystemRequest.idempotencyToken(), fileSystemType: FileSystemType? = nil, fileSystemTypeVersion: String? = nil, kmsKeyId: String? = nil, lustreConfiguration: CreateFileSystemLustreConfiguration? = nil, networkType: NetworkType? = nil, ontapConfiguration: CreateFileSystemOntapConfiguration? = nil, openZFSConfiguration: CreateFileSystemOpenZFSConfiguration? = nil, securityGroupIds: [String]? = nil, storageCapacity: Int? = nil, storageType: StorageType? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, windowsConfiguration: CreateFileSystemWindowsConfiguration? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemType = fileSystemType
            self.fileSystemTypeVersion = fileSystemTypeVersion
            self.kmsKeyId = kmsKeyId
            self.lustreConfiguration = lustreConfiguration
            self.networkType = networkType
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.securityGroupIds = securityGroupIds
            self.storageCapacity = storageCapacity
            self.storageType = storageType
            self.subnetIds = subnetIds
            self.tags = tags
            self.windowsConfiguration = windowsConfiguration
        }

        /// Validates fields against the client-side constraints from the AWS service
        /// model, throwing on the first violated constraint.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is copied verbatim from the AWS service model. The
            // "A-z" range also admits "[", "\", "]", "^" and backtick; do not "correct"
            // it to "A-Za-z" without confirming against the upstream model, as that would
            // tighten client-side validation beyond what the service accepts.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, max: 20)
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, min: 1)
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, pattern: "^[0-9](.[0-9]*)*$")
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, min: 1)
            try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^.{1,2048}$")
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            // Per-element constraints first, then the overall list-length cap.
            try self.securityGroupIds?.forEach {
                try validate($0, name: "securityGroupIds[]", parent: name, max: 20)
                try validate($0, name: "securityGroupIds[]", parent: name, min: 11)
                try validate($0, name: "securityGroupIds[]", parent: name, pattern: "^(sg-[0-9a-f]{8,})$")
            }
            try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, max: 50)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, max: 2147483647)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, min: 0)
            try self.subnetIds?.forEach {
                try validate($0, name: "subnetIds[]", parent: name, max: 24)
                try validate($0, name: "subnetIds[]", parent: name, min: 15)
                try validate($0, name: "subnetIds[]", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            }
            try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 50)
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
            try self.windowsConfiguration?.validate(name: "\(name).windowsConfiguration")
        }

        /// Maps each Swift property to its PascalCase wire name in the AWS JSON payload.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemType = "FileSystemType"
            case fileSystemTypeVersion = "FileSystemTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lustreConfiguration = "LustreConfiguration"
            case networkType = "NetworkType"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case securityGroupIds = "SecurityGroupIds"
            case storageCapacity = "StorageCapacity"
            case storageType = "StorageType"
            case subnetIds = "SubnetIds"
            case tags = "Tags"
            case windowsConfiguration = "WindowsConfiguration"
        }
    }

    /// The response object returned after the file system is created.
    public struct CreateFileSystemResponse: AWSDecodableShape {
        /// The configuration of the file system that was created.
        public let fileSystem: FileSystem?

        /// Memberwise initializer.
        @inlinable
        public init(fileSystem: FileSystem? = nil) {
            self.fileSystem = fileSystem
        }

        /// Maps the Swift property to its PascalCase wire name in the AWS JSON payload.
        private enum CodingKeys: String, CodingKey {
            case fileSystem = "FileSystem"
        }
    }

    /// The Microsoft Windows configuration for the file system that's being created
    /// (carried by CreateFileSystemRequest.windowsConfiguration).
    public struct CreateFileSystemWindowsConfiguration: AWSEncodableShape {
        /// The ID for an existing Amazon Web Services Managed Microsoft Active Directory (AD) instance that the file system should join when it's created.
        public let activeDirectoryId: String?
        /// An array of one or more DNS alias names that you want to associate with the Amazon FSx file system.  Aliases allow you to use existing DNS names to access the data in your Amazon FSx file system.  You can associate up to 50 aliases with a file system at any time.  You can associate additional DNS aliases after you create the file system using the AssociateFileSystemAliases operation.  You can remove DNS aliases from the file system after it is created using the DisassociateFileSystemAliases operation. You only need to specify the alias name in the request payload. For more information, see Managing DNS aliases and  Accessing data using DNS aliases. An alias name has to meet the following requirements:   Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example, accounting.example.com.   Can contain alphanumeric characters, the underscore (_), and the hyphen (-).   Cannot start or end with a hyphen.   Can start with a numeric.   For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them:  as uppercase letters, lowercase letters, or the corresponding letters in escape codes.
        public let aliases: [String]?
        /// The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.
        public let auditLogConfiguration: WindowsAuditLogCreateConfiguration?
        /// The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 30.
        public let automaticBackupRetentionDays: Int?
        /// A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// The preferred time to take daily automatic backups, formatted HH:MM in the UTC time zone.
        public let dailyAutomaticBackupStartTime: String?
        /// Specifies the file system deployment type, valid values are the following:    MULTI_AZ_1 - Deploys a high availability file system that is configured  for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. You  can only deploy a Multi-AZ file system in Amazon Web Services Regions that have a minimum of three Availability Zones. Also  supports HDD storage type    SINGLE_AZ_1 - (Default) Choose to deploy a file system that is configured for single AZ redundancy.    SINGLE_AZ_2 - The latest generation Single AZ file system.  Specifies a file system that is configured for single AZ redundancy and supports HDD storage type.   For more information, see   Availability and Durability: Single-AZ and Multi-AZ File Systems.
        public let deploymentType: WindowsDeploymentType?
        /// The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet  in which you want the preferred file server to be located. For in-Amazon Web Services applications, we recommend that you launch  your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ  data transfer costs and minimize latency.
        public let preferredSubnetId: String?
        /// The configuration for joining the file system to a self-managed Microsoft Active Directory — only its own nested validate(name:) is applied client-side; confirm details against the AWS FSx API reference.
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfiguration?
        /// Sets the throughput capacity of an Amazon FSx file system, measured in megabytes per second (MB/s), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).
        public let throughputCapacity: Int?
        /// The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer; every parameter is optional and defaults to `nil`.
        @inlinable
        public init(activeDirectoryId: String? = nil, aliases: [String]? = nil, auditLogConfiguration: WindowsAuditLogCreateConfiguration? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, deploymentType: WindowsDeploymentType? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, preferredSubnetId: String? = nil, selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfiguration? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.activeDirectoryId = activeDirectoryId
            self.aliases = aliases
            self.auditLogConfiguration = auditLogConfiguration
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.deploymentType = deploymentType
            self.diskIopsConfiguration = diskIopsConfiguration
            self.preferredSubnetId = preferredSubnetId
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Validates every field of this shape against the client-side constraints
        /// declared in the AWS FSx service model, throwing on the first violated
        /// constraint. Field order (and therefore which error surfaces first for
        /// multiply-invalid input) matches the generated original.
        public func validate(name: String) throws {
            // Directory IDs look like "d-" + 10 hex digits — exactly 12 characters.
            try validate(activeDirectoryId, name: "activeDirectoryId", parent: name, max: 12)
            try validate(activeDirectoryId, name: "activeDirectoryId", parent: name, min: 12)
            try validate(activeDirectoryId, name: "activeDirectoryId", parent: name, pattern: "^d-[0-9a-f]{10}$")
            // Per-element constraints first, then the overall list-length cap,
            // exactly as in the generated original.
            if let aliases = self.aliases {
                for alias in aliases {
                    try validate(alias, name: "aliases[]", parent: name, max: 253)
                    try validate(alias, name: "aliases[]", parent: name, min: 4)
                    try validate(alias, name: "aliases[]", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,253}$")
                }
            }
            try validate(aliases, name: "aliases", parent: name, max: 50)
            try auditLogConfiguration?.validate(name: "\(name).auditLogConfiguration")
            try validate(automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try validate(automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            // "HH:MM", 24-hour clock — exactly 5 characters.
            try validate(dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try validate(dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try validate(dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try validate(preferredSubnetId, name: "preferredSubnetId", parent: name, max: 24)
            try validate(preferredSubnetId, name: "preferredSubnetId", parent: name, min: 15)
            try validate(preferredSubnetId, name: "preferredSubnetId", parent: name, pattern: "^(subnet-[0-9a-f]{8,})$")
            try selfManagedActiveDirectoryConfiguration?.validate(name: "\(name).selfManagedActiveDirectoryConfiguration")
            // NOTE(review): the doc comment on `throughputCapacity` says 8-2048 MB/s,
            // but the generated model constraint allows up to 100000 — kept as generated.
            try validate(throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try validate(throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            // "d:HH:MM" where d is 1-7 — length must be exactly 7.
            try validate(weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try validate(weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try validate(weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase keys used in
        // the JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryId = "ActiveDirectoryId"
            case aliases = "Aliases"
            case auditLogConfiguration = "AuditLogConfiguration"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case preferredSubnetId = "PreferredSubnetId"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    /// The configuration object for creating an Amazon FSx for NetApp ONTAP volume.
    public struct CreateOntapVolumeConfiguration: AWSEncodableShape {
        /// Use to specify configuration options for a volume’s storage aggregate or aggregates.
        public let aggregateConfiguration: CreateAggregateConfiguration?
        /// A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// Specifies the location in the SVM's namespace where the volume is mounted. This parameter is required. The JunctionPath must have a leading forward slash, such as /vol3.
        public let junctionPath: String?
        /// Specifies the type of volume you are creating. Valid values are the following:    RW specifies a read/write volume. RW is the default.    DP specifies a data-protection volume. A DP volume is read-only and can be used as the destination of a NetApp SnapMirror relationship.   For more information, see Volume types  in the Amazon FSx for NetApp ONTAP User Guide.
        public let ontapVolumeType: InputOntapVolumeType?
        /// Specifies the security style for the volume. If a volume's security style is not specified,  it is automatically set to the root volume's security style. The security style determines the type of permissions  that FSx for ONTAP uses to control data access. Specify one of the following values:    UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.     NTFS if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.    MIXED This is an advanced setting. For more information, see the topic  What the security styles and their effects are  in the NetApp Documentation Center.   For more information, see Volume security style in the  FSx for ONTAP User Guide.
        public let securityStyle: SecurityStyle?
        /// Specifies the configured size of the volume, in bytes.
        public let sizeInBytes: Int64?
        /// Use SizeInBytes instead. Specifies the size of the volume, in megabytes (MB), that you are creating.
        public let sizeInMegabytes: Int?
        /// Specifies the SnapLock configuration for an FSx for ONTAP volume.
        public let snaplockConfiguration: CreateSnaplockConfiguration?
        /// Specifies the snapshot policy for the volume. There are three built-in snapshot policies:    default: This is the default policy. A maximum of six hourly snapshots taken five minutes past  the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.    default-1weekly: This policy is the same as the default policy except  that it only retains one snapshot from the weekly schedule.    none: This policy does not take any snapshots. This policy can be assigned to volumes to  prevent automatic snapshots from being taken.   You can also provide the name of a custom policy that you created with the ONTAP CLI or REST API. For more information, see Snapshot policies  in the Amazon FSx for NetApp ONTAP User Guide.
        public let snapshotPolicy: String?
        /// Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume, or set to false to disable them.  StorageEfficiencyEnabled is required when creating a RW volume (OntapVolumeType set to RW).
        public let storageEfficiencyEnabled: Bool?
        /// Specifies the ONTAP SVM in which to create the volume.
        public let storageVirtualMachineId: String?
        /// The volume's data tiering policy. NOTE(review): carries no documentation in the
        /// generated model — see the TieringPolicy shape and confirm semantics against the
        /// AWS FSx API reference.
        public let tieringPolicy: TieringPolicy?
        /// Use to specify the style of an ONTAP volume. FSx for ONTAP offers two styles of volumes that you can use for different purposes,  FlexVol and FlexGroup volumes. For more information, see  Volume styles in the Amazon FSx for NetApp ONTAP User Guide.
        public let volumeStyle: VolumeStyle?

        // Preferred memberwise initializer; leaves the deprecated sizeInMegabytes member nil.
        @inlinable
        public init(aggregateConfiguration: CreateAggregateConfiguration? = nil, copyTagsToBackups: Bool? = nil, junctionPath: String? = nil, ontapVolumeType: InputOntapVolumeType? = nil, securityStyle: SecurityStyle? = nil, sizeInBytes: Int64? = nil, snaplockConfiguration: CreateSnaplockConfiguration? = nil, snapshotPolicy: String? = nil, storageEfficiencyEnabled: Bool? = nil, storageVirtualMachineId: String? = nil, tieringPolicy: TieringPolicy? = nil, volumeStyle: VolumeStyle? = nil) {
            self.aggregateConfiguration = aggregateConfiguration
            self.copyTagsToBackups = copyTagsToBackups
            self.junctionPath = junctionPath
            self.ontapVolumeType = ontapVolumeType
            self.securityStyle = securityStyle
            self.sizeInBytes = sizeInBytes
            self.sizeInMegabytes = nil // deprecated member — only settable via the deprecated overload below
            self.snaplockConfiguration = snaplockConfiguration
            self.snapshotPolicy = snapshotPolicy
            self.storageEfficiencyEnabled = storageEfficiencyEnabled
            self.storageVirtualMachineId = storageVirtualMachineId
            self.tieringPolicy = tieringPolicy
            self.volumeStyle = volumeStyle
        }

        // Deprecated overload retained for source compatibility with callers that still
        // set sizeInMegabytes.
        @available(*, deprecated, message: "Members sizeInMegabytes have been deprecated")
        @inlinable
        public init(aggregateConfiguration: CreateAggregateConfiguration? = nil, copyTagsToBackups: Bool? = nil, junctionPath: String? = nil, ontapVolumeType: InputOntapVolumeType? = nil, securityStyle: SecurityStyle? = nil, sizeInBytes: Int64? = nil, sizeInMegabytes: Int? = nil, snaplockConfiguration: CreateSnaplockConfiguration? = nil, snapshotPolicy: String? = nil, storageEfficiencyEnabled: Bool? = nil, storageVirtualMachineId: String? = nil, tieringPolicy: TieringPolicy? = nil, volumeStyle: VolumeStyle? = nil) {
            self.aggregateConfiguration = aggregateConfiguration
            self.copyTagsToBackups = copyTagsToBackups
            self.junctionPath = junctionPath
            self.ontapVolumeType = ontapVolumeType
            self.securityStyle = securityStyle
            self.sizeInBytes = sizeInBytes
            self.sizeInMegabytes = sizeInMegabytes
            self.snaplockConfiguration = snaplockConfiguration
            self.snapshotPolicy = snapshotPolicy
            self.storageEfficiencyEnabled = storageEfficiencyEnabled
            self.storageVirtualMachineId = storageVirtualMachineId
            self.tieringPolicy = tieringPolicy
            self.volumeStyle = volumeStyle
        }

        /// Validates members against the generated AWS API model's constraints,
        /// throwing on the first violation; the check order is generated — do not
        /// reorder by hand.
        public func validate(name: String) throws {
            try self.aggregateConfiguration?.validate(name: "\(name).aggregateConfiguration")
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, max: 255)
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, min: 1)
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.validate(self.sizeInBytes, name: "sizeInBytes", parent: name, max: 22517998000000000)
            try self.validate(self.sizeInBytes, name: "sizeInBytes", parent: name, min: 0)
            try self.validate(self.sizeInMegabytes, name: "sizeInMegabytes", parent: name, max: 2147483647)
            try self.validate(self.sizeInMegabytes, name: "sizeInMegabytes", parent: name, min: 0)
            try self.snaplockConfiguration?.validate(name: "\(name).snaplockConfiguration")
            try self.validate(self.snapshotPolicy, name: "snapshotPolicy", parent: name, max: 255)
            try self.validate(self.snapshotPolicy, name: "snapshotPolicy", parent: name, min: 1)
            // SVM ID is exactly 21 characters: "svm-" plus 17 hex digits.
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, max: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, min: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, pattern: "^(svm-[0-9a-f]{17,})$")
            try self.tieringPolicy?.validate(name: "\(name).tieringPolicy")
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case aggregateConfiguration = "AggregateConfiguration"
            case copyTagsToBackups = "CopyTagsToBackups"
            case junctionPath = "JunctionPath"
            case ontapVolumeType = "OntapVolumeType"
            case securityStyle = "SecurityStyle"
            case sizeInBytes = "SizeInBytes"
            case sizeInMegabytes = "SizeInMegabytes"
            case snaplockConfiguration = "SnaplockConfiguration"
            case snapshotPolicy = "SnapshotPolicy"
            case storageEfficiencyEnabled = "StorageEfficiencyEnabled"
            case storageVirtualMachineId = "StorageVirtualMachineId"
            case tieringPolicy = "TieringPolicy"
            case volumeStyle = "VolumeStyle"
        }
    }

    /// The snapshot configuration to use when creating an Amazon FSx for OpenZFS volume
    /// from a snapshot.
    public struct CreateOpenZFSOriginSnapshotConfiguration: AWSEncodableShape {
        /// Specifies the strategy used when copying data from the snapshot to the new volume.     CLONE - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying data from the snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data.    FULL_COPY - Copies all data from the snapshot to the new volume. Specify this option to create the volume from a snapshot on another FSx for OpenZFS file system.    The INCREMENTAL_COPY option is only for updating an existing volume by using a snapshot from another FSx for OpenZFS file system. For more information, see CopySnapshotAndUpdateVolume.
        public let copyStrategy: OpenZFSCopyStrategy?
        /// The Amazon Resource Name (ARN) of the origin snapshot — validated against an
        /// ARN pattern in validate(name:).
        public let snapshotARN: String?

        @inlinable
        public init(copyStrategy: OpenZFSCopyStrategy? = nil, snapshotARN: String? = nil) {
            self.copyStrategy = copyStrategy
            self.snapshotARN = snapshotARN
        }

        /// Validates snapshotARN's length (8-512) and ARN shape per the generated
        /// AWS API model, throwing on the first violation.
        public func validate(name: String) throws {
            try self.validate(self.snapshotARN, name: "snapshotARN", parent: name, max: 512)
            try self.validate(self.snapshotARN, name: "snapshotARN", parent: name, min: 8)
            try self.validate(self.snapshotARN, name: "snapshotARN", parent: name, pattern: "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?<!-)):){4}(?!/).{0,1024}$")
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case copyStrategy = "CopyStrategy"
            case snapshotARN = "SnapshotARN"
        }
    }

    /// The configuration object for creating an Amazon FSx for OpenZFS volume.
    public struct CreateOpenZFSVolumeConfiguration: AWSEncodableShape {
        /// A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If this value is set to true, and you do not specify any tags, all tags for the original volume are copied over to snapshots.  If this value is set to true, and you do specify one or more tags, only the specified tags for the original volume are copied over to snapshots. If you specify one or more tags when creating a new snapshot, no tags are copied over from the original volume, regardless of this value.
        public let copyTagsToSnapshots: Bool?
        /// Specifies the method used to compress the data on the volume. The compression type is NONE by default.    NONE - Doesn't compress the data on the volume. NONE is the default.    ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. ZSTD compression provides a higher level of  data compression and higher read throughput performance than LZ4 compression.    LZ4 - Compresses the data in the volume using the LZ4 compression algorithm. LZ4 compression provides a lower level of compression  and higher write throughput performance than ZSTD compression.   For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system, see  Tips for maximizing performance File system and volume settings in the Amazon FSx for OpenZFS User Guide.
        public let dataCompressionType: OpenZFSDataCompressionType?
        /// The configuration object for mounting a Network File System (NFS) file system.
        public let nfsExports: [OpenZFSNfsExport]?
        /// The configuration object that specifies the snapshot to use as the origin of the data for the volume.
        public let originSnapshot: CreateOpenZFSOriginSnapshotConfiguration?
        /// The ID of the volume to use as the parent volume of the volume that you are creating.
        public let parentVolumeId: String?
        /// A Boolean value indicating whether the volume is read-only.
        public let readOnly: Bool?
        /// Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB).  For file systems using the Intelligent-Tiering storage class, valid values are 128, 256, 512, 1024, 2048, or 4096 KiB, with a default of 1024 KiB.  For all other file systems, valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB, with a default of 128 KiB.  We recommend using the default setting for the majority of use cases. Generally, workloads that write in fixed small or large record sizes may benefit from setting a custom record size, like database workloads (small record size) or media streaming workloads (large record size). For additional guidance on when to set a custom record size, see   ZFS Record size in the Amazon FSx for OpenZFS User Guide.
        public let recordSizeKiB: Int?
        /// Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify  a quota that is larger than the storage on the parent volume. A volume quota limits  the amount of storage that the volume can consume to the configured amount, but does not  guarantee the space will be available on the parent volume. To guarantee quota space, you must also set  StorageCapacityReservationGiB. To not specify a storage capacity quota, set this to -1.  For more information, see  Volume properties  in the Amazon FSx for OpenZFS User Guide.
        public let storageCapacityQuotaGiB: Int?
        /// Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting StorageCapacityReservationGiB guarantees that the specified amount of storage space on the parent volume will always be available for the volume.  You can't reserve more storage than the parent volume has. To not specify a storage capacity  reservation, set this to 0 or -1. For more information, see  Volume properties  in the Amazon FSx for OpenZFS User Guide.
        public let storageCapacityReservationGiB: Int?
        /// Configures how much storage users and groups can use on the volume.
        public let userAndGroupQuotas: [OpenZFSUserOrGroupQuota]?

        @inlinable
        public init(copyTagsToSnapshots: Bool? = nil, dataCompressionType: OpenZFSDataCompressionType? = nil, nfsExports: [OpenZFSNfsExport]? = nil, originSnapshot: CreateOpenZFSOriginSnapshotConfiguration? = nil, parentVolumeId: String? = nil, readOnly: Bool? = nil, recordSizeKiB: Int? = nil, storageCapacityQuotaGiB: Int? = nil, storageCapacityReservationGiB: Int? = nil, userAndGroupQuotas: [OpenZFSUserOrGroupQuota]? = nil) {
            self.copyTagsToSnapshots = copyTagsToSnapshots
            self.dataCompressionType = dataCompressionType
            self.nfsExports = nfsExports
            self.originSnapshot = originSnapshot
            self.parentVolumeId = parentVolumeId
            self.readOnly = readOnly
            self.recordSizeKiB = recordSizeKiB
            self.storageCapacityQuotaGiB = storageCapacityQuotaGiB
            self.storageCapacityReservationGiB = storageCapacityReservationGiB
            self.userAndGroupQuotas = userAndGroupQuotas
        }

        /// Validates members against the generated AWS API model's constraints,
        /// throwing on the first violation; the check order is generated — do not
        /// reorder by hand.
        public func validate(name: String) throws {
            // At most one NFS export configuration; each is validated recursively.
            try self.nfsExports?.forEach {
                try $0.validate(name: "\(name).nfsExports[]")
            }
            try self.validate(self.nfsExports, name: "nfsExports", parent: name, max: 1)
            try self.originSnapshot?.validate(name: "\(name).originSnapshot")
            // Parent volume ID is exactly 23 characters: "fsvol-" plus 17 hex digits.
            try self.validate(self.parentVolumeId, name: "parentVolumeId", parent: name, max: 23)
            try self.validate(self.parentVolumeId, name: "parentVolumeId", parent: name, min: 23)
            try self.validate(self.parentVolumeId, name: "parentVolumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, max: 4096)
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, min: 4)
            // -1 is a sentinel for "no quota/reservation" (see the member docs above).
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, max: 2147483647)
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, min: -1)
            try self.validate(self.storageCapacityReservationGiB, name: "storageCapacityReservationGiB", parent: name, max: 2147483647)
            try self.validate(self.storageCapacityReservationGiB, name: "storageCapacityReservationGiB", parent: name, min: -1)
            // At most 500 user/group quota entries; each is validated recursively.
            try self.userAndGroupQuotas?.forEach {
                try $0.validate(name: "\(name).userAndGroupQuotas[]")
            }
            try self.validate(self.userAndGroupQuotas, name: "userAndGroupQuotas", parent: name, max: 500)
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case copyTagsToSnapshots = "CopyTagsToSnapshots"
            case dataCompressionType = "DataCompressionType"
            case nfsExports = "NfsExports"
            case originSnapshot = "OriginSnapshot"
            case parentVolumeId = "ParentVolumeId"
            case readOnly = "ReadOnly"
            case recordSizeKiB = "RecordSizeKiB"
            case storageCapacityQuotaGiB = "StorageCapacityQuotaGiB"
            case storageCapacityReservationGiB = "StorageCapacityReservationGiB"
            case userAndGroupQuotas = "UserAndGroupQuotas"
        }
    }

    /// The SnapLock configuration to apply when creating an FSx for ONTAP volume.
    public struct CreateSnaplockConfiguration: AWSEncodableShape {
        /// Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default  value is false. If you set AuditLogVolume to true, the SnapLock volume is  created as an audit log volume. The minimum retention period for an audit log volume is six months.  For more information, see   SnapLock audit log volumes.
        public let auditLogVolume: Bool?
        /// The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume.
        public let autocommitPeriod: AutocommitPeriod?
        /// Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock  Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete WORM files even  if they have active retention periods. PERMANENTLY_DISABLED is a terminal state.  If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default  value is DISABLED.  For more information, see  Privileged delete.
        public let privilegedDelete: PrivilegedDelete?
        /// Specifies the retention period of an FSx for ONTAP  SnapLock volume.
        public let retentionPeriod: SnaplockRetentionPeriod?
        /// Specifies the retention mode of an FSx for ONTAP SnapLock volume.  After it is set, it can't be changed.  You can choose one of the following retention modes:     COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted  until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect  against ransomware attacks. For more information,  see SnapLock Compliance.     ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users  before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity  and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see  SnapLock Enterprise.
        public let snaplockType: SnaplockType?
        /// Enables or disables volume-append mode  on an FSx for ONTAP SnapLock volume. Volume-append mode allows you to  create WORM-appendable files and write data to them incrementally. The default value is false.  For more information, see Volume-append mode.
        public let volumeAppendModeEnabled: Bool?

        @inlinable
        public init(auditLogVolume: Bool? = nil, autocommitPeriod: AutocommitPeriod? = nil, privilegedDelete: PrivilegedDelete? = nil, retentionPeriod: SnaplockRetentionPeriod? = nil, snaplockType: SnaplockType? = nil, volumeAppendModeEnabled: Bool? = nil) {
            self.auditLogVolume = auditLogVolume
            self.autocommitPeriod = autocommitPeriod
            self.privilegedDelete = privilegedDelete
            self.retentionPeriod = retentionPeriod
            self.snaplockType = snaplockType
            self.volumeAppendModeEnabled = volumeAppendModeEnabled
        }

        /// Validates the nested shapes; only the autocommit-period and retention-period
        /// members carry validatable constraints in the generated model.
        public func validate(name: String) throws {
            try self.autocommitPeriod?.validate(name: "\(name).autocommitPeriod")
            try self.retentionPeriod?.validate(name: "\(name).retentionPeriod")
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case auditLogVolume = "AuditLogVolume"
            case autocommitPeriod = "AutocommitPeriod"
            case privilegedDelete = "PrivilegedDelete"
            case retentionPeriod = "RetentionPeriod"
            case snaplockType = "SnaplockType"
            case volumeAppendModeEnabled = "VolumeAppendModeEnabled"
        }
    }

    /// Request shape for the CreateSnapshot operation.
    public struct CreateSnapshotRequest: AWSEncodableShape {
        /// An idempotency token for the request; defaults to an automatically generated
        /// value (see the initializer's default).
        public let clientRequestToken: String?
        /// The name of the snapshot.
        public let name: String?
        /// The tags to apply to the snapshot — 1 to 50 entries when present (see validate(name:)).
        public let tags: [Tag]?
        /// The ID of the volume that you are taking a snapshot of.
        public let volumeId: String?

        @inlinable
        public init(clientRequestToken: String? = CreateSnapshotRequest.idempotencyToken(), name: String? = nil, tags: [Tag]? = nil, volumeId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.tags = tags
            self.volumeId = volumeId
        }

        /// Validates members against the generated AWS API model's constraints,
        /// throwing on the first violation.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" looks like an upstream-model typo for "A-Za-z" — as
            // written the range A-z also matches "[\]^_`". The pattern is inherited from the
            // AWS API model, so any fix belongs in the model/generator, not a hand edit here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 203)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_:.-]{1,203}$")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
            // Volume ID is exactly 23 characters: "fsvol-" plus 17 hex digits.
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case tags = "Tags"
            case volumeId = "VolumeId"
        }
    }

    /// Response shape for the CreateSnapshot operation.
    public struct CreateSnapshotResponse: AWSDecodableShape {
        /// A description of the snapshot.
        public let snapshot: Snapshot?

        @inlinable
        public init(snapshot: Snapshot? = nil) {
            self.snapshot = snapshot
        }

        // Maps the lowerCamelCase Swift member name to the PascalCase JSON wire key.
        private enum CodingKeys: String, CodingKey {
            case snapshot = "Snapshot"
        }
    }

    /// Request shape for the CreateStorageVirtualMachine operation.
    public struct CreateStorageVirtualMachineRequest: AWSEncodableShape {
        /// Describes the self-managed Microsoft Active Directory to which you want to join the SVM.  Joining an Active Directory provides user authentication and access control for SMB clients,  including Microsoft Windows and macOS clients accessing the file system.
        public let activeDirectoryConfiguration: CreateSvmActiveDirectoryConfiguration?
        /// An idempotency token for the request; defaults to an automatically generated
        /// value (see the initializer's default).
        public let clientRequestToken: String?
        /// The ID of the target file system — "fs-" plus 8+ hex digits (see validate(name:)).
        public let fileSystemId: String?
        /// The name of the SVM.
        public let name: String?
        /// The security style of the root volume of the SVM. Specify one of the following values:    UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.    NTFS if the file system is managed by a Microsoft Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Microsoft Windows user as the service account.    MIXED This is an advanced setting. For more information, see  Volume security style in the Amazon FSx for NetApp ONTAP User Guide.
        public let rootVolumeSecurityStyle: StorageVirtualMachineRootVolumeSecurityStyle?
        /// The password to use when managing the SVM using the NetApp ONTAP CLI or REST API. If you do not specify a password, you can still use the file system's fsxadmin user to manage the SVM.
        public let svmAdminPassword: String?
        /// The tags to apply to the SVM — 1 to 50 entries when present (see validate(name:)).
        public let tags: [Tag]?

        @inlinable
        public init(activeDirectoryConfiguration: CreateSvmActiveDirectoryConfiguration? = nil, clientRequestToken: String? = CreateStorageVirtualMachineRequest.idempotencyToken(), fileSystemId: String? = nil, name: String? = nil, rootVolumeSecurityStyle: StorageVirtualMachineRootVolumeSecurityStyle? = nil, svmAdminPassword: String? = nil, tags: [Tag]? = nil) {
            self.activeDirectoryConfiguration = activeDirectoryConfiguration
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.name = name
            self.rootVolumeSecurityStyle = rootVolumeSecurityStyle
            self.svmAdminPassword = svmAdminPassword
            self.tags = tags
        }

        /// Validates members against the generated AWS API model's constraints,
        /// throwing on the first violation.
        public func validate(name: String) throws {
            try self.activeDirectoryConfiguration?.validate(name: "\(name).activeDirectoryConfiguration")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" looks like an upstream-model typo for "A-Za-z" — as
            // written the range A-z also matches "[\]^_`". Inherited from the AWS API model;
            // fix in the model/generator, not by hand here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.validate(self.name, name: "name", parent: name, max: 47)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,47}$")
            // Password: 8-50 characters, excluding NUL/NEL/LS/PS/CR/LF control characters.
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, max: 50)
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, min: 8)
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{8,50}$")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        // Maps the lowerCamelCase Swift member names to the PascalCase JSON wire keys.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryConfiguration = "ActiveDirectoryConfiguration"
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case name = "Name"
            case rootVolumeSecurityStyle = "RootVolumeSecurityStyle"
            case svmAdminPassword = "SvmAdminPassword"
            case tags = "Tags"
        }
    }

    /// The response object returned by the CreateStorageVirtualMachine operation.
    public struct CreateStorageVirtualMachineResponse: AWSDecodableShape {
        /// Returned after a successful CreateStorageVirtualMachine operation; describes the SVM just created.
        public let storageVirtualMachine: StorageVirtualMachine?

        @inlinable
        public init(storageVirtualMachine: StorageVirtualMachine? = nil) {
            self.storageVirtualMachine = storageVirtualMachine
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case storageVirtualMachine = "StorageVirtualMachine"
        }
    }

    /// Active Directory join settings supplied when creating a storage virtual machine (SVM).
    public struct CreateSvmActiveDirectoryConfiguration: AWSEncodableShape {
        /// The NetBIOS name of the Active Directory computer object that will be created for your SVM.
        public let netBiosName: String?
        /// Self-managed (on-premises or self-hosted) Active Directory settings; validated recursively below.
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfiguration?

        @inlinable
        public init(netBiosName: String? = nil, selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfiguration? = nil) {
            self.netBiosName = netBiosName
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
        }

        /// Client-side validation; throws on the first violated constraint.
        /// NOTE(review): the max check caps `netBiosName` at 15 characters (the classic NetBIOS
        /// limit) while the regex allows up to 255 — both values are emitted verbatim from the AWS
        /// service model. Because the max check runs first, 15 is the effective limit.
        public func validate(name: String) throws {
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, max: 15)
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, min: 1)
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.selfManagedActiveDirectoryConfiguration?.validate(name: "\(name).selfManagedActiveDirectoryConfiguration")
        }

        private enum CodingKeys: String, CodingKey {
            case netBiosName = "NetBiosName"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
        }
    }

    /// The request object for the CreateVolumeFromBackup operation.
    public struct CreateVolumeFromBackupRequest: AWSEncodableShape {
        /// The ID of the source backup; must match `backup-[0-9a-f]{8,}` (see validate).
        public let backupId: String?
        /// An idempotency token, generated automatically when the caller does not supply one.
        public let clientRequestToken: String?
        /// The name of the new volume you're creating.
        public let name: String?
        /// Specifies the configuration of the ONTAP volume that you are creating.
        public let ontapConfiguration: CreateOntapVolumeConfiguration?
        /// The tags to apply to the volume being created.
        public let tags: [Tag]?

        @inlinable
        public init(backupId: String? = nil, clientRequestToken: String? = CreateVolumeFromBackupRequest.idempotencyToken(), name: String? = nil, ontapConfiguration: CreateOntapVolumeConfiguration? = nil, tags: [Tag]? = nil) {
            self.backupId = backupId
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.ontapConfiguration = ontapConfiguration
            self.tags = tags
        }

        /// Performs client-side validation of all fields, throwing on the first
        /// length, pattern, or element-count constraint that is violated.
        public func validate(name: String) throws {
            try self.validate(self.backupId, name: "backupId", parent: name, max: 128)
            try self.validate(self.backupId, name: "backupId", parent: name, min: 12)
            try self.validate(self.backupId, name: "backupId", parent: name, pattern: "^(backup-[0-9a-f]{8,})$")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 203)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,203}$")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            // Validate each tag individually before checking the collection bounds.
            if let tags = self.tags {
                for tag in tags {
                    try tag.validate(name: "\(name).tags[]")
                }
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case ontapConfiguration = "OntapConfiguration"
            case tags = "Tags"
        }
    }

    /// The response object returned by the CreateVolumeFromBackup operation.
    public struct CreateVolumeFromBackupResponse: AWSDecodableShape {
        /// Returned after a successful CreateVolumeFromBackup API operation, describing the volume just created.
        public let volume: Volume?

        @inlinable
        public init(volume: Volume? = nil) {
            self.volume = volume
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case volume = "Volume"
        }
    }

    /// The request object for the CreateVolume operation.
    public struct CreateVolumeRequest: AWSEncodableShape {
        /// An idempotency token, generated automatically when the caller does not supply one.
        public let clientRequestToken: String?
        /// Specifies the name of the volume that you're creating.
        public let name: String?
        /// Specifies the configuration to use when creating the ONTAP volume.
        public let ontapConfiguration: CreateOntapVolumeConfiguration?
        /// Specifies the configuration to use when creating the OpenZFS volume.
        public let openZFSConfiguration: CreateOpenZFSVolumeConfiguration?
        /// The tags to apply to the volume being created.
        public let tags: [Tag]?
        /// Specifies the type of volume to create; ONTAP and OPENZFS are the only valid volume types.
        public let volumeType: VolumeType?

        @inlinable
        public init(clientRequestToken: String? = CreateVolumeRequest.idempotencyToken(), name: String? = nil, ontapConfiguration: CreateOntapVolumeConfiguration? = nil, openZFSConfiguration: CreateOpenZFSVolumeConfiguration? = nil, tags: [Tag]? = nil, volumeType: VolumeType? = nil) {
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.tags = tags
            self.volumeType = volumeType
        }

        /// Performs client-side validation of all fields, throwing on the first
        /// length, pattern, or element-count constraint that is violated.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 203)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,203}$")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            // Validate each tag individually before checking the collection bounds.
            if let tags = self.tags {
                for tag in tags {
                    try tag.validate(name: "\(name).tags[]")
                }
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case tags = "Tags"
            case volumeType = "VolumeType"
        }
    }

    /// The response object returned by the CreateVolume operation.
    public struct CreateVolumeResponse: AWSDecodableShape {
        /// Returned after a successful CreateVolume API operation, describing the volume just created.
        public let volume: Volume?

        @inlinable
        public init(volume: Volume? = nil) {
            self.volume = volume
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case volume = "Volume"
        }
    }

    /// Describes a data repository association that links an FSx for Lustre file system or an
    /// Amazon File Cache resource to an external data repository (S3 or NFS). Decode-only shape.
    public struct DataRepositoryAssociation: AWSDecodableShape {
        /// The system-generated, unique ID of the data repository association.
        public let associationId: String?
        /// A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to true.   BatchImportMetaDataOnCreate is not supported for data repositories linked to an Amazon File Cache resource.
        public let batchImportMetaDataOnCreate: Bool?
        /// The time the data repository association was created.
        public let creationTime: Date?
        /// The path to the data repository that will be linked to the cache or file system.   For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats:   If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association.   If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter.     For Amazon File Cache, the path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional).   For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional).
        public let dataRepositoryPath: String?
        /// For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories.
        public let dataRepositorySubdirectories: [String]?
        /// A failure message for the association, if any (see DataRepositoryFailureDetails).
        public let failureDetails: DataRepositoryFailureDetails?
        /// The globally unique ID of the Amazon File Cache resource.
        public let fileCacheId: String?
        /// A path on the Amazon File Cache that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/, then you cannot link another data repository with cache path /ns1/ns2. This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.  The cache path can only be set to root (/) on an NFS DRA when DataRepositorySubdirectories is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache. The cache path cannot be set to root (/) for an S3 DRA.
        public let fileCachePath: String?
        /// The globally unique ID of the file system this association belongs to.
        public let fileSystemId: String?
        /// A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.  If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.
        public let fileSystemPath: String?
        /// For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
        public let importedFileChunkSize: Int?
        /// Describes the state of a data repository association. The lifecycle can have the following values:    CREATING - The data repository association between  the file system or cache and the data repository is being created.  The data repository is unavailable.    AVAILABLE - The data repository association is available for use.    MISCONFIGURED - The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).    UPDATING - The data repository association is undergoing a customer initiated update that might affect its availability.    DELETING - The data repository association is undergoing a customer initiated deletion.    FAILED - The data repository association is in a terminal state that cannot be recovered.
        public let lifecycle: DataRepositoryLifecycle?
        /// The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.
        public let nfs: NFSDataRepositoryConfiguration?
        /// The Amazon Resource Name (ARN) of this association.
        public let resourceARN: String?
        /// The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.
        public let s3: S3DataRepositoryConfiguration?
        /// The tags associated with this resource.
        public let tags: [Tag]?

        @inlinable
        public init(associationId: String? = nil, batchImportMetaDataOnCreate: Bool? = nil, creationTime: Date? = nil, dataRepositoryPath: String? = nil, dataRepositorySubdirectories: [String]? = nil, failureDetails: DataRepositoryFailureDetails? = nil, fileCacheId: String? = nil, fileCachePath: String? = nil, fileSystemId: String? = nil, fileSystemPath: String? = nil, importedFileChunkSize: Int? = nil, lifecycle: DataRepositoryLifecycle? = nil, nfs: NFSDataRepositoryConfiguration? = nil, resourceARN: String? = nil, s3: S3DataRepositoryConfiguration? = nil, tags: [Tag]? = nil) {
            self.associationId = associationId
            self.batchImportMetaDataOnCreate = batchImportMetaDataOnCreate
            self.creationTime = creationTime
            self.dataRepositoryPath = dataRepositoryPath
            self.dataRepositorySubdirectories = dataRepositorySubdirectories
            self.failureDetails = failureDetails
            self.fileCacheId = fileCacheId
            self.fileCachePath = fileCachePath
            self.fileSystemId = fileSystemId
            self.fileSystemPath = fileSystemPath
            self.importedFileChunkSize = importedFileChunkSize
            self.lifecycle = lifecycle
            self.nfs = nfs
            self.resourceARN = resourceARN
            self.s3 = s3
            self.tags = tags
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case associationId = "AssociationId"
            case batchImportMetaDataOnCreate = "BatchImportMetaDataOnCreate"
            case creationTime = "CreationTime"
            case dataRepositoryPath = "DataRepositoryPath"
            case dataRepositorySubdirectories = "DataRepositorySubdirectories"
            case failureDetails = "FailureDetails"
            case fileCacheId = "FileCacheId"
            case fileCachePath = "FileCachePath"
            case fileSystemId = "FileSystemId"
            case fileSystemPath = "FileSystemPath"
            case importedFileChunkSize = "ImportedFileChunkSize"
            case lifecycle = "Lifecycle"
            case nfs = "NFS"
            case resourceARN = "ResourceARN"
            case s3 = "S3"
            case tags = "Tags"
        }
    }

    /// The legacy (non-DRA) S3 data repository configuration attached directly to an
    /// FSx for Lustre file system. Decode-only shape.
    public struct DataRepositoryConfiguration: AWSDecodableShape {
        /// Describes the file system's linked S3 data repository's AutoImportPolicy.  The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date  as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:    NONE - (Default) AutoImport is off. Amazon FSx only updates  file and directory listings from the linked S3 bucket  when the file system is created. FSx does not update file and directory  listings for any new or changed objects after choosing this option.    NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that  do not currently exist in the FSx file system.     NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports  file and directory listings of any new objects added to the S3 bucket and any  existing objects that are changed in the S3 bucket after you choose this option.    NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any  existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
        public let autoImportPolicy: AutoImportPolicyType?
        /// The export path to the Amazon S3 bucket (and prefix) that you are using to store new and changed Lustre file system files in S3.
        public let exportPath: String?
        /// A failure message for the repository, if any (see DataRepositoryFailureDetails).
        public let failureDetails: DataRepositoryFailureDetails?
        /// For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
        public let importedFileChunkSize: Int?
        /// The import path to the Amazon S3 bucket (and optional prefix) that you're using as the data repository for your FSx for Lustre file system, for example s3://import-bucket/optional-prefix. If a prefix is specified after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system.
        public let importPath: String?
        /// Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository.  The lifecycle can have the following values:    CREATING - The data repository configuration between  the FSx file system and the linked S3 data repository is being created.  The data repository is unavailable.    AVAILABLE - The data repository is available for use.    MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 bucket  until the data repository configuration is corrected. For more information, see  Troubleshooting a Misconfigured linked S3 bucket.     UPDATING - The data repository is undergoing a customer initiated update and availability may be impacted.    FAILED - The data repository is in a terminal state that cannot be recovered.
        public let lifecycle: DataRepositoryLifecycle?

        @inlinable
        public init(autoImportPolicy: AutoImportPolicyType? = nil, exportPath: String? = nil, failureDetails: DataRepositoryFailureDetails? = nil, importedFileChunkSize: Int? = nil, importPath: String? = nil, lifecycle: DataRepositoryLifecycle? = nil) {
            self.autoImportPolicy = autoImportPolicy
            self.exportPath = exportPath
            self.failureDetails = failureDetails
            self.importedFileChunkSize = importedFileChunkSize
            self.importPath = importPath
            self.lifecycle = lifecycle
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case autoImportPolicy = "AutoImportPolicy"
            case exportPath = "ExportPath"
            case failureDetails = "FailureDetails"
            case importedFileChunkSize = "ImportedFileChunkSize"
            case importPath = "ImportPath"
            case lifecycle = "Lifecycle"
        }
    }

    /// Provides detailed information about a failed data repository or data repository association.
    public struct DataRepositoryFailureDetails: AWSDecodableShape {
        /// A human-readable message describing the failure.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    /// Describes a data repository task (export, metadata import, or file release) for an
    /// FSx for Lustre file system or an Amazon File Cache resource. Decode-only shape.
    public struct DataRepositoryTask: AWSDecodableShape {
        /// Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.
        public let capacityToRelease: Int64?
        /// The time the task was created.
        public let creationTime: Date?
        /// The time the system completed processing the task, populated after the task is complete.
        public let endTime: Date?
        /// Failure message describing why the task failed, it is populated only when Lifecycle is set to FAILED.
        public let failureDetails: DataRepositoryTaskFailureDetails?
        /// The system-generated, unique ID of the cache.
        public let fileCacheId: String?
        /// The globally unique ID of the file system.
        public let fileSystemId: String?
        /// The lifecycle status of the data repository task, as follows:    PENDING - The task has not started.    EXECUTING - The task is in process.    FAILED -  The task was not able to be completed. For example, there may be files the task failed to process.  The DataRepositoryTaskFailureDetails property provides more information about task failures.    SUCCEEDED - The task has completed successfully.    CANCELED - The task was canceled and it did not complete.    CANCELING - The task is in process of being canceled.    You cannot delete an FSx for Lustre file system if there are data  repository tasks for the file system in the PENDING or EXECUTING states. Please retry when the data repository task is finished (with a status of CANCELED, SUCCEEDED, or FAILED).  You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.
        public let lifecycle: DataRepositoryTaskLifecycle?
        /// An array of paths that specify the data for the data repository task to process.  For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository. (Default) If Paths is not specified, Amazon FSx uses the file system root directory.
        public let paths: [String]?
        /// The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system.
        public let releaseConfiguration: ReleaseConfiguration?
        /// The completion report produced for the task, if one was requested (see CompletionReport).
        public let report: CompletionReport?
        /// The Amazon Resource Name (ARN) of this task.
        public let resourceARN: String?
        /// The time the system began processing the task.
        public let startTime: Date?
        /// Provides the status of the number of files that the task has processed successfully and failed to process.
        public let status: DataRepositoryTaskStatus?
        /// The tags associated with this resource.
        public let tags: [Tag]?
        /// The system-generated, unique 17-digit ID of the data repository task.
        public let taskId: String?
        /// The type of data repository task.    EXPORT_TO_REPOSITORY tasks export from your Amazon FSx for Lustre file system to a linked data repository.    IMPORT_METADATA_FROM_REPOSITORY tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.    RELEASE_DATA_FROM_FILESYSTEM tasks release files in your Amazon FSx for Lustre file system that have been exported to a linked S3 bucket and that meet your specified release criteria.    AUTO_RELEASE_DATA tasks automatically release files from an Amazon File Cache resource.
        public let type: DataRepositoryTaskType?

        @inlinable
        public init(capacityToRelease: Int64? = nil, creationTime: Date? = nil, endTime: Date? = nil, failureDetails: DataRepositoryTaskFailureDetails? = nil, fileCacheId: String? = nil, fileSystemId: String? = nil, lifecycle: DataRepositoryTaskLifecycle? = nil, paths: [String]? = nil, releaseConfiguration: ReleaseConfiguration? = nil, report: CompletionReport? = nil, resourceARN: String? = nil, startTime: Date? = nil, status: DataRepositoryTaskStatus? = nil, tags: [Tag]? = nil, taskId: String? = nil, type: DataRepositoryTaskType? = nil) {
            self.capacityToRelease = capacityToRelease
            self.creationTime = creationTime
            self.endTime = endTime
            self.failureDetails = failureDetails
            self.fileCacheId = fileCacheId
            self.fileSystemId = fileSystemId
            self.lifecycle = lifecycle
            self.paths = paths
            self.releaseConfiguration = releaseConfiguration
            self.report = report
            self.resourceARN = resourceARN
            self.startTime = startTime
            self.status = status
            self.tags = tags
            self.taskId = taskId
            self.type = type
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case capacityToRelease = "CapacityToRelease"
            case creationTime = "CreationTime"
            case endTime = "EndTime"
            case failureDetails = "FailureDetails"
            case fileCacheId = "FileCacheId"
            case fileSystemId = "FileSystemId"
            case lifecycle = "Lifecycle"
            case paths = "Paths"
            case releaseConfiguration = "ReleaseConfiguration"
            case report = "Report"
            case resourceARN = "ResourceARN"
            case startTime = "StartTime"
            case status = "Status"
            case tags = "Tags"
            case taskId = "TaskId"
            case type = "Type"
        }
    }

    /// Provides information about why a data repository task failed.
    public struct DataRepositoryTaskFailureDetails: AWSDecodableShape {
        /// A human-readable message describing the failure.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    /// A name/values pair used to filter the results of a DescribeDataRepositoryTasks request.
    public struct DataRepositoryTaskFilter: AWSEncodableShape {
        /// Name of the task property to use in filtering the tasks returned in the response.   Use file-system-id to retrieve data repository tasks for specific file systems.   Use task-lifecycle to retrieve data repository tasks with one or more specific lifecycle states,  as follows: CANCELED, EXECUTING, FAILED, PENDING, and SUCCEEDED.
        public let name: DataRepositoryTaskFilterName?
        /// Use Values to include the specific file system IDs and task  lifecycle states for the filters you are using.
        public let values: [String]?

        @inlinable
        public init(name: DataRepositoryTaskFilterName? = nil, values: [String]? = nil) {
            self.name = name
            self.values = values
        }

        /// Performs client-side validation: each element of `values` must satisfy the
        /// length and pattern constraints, and the collection may hold at most 20 entries.
        /// Throws on the first violated constraint.
        public func validate(name: String) throws {
            if let values = self.values {
                for value in values {
                    try validate(value, name: "values[]", parent: name, max: 128)
                    try validate(value, name: "values[]", parent: name, min: 1)
                    try validate(value, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
                }
            }
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case name = "Name"
            case values = "Values"
        }
    }

    /// Provides the task status, showing a running total of the files the task has
    /// processed successfully and failed to process. Decode-only shape.
    public struct DataRepositoryTaskStatus: AWSDecodableShape {
        /// A running total of the number of files that the task failed to process.
        public let failedCount: Int64?
        /// The time at which the task status was last updated.
        public let lastUpdatedTime: Date?
        /// The total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.
        public let releasedCapacity: Int64?
        /// A running total of the number of files that the task has successfully processed.
        public let succeededCount: Int64?
        /// The total number of files that the task will process. While a task is executing, the sum of  SucceededCount plus FailedCount may not equal TotalCount. When the task is complete,  TotalCount equals the sum of SucceededCount plus FailedCount.
        public let totalCount: Int64?

        @inlinable
        public init(failedCount: Int64? = nil, lastUpdatedTime: Date? = nil, releasedCapacity: Int64? = nil, succeededCount: Int64? = nil, totalCount: Int64? = nil) {
            self.failedCount = failedCount
            self.lastUpdatedTime = lastUpdatedTime
            self.releasedCapacity = releasedCapacity
            self.succeededCount = succeededCount
            self.totalCount = totalCount
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case failedCount = "FailedCount"
            case lastUpdatedTime = "LastUpdatedTime"
            case releasedCapacity = "ReleasedCapacity"
            case succeededCount = "SucceededCount"
            case totalCount = "TotalCount"
        }
    }

    /// The request object for the DeleteBackup operation.
    public struct DeleteBackupRequest: AWSEncodableShape {
        /// The ID of the backup that you want to delete.
        public let backupId: String?
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?

        @inlinable
        public init(backupId: String? = nil, clientRequestToken: String? = DeleteBackupRequest.idempotencyToken()) {
            self.backupId = backupId
            self.clientRequestToken = clientRequestToken
        }

        /// Client-side validation; throws on the first violated constraint.
        /// NOTE(review): the token pattern `[A-za-z0-9_.-]` (capital A through lowercase z) appears
        /// verbatim throughout this generated file and presumably comes from the AWS service model;
        /// it also admits `[ \ ] ^` and backtick. Do not "fix" it locally — it would make client
        /// validation stricter than the service. Confirm against the upstream model if in doubt.
        public func validate(name: String) throws {
            try self.validate(self.backupId, name: "backupId", parent: name, max: 128)
            try self.validate(self.backupId, name: "backupId", parent: name, min: 12)
            try self.validate(self.backupId, name: "backupId", parent: name, pattern: "^(backup-[0-9a-f]{8,})$")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
        }

        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case clientRequestToken = "ClientRequestToken"
        }
    }

    /// The response object for the DeleteBackup operation.
    public struct DeleteBackupResponse: AWSDecodableShape {
        /// The ID of the backup that was deleted.
        public let backupId: String?
        /// The lifecycle status of the backup. If the DeleteBackup operation is successful, the status is DELETED.
        public let lifecycle: BackupLifecycle?

        @inlinable
        public init(backupId: String? = nil, lifecycle: BackupLifecycle? = nil) {
            self.backupId = backupId
            self.lifecycle = lifecycle
        }

        /// Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case lifecycle = "Lifecycle"
        }
    }

    /// The request object for the `DeleteDataRepositoryAssociation` operation.
    public struct DeleteDataRepositoryAssociationRequest: AWSEncodableShape {
        /// The ID of the data repository association that you want to delete.
        public let associationId: String?
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?
        /// Set to true to delete the data in the file system that corresponds to the data repository association.
        public let deleteDataInFileSystem: Bool?

        @inlinable
        public init(associationId: String? = nil, clientRequestToken: String? = DeleteDataRepositoryAssociationRequest.idempotencyToken(), deleteDataInFileSystem: Bool? = nil) {
            self.associationId = associationId
            self.clientRequestToken = clientRequestToken
            self.deleteDataInFileSystem = deleteDataInFileSystem
        }

        /// Performs client-side validation of field lengths and patterns before the request is sent.
        public func validate(name: String) throws {
            try self.validate(self.associationId, name: "associationId", parent: name, max: 23)
            try self.validate(self.associationId, name: "associationId", parent: name, min: 13)
            try self.validate(self.associationId, name: "associationId", parent: name, pattern: "^(dra-[0-9a-f]{8,})$")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): the "A-za-z" range comes from the upstream AWS service model; it also
            // admits the characters [ \ ] ^ _ and backtick. Do not tighten in generated code.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case associationId = "AssociationId"
            case clientRequestToken = "ClientRequestToken"
            case deleteDataInFileSystem = "DeleteDataInFileSystem"
        }
    }

    /// The response object for the `DeleteDataRepositoryAssociation` operation.
    public struct DeleteDataRepositoryAssociationResponse: AWSDecodableShape {
        /// The ID of the data repository association being deleted.
        public let associationId: String?
        /// Indicates whether data in the file system that corresponds to the data repository association is being deleted. Default is false.
        public let deleteDataInFileSystem: Bool?
        /// Describes the lifecycle state of the data repository association being deleted.
        public let lifecycle: DataRepositoryLifecycle?

        @inlinable
        public init(associationId: String? = nil, deleteDataInFileSystem: Bool? = nil, lifecycle: DataRepositoryLifecycle? = nil) {
            self.associationId = associationId
            self.deleteDataInFileSystem = deleteDataInFileSystem
            self.lifecycle = lifecycle
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case associationId = "AssociationId"
            case deleteDataInFileSystem = "DeleteDataInFileSystem"
            case lifecycle = "Lifecycle"
        }
    }

    /// The request object for the `DeleteFileCache` operation.
    public struct DeleteFileCacheRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?
        /// The ID of the cache that's being deleted.
        public let fileCacheId: String?

        @inlinable
        public init(clientRequestToken: String? = DeleteFileCacheRequest.idempotencyToken(), fileCacheId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileCacheId = fileCacheId
        }

        /// Performs client-side validation of field lengths and patterns before the request is sent.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" mirrors the upstream AWS service model — do not tighten here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, max: 21)
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, min: 11)
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, pattern: "^(fc-[0-9a-f]{8,})$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileCacheId = "FileCacheId"
        }
    }

    /// The response object for the `DeleteFileCache` operation.
    public struct DeleteFileCacheResponse: AWSDecodableShape {
        /// The ID of the cache that's being deleted.
        public let fileCacheId: String?
        /// The cache lifecycle for the deletion request. If the DeleteFileCache operation is successful, this status is DELETING.
        public let lifecycle: FileCacheLifecycle?

        @inlinable
        public init(fileCacheId: String? = nil, lifecycle: FileCacheLifecycle? = nil) {
            self.fileCacheId = fileCacheId
            self.lifecycle = lifecycle
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case fileCacheId = "FileCacheId"
            case lifecycle = "Lifecycle"
        }
    }

    /// The Lustre-specific configuration used when deleting a file system with `DeleteFileSystem`.
    public struct DeleteFileSystemLustreConfiguration: AWSEncodableShape {
        /// Use if SkipFinalBackup is set to false,  and you want to apply an array of tags to the final backup. If you have set the file system property CopyTagsToBackups to true, and  you specify one or more FinalBackupTags when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.
        public let finalBackupTags: [Tag]?
        /// Set SkipFinalBackup to false if you want to take a final backup of the file  system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the DeleteFileSystem operation is invoked. (Default = true)  The fsx:CreateBackup permission is required if you set SkipFinalBackup to false in order to delete the file system and take a final backup.
        public let skipFinalBackup: Bool?

        @inlinable
        public init(finalBackupTags: [Tag]? = nil, skipFinalBackup: Bool? = nil) {
            self.finalBackupTags = finalBackupTags
            self.skipFinalBackup = skipFinalBackup
        }

        /// Validates each tag individually, then enforces the 1–50 element bound on the tag list.
        public func validate(name: String) throws {
            try self.finalBackupTags?.forEach {
                try $0.validate(name: "\(name).finalBackupTags[]")
            }
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, max: 50)
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, min: 1)
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupTags = "FinalBackupTags"
            case skipFinalBackup = "SkipFinalBackup"
        }
    }

    /// The Lustre-specific portion of the `DeleteFileSystem` response.
    public struct DeleteFileSystemLustreResponse: AWSDecodableShape {
        /// The ID of the final backup for this file system.
        public let finalBackupId: String?
        /// The set of tags applied to the final backup.
        public let finalBackupTags: [Tag]?

        @inlinable
        public init(finalBackupId: String? = nil, finalBackupTags: [Tag]? = nil) {
            self.finalBackupId = finalBackupId
            self.finalBackupTags = finalBackupTags
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupId = "FinalBackupId"
            case finalBackupTags = "FinalBackupTags"
        }
    }

    /// The OpenZFS-specific configuration used when deleting a file system with `DeleteFileSystem`.
    public struct DeleteFileSystemOpenZFSConfiguration: AWSEncodableShape {
        /// A list of tags to apply to the file system's final backup.
        public let finalBackupTags: [Tag]?
        /// To delete a file system if there are child volumes present below the root volume, use the string DELETE_CHILD_VOLUMES_AND_SNAPSHOTS. If your file system has child volumes and you don't use this option, the delete request will fail.
        public let options: [DeleteFileSystemOpenZFSOption]?
        /// By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the DeleteFileSystem operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip taking a final backup, set this value to true.
        public let skipFinalBackup: Bool?

        @inlinable
        public init(finalBackupTags: [Tag]? = nil, options: [DeleteFileSystemOpenZFSOption]? = nil, skipFinalBackup: Bool? = nil) {
            self.finalBackupTags = finalBackupTags
            self.options = options
            self.skipFinalBackup = skipFinalBackup
        }

        /// Validates each tag, the 1–50 tag-list bound, and the at-most-one-element options list.
        public func validate(name: String) throws {
            try self.finalBackupTags?.forEach {
                try $0.validate(name: "\(name).finalBackupTags[]")
            }
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, max: 50)
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, min: 1)
            try self.validate(self.options, name: "options", parent: name, max: 1)
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupTags = "FinalBackupTags"
            case options = "Options"
            case skipFinalBackup = "SkipFinalBackup"
        }
    }

    /// The OpenZFS-specific portion of the `DeleteFileSystem` response.
    public struct DeleteFileSystemOpenZFSResponse: AWSDecodableShape {
        /// The ID of the final backup for this file system.
        public let finalBackupId: String?
        /// The set of tags applied to the final backup.
        public let finalBackupTags: [Tag]?

        @inlinable
        public init(finalBackupId: String? = nil, finalBackupTags: [Tag]? = nil) {
            self.finalBackupId = finalBackupId
            self.finalBackupTags = finalBackupTags
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupId = "FinalBackupId"
            case finalBackupTags = "FinalBackupTags"
        }
    }

    /// The request object for the `DeleteFileSystem` operation. At most one of the per-type
    /// configuration objects (Lustre, OpenZFS, Windows) is expected to apply to a given file system.
    public struct DeleteFileSystemRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This token is automatically filled on your behalf when using the Command Line Interface (CLI) or an Amazon Web Services SDK.
        public let clientRequestToken: String?
        /// The ID of the file system that you want to delete.
        public let fileSystemId: String?
        /// The configuration object for the Lustre file system used in the DeleteFileSystem operation.
        public let lustreConfiguration: DeleteFileSystemLustreConfiguration?
        /// The configuration object for the OpenZFS file system used in the DeleteFileSystem operation.
        public let openZFSConfiguration: DeleteFileSystemOpenZFSConfiguration?
        /// The configuration object for the Windows file system used in the DeleteFileSystem operation.
        public let windowsConfiguration: DeleteFileSystemWindowsConfiguration?

        @inlinable
        public init(clientRequestToken: String? = DeleteFileSystemRequest.idempotencyToken(), fileSystemId: String? = nil, lustreConfiguration: DeleteFileSystemLustreConfiguration? = nil, openZFSConfiguration: DeleteFileSystemOpenZFSConfiguration? = nil, windowsConfiguration: DeleteFileSystemWindowsConfiguration? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.lustreConfiguration = lustreConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.windowsConfiguration = windowsConfiguration
        }

        /// Validates token and ID constraints, then delegates to each nested configuration's validator.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" mirrors the upstream AWS service model — do not tighten here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            try self.windowsConfiguration?.validate(name: "\(name).windowsConfiguration")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case lustreConfiguration = "LustreConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case windowsConfiguration = "WindowsConfiguration"
        }
    }

    /// The response object for the `DeleteFileSystem` operation.
    public struct DeleteFileSystemResponse: AWSDecodableShape {
        /// The ID of the file system that's being deleted.
        public let fileSystemId: String?
        /// The file system lifecycle for the deletion request. If the DeleteFileSystem operation is successful, this status is DELETING.
        public let lifecycle: FileSystemLifecycle?
        /// The response object for the Lustre file system that's being deleted in the DeleteFileSystem operation.
        public let lustreResponse: DeleteFileSystemLustreResponse?
        /// The response object for the OpenZFS file system that's being deleted in the DeleteFileSystem operation.
        public let openZFSResponse: DeleteFileSystemOpenZFSResponse?
        /// The response object for the Windows file system that's being deleted in the DeleteFileSystem operation.
        public let windowsResponse: DeleteFileSystemWindowsResponse?

        @inlinable
        public init(fileSystemId: String? = nil, lifecycle: FileSystemLifecycle? = nil, lustreResponse: DeleteFileSystemLustreResponse? = nil, openZFSResponse: DeleteFileSystemOpenZFSResponse? = nil, windowsResponse: DeleteFileSystemWindowsResponse? = nil) {
            self.fileSystemId = fileSystemId
            self.lifecycle = lifecycle
            self.lustreResponse = lustreResponse
            self.openZFSResponse = openZFSResponse
            self.windowsResponse = windowsResponse
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case fileSystemId = "FileSystemId"
            case lifecycle = "Lifecycle"
            case lustreResponse = "LustreResponse"
            case openZFSResponse = "OpenZFSResponse"
            case windowsResponse = "WindowsResponse"
        }
    }

    /// The Windows-specific configuration used when deleting a file system with `DeleteFileSystem`.
    public struct DeleteFileSystemWindowsConfiguration: AWSEncodableShape {
        /// A set of tags for your final backup.
        public let finalBackupTags: [Tag]?
        /// By default, Amazon FSx for Windows takes a final backup on your behalf when the DeleteFileSystem operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.
        public let skipFinalBackup: Bool?

        @inlinable
        public init(finalBackupTags: [Tag]? = nil, skipFinalBackup: Bool? = nil) {
            self.finalBackupTags = finalBackupTags
            self.skipFinalBackup = skipFinalBackup
        }

        /// Validates each tag individually, then enforces the 1–50 element bound on the tag list.
        public func validate(name: String) throws {
            try self.finalBackupTags?.forEach {
                try $0.validate(name: "\(name).finalBackupTags[]")
            }
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, max: 50)
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, min: 1)
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupTags = "FinalBackupTags"
            case skipFinalBackup = "SkipFinalBackup"
        }
    }

    /// The Windows-specific portion of the `DeleteFileSystem` response.
    public struct DeleteFileSystemWindowsResponse: AWSDecodableShape {
        /// The ID of the final backup for this file system.
        public let finalBackupId: String?
        /// The set of tags applied to the final backup.
        public let finalBackupTags: [Tag]?

        @inlinable
        public init(finalBackupId: String? = nil, finalBackupTags: [Tag]? = nil) {
            self.finalBackupId = finalBackupId
            self.finalBackupTags = finalBackupTags
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupId = "FinalBackupId"
            case finalBackupTags = "FinalBackupTags"
        }
    }

    /// The request object for the `DeleteSnapshot` operation.
    public struct DeleteSnapshotRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?
        /// The ID of the snapshot that you want to delete.
        public let snapshotId: String?

        @inlinable
        public init(clientRequestToken: String? = DeleteSnapshotRequest.idempotencyToken(), snapshotId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.snapshotId = snapshotId
        }

        /// Performs client-side validation of field lengths and patterns before the request is sent.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" mirrors the upstream AWS service model — do not tighten here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, max: 28)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, min: 11)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, pattern: "^((fs)?volsnap-[0-9a-f]{8,})$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case snapshotId = "SnapshotId"
        }
    }

    /// The response object for the `DeleteSnapshot` operation.
    public struct DeleteSnapshotResponse: AWSDecodableShape {
        /// The lifecycle status of the snapshot. If the DeleteSnapshot operation is successful, this status is DELETING.
        public let lifecycle: SnapshotLifecycle?
        /// The ID of the deleted snapshot.
        public let snapshotId: String?

        @inlinable
        public init(lifecycle: SnapshotLifecycle? = nil, snapshotId: String? = nil) {
            self.lifecycle = lifecycle
            self.snapshotId = snapshotId
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case snapshotId = "SnapshotId"
        }
    }

    /// The request object for the `DeleteStorageVirtualMachine` operation.
    public struct DeleteStorageVirtualMachineRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?
        /// The ID of the SVM that you want to delete.
        public let storageVirtualMachineId: String?

        @inlinable
        public init(clientRequestToken: String? = DeleteStorageVirtualMachineRequest.idempotencyToken(), storageVirtualMachineId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.storageVirtualMachineId = storageVirtualMachineId
        }

        /// Performs client-side validation; note the SVM ID has a fixed length of 21 (min == max).
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" mirrors the upstream AWS service model — do not tighten here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, max: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, min: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, pattern: "^(svm-[0-9a-f]{17,})$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case storageVirtualMachineId = "StorageVirtualMachineId"
        }
    }

    /// The response object for the `DeleteStorageVirtualMachine` operation.
    public struct DeleteStorageVirtualMachineResponse: AWSDecodableShape {
        /// Describes the lifecycle state of the SVM being deleted.
        public let lifecycle: StorageVirtualMachineLifecycle?
        /// The ID of the SVM Amazon FSx is deleting.
        public let storageVirtualMachineId: String?

        @inlinable
        public init(lifecycle: StorageVirtualMachineLifecycle? = nil, storageVirtualMachineId: String? = nil) {
            self.lifecycle = lifecycle
            self.storageVirtualMachineId = storageVirtualMachineId
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case storageVirtualMachineId = "StorageVirtualMachineId"
        }
    }

    /// The ONTAP-specific configuration used when deleting a volume with `DeleteVolume`.
    public struct DeleteVolumeOntapConfiguration: AWSEncodableShape {
        /// Setting this to true allows a SnapLock administrator to delete an FSx for ONTAP SnapLock Enterprise volume  with unexpired write once, read many (WORM) files. The IAM permission fsx:BypassSnaplockEnterpriseRetention is also  required to delete SnapLock Enterprise volumes with unexpired WORM files. The default value is false.  For more information, see   Deleting a SnapLock volume.
        public let bypassSnaplockEnterpriseRetention: Bool?
        /// A list of tags (1–50 per the validation below) to apply to the volume's final backup.
        public let finalBackupTags: [Tag]?
        /// Set to true if you want to skip taking a final backup of the volume  you are deleting.
        public let skipFinalBackup: Bool?

        @inlinable
        public init(bypassSnaplockEnterpriseRetention: Bool? = nil, finalBackupTags: [Tag]? = nil, skipFinalBackup: Bool? = nil) {
            self.bypassSnaplockEnterpriseRetention = bypassSnaplockEnterpriseRetention
            self.finalBackupTags = finalBackupTags
            self.skipFinalBackup = skipFinalBackup
        }

        /// Validates each tag individually, then enforces the 1–50 element bound on the tag list.
        public func validate(name: String) throws {
            try self.finalBackupTags?.forEach {
                try $0.validate(name: "\(name).finalBackupTags[]")
            }
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, max: 50)
            try self.validate(self.finalBackupTags, name: "finalBackupTags", parent: name, min: 1)
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case bypassSnaplockEnterpriseRetention = "BypassSnaplockEnterpriseRetention"
            case finalBackupTags = "FinalBackupTags"
            case skipFinalBackup = "SkipFinalBackup"
        }
    }

    /// The ONTAP-specific portion of the `DeleteVolume` response.
    public struct DeleteVolumeOntapResponse: AWSDecodableShape {
        /// The ID of the final backup taken for the volume being deleted.
        public let finalBackupId: String?
        /// The set of tags applied to the final backup.
        public let finalBackupTags: [Tag]?

        @inlinable
        public init(finalBackupId: String? = nil, finalBackupTags: [Tag]? = nil) {
            self.finalBackupId = finalBackupId
            self.finalBackupTags = finalBackupTags
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case finalBackupId = "FinalBackupId"
            case finalBackupTags = "FinalBackupTags"
        }
    }

    /// The OpenZFS-specific configuration used when deleting a volume with `DeleteVolume`.
    public struct DeleteVolumeOpenZFSConfiguration: AWSEncodableShape {
        /// To delete the volume's child volumes, snapshots, and clones, use the string DELETE_CHILD_VOLUMES_AND_SNAPSHOTS.
        public let options: [DeleteOpenZFSVolumeOption]?

        @inlinable
        public init(options: [DeleteOpenZFSVolumeOption]? = nil) {
            self.options = options
        }

        /// Enforces that at most one option is supplied.
        public func validate(name: String) throws {
            try self.validate(self.options, name: "options", parent: name, max: 1)
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case options = "Options"
        }
    }

    /// The request object for the `DeleteVolume` operation.
    public struct DeleteVolumeRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.
        public let clientRequestToken: String?
        /// For Amazon FSx for ONTAP volumes, specify whether to take a final backup of the volume and apply tags to the backup. To apply tags to the backup, you must have the fsx:TagResource permission.
        public let ontapConfiguration: DeleteVolumeOntapConfiguration?
        /// For Amazon FSx for OpenZFS volumes, specify whether to delete all child volumes and snapshots.
        public let openZFSConfiguration: DeleteVolumeOpenZFSConfiguration?
        /// The ID of the volume that you are deleting.
        public let volumeId: String?

        @inlinable
        public init(clientRequestToken: String? = DeleteVolumeRequest.idempotencyToken(), ontapConfiguration: DeleteVolumeOntapConfiguration? = nil, openZFSConfiguration: DeleteVolumeOpenZFSConfiguration? = nil, volumeId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.volumeId = volumeId
        }

        /// Validates the token and volume ID (fixed length 23, min == max) and delegates to nested configurations.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" mirrors the upstream AWS service model — do not tighten here.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case volumeId = "VolumeId"
        }
    }

    /// The response object for the `DeleteVolume` operation.
    public struct DeleteVolumeResponse: AWSDecodableShape {
        /// The lifecycle state of the volume being deleted. If the DeleteVolume operation is successful, this value is DELETING.
        public let lifecycle: VolumeLifecycle?
        /// Returned after a DeleteVolume request, showing the status of the delete request.
        public let ontapResponse: DeleteVolumeOntapResponse?
        /// The ID of the volume that's being deleted.
        public let volumeId: String?

        @inlinable
        public init(lifecycle: VolumeLifecycle? = nil, ontapResponse: DeleteVolumeOntapResponse? = nil, volumeId: String? = nil) {
            self.lifecycle = lifecycle
            self.ontapResponse = ontapResponse
            self.volumeId = volumeId
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case ontapResponse = "OntapResponse"
            case volumeId = "VolumeId"
        }
    }

    /// The request object for the paginated `DescribeBackups` operation.
    public struct DescribeBackupsRequest: AWSEncodableShape {
        /// The IDs of the backups that you want to retrieve. This parameter value overrides any filters. If any IDs aren't found, a BackupNotFound error occurs.
        public let backupIds: [String]?
        /// The filters structure. The supported names are file-system-id, backup-type, file-system-type, and volume-id.
        public let filters: [Filter]?
        /// Maximum number of backups to return in the response. This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.
        public let maxResults: Int?
        /// An opaque pagination token returned from a previous DescribeBackups operation. If a token is present, the operation continues the list from where the returning call left off.
        public let nextToken: String?

        @inlinable
        public init(backupIds: [String]? = nil, filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
            self.backupIds = backupIds
            self.filters = filters
            self.maxResults = maxResults
            self.nextToken = nextToken
        }

        /// Validates each backup ID and filter, list-size bounds, the MaxResults range,
        /// and that NextToken looks like base64 (per the pattern below).
        public func validate(name: String) throws {
            try self.backupIds?.forEach {
                try validate($0, name: "backupIds[]", parent: name, max: 128)
                try validate($0, name: "backupIds[]", parent: name, min: 12)
                try validate($0, name: "backupIds[]", parent: name, pattern: "^(backup-[0-9a-f]{8,})$")
            }
            try self.validate(self.backupIds, name: "backupIds", parent: name, max: 50)
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            try self.validate(self.filters, name: "filters", parent: name, max: 10)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case backupIds = "BackupIds"
            case filters = "Filters"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
        }
    }

    /// The response object for the paginated `DescribeBackups` operation.
    public struct DescribeBackupsResponse: AWSDecodableShape {
        /// An array of backups.
        public let backups: [Backup]?
        /// A NextToken value is present if there are more backups than returned in the response. You can use the NextToken value in the subsequent request to fetch the backups.
        public let nextToken: String?

        @inlinable
        public init(backups: [Backup]? = nil, nextToken: String? = nil) {
            self.backups = backups
            self.nextToken = nextToken
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case backups = "Backups"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the paginated `DescribeDataRepositoryAssociations` operation.
    public struct DescribeDataRepositoryAssociationsRequest: AWSEncodableShape {
        /// IDs of the data repository associations whose descriptions you want to retrieve (String).
        public let associationIds: [String]?
        /// Filters used to narrow the set of associations returned. NOTE(review): the supported
        /// filter names are not visible here — confirm against the FSx API reference.
        public let filters: [Filter]?
        /// The maximum number of resources to return in the response. This value must be an integer greater than zero.
        public let maxResults: Int?
        /// An opaque pagination token returned from a previous call; if present, the operation continues the list from where the previous call left off.
        public let nextToken: String?

        @inlinable
        public init(associationIds: [String]? = nil, filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
            self.associationIds = associationIds
            self.filters = filters
            self.maxResults = maxResults
            self.nextToken = nextToken
        }

        /// Validates each association ID and filter, list-size bounds, the MaxResults range (1–25),
        /// and that NextToken looks like base64 (per the pattern below).
        public func validate(name: String) throws {
            try self.associationIds?.forEach {
                try validate($0, name: "associationIds[]", parent: name, max: 23)
                try validate($0, name: "associationIds[]", parent: name, min: 13)
                try validate($0, name: "associationIds[]", parent: name, pattern: "^(dra-[0-9a-f]{8,})$")
            }
            try self.validate(self.associationIds, name: "associationIds", parent: name, max: 50)
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            try self.validate(self.filters, name: "filters", parent: name, max: 10)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case associationIds = "AssociationIds"
            case filters = "Filters"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
        }
    }

    /// The response object for the paginated `DescribeDataRepositoryAssociations` operation.
    public struct DescribeDataRepositoryAssociationsResponse: AWSDecodableShape {
        /// An array of one or more data repository association descriptions.
        public let associations: [DataRepositoryAssociation]?
        /// A pagination token; present when more associations remain than were returned in this response.
        public let nextToken: String?

        @inlinable
        public init(associations: [DataRepositoryAssociation]? = nil, nextToken: String? = nil) {
            self.associations = associations
            self.nextToken = nextToken
        }

        // Maps Swift property names to the PascalCase keys of the AWS JSON wire format.
        private enum CodingKeys: String, CodingKey {
            case associations = "Associations"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the DescribeDataRepositoryTasks operation.
    public struct DescribeDataRepositoryTasksRequest: AWSEncodableShape {
        /// (Optional) You can use filters to narrow the DescribeDataRepositoryTasks response to  include just tasks for specific file systems, or tasks in a specific lifecycle state.
        public let filters: [DataRepositoryTaskFilter]?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?
        /// (Optional) IDs of the tasks whose descriptions you want to retrieve (String).
        public let taskIds: [String]?

        @inlinable
        public init(filters: [DataRepositoryTaskFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, taskIds: [String]? = nil) {
            self.filters = filters
            self.maxResults = maxResults
            self.nextToken = nextToken
            self.taskIds = taskIds
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            // At most 3 filters per request.
            try self.validate(self.filters, name: "filters", parent: name, max: 3)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
            // Each ID must be "task-" followed by at least 17 hex digits.
            try self.taskIds?.forEach {
                try validate($0, name: "taskIds[]", parent: name, max: 128)
                try validate($0, name: "taskIds[]", parent: name, min: 12)
                try validate($0, name: "taskIds[]", parent: name, pattern: "^(task-[0-9a-f]{17,})$")
            }
            // At most 50 task IDs per request.
            try self.validate(self.taskIds, name: "taskIds", parent: name, max: 50)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case filters = "Filters"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
            case taskIds = "TaskIds"
        }
    }

    /// The response object returned after a successful DescribeDataRepositoryTasks operation.
    public struct DescribeDataRepositoryTasksResponse: AWSDecodableShape {
        /// The collection of data repository task descriptions returned.
        public let dataRepositoryTasks: [DataRepositoryTask]?
        /// Pagination token; present when more tasks remain than were returned in this page.
        public let nextToken: String?

        @inlinable
        public init(dataRepositoryTasks: [DataRepositoryTask]? = nil, nextToken: String? = nil) {
            self.dataRepositoryTasks = dataRepositoryTasks
            self.nextToken = nextToken
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case dataRepositoryTasks = "DataRepositoryTasks"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the DescribeFileCaches operation.
    public struct DescribeFileCachesRequest: AWSEncodableShape {
        /// IDs of the caches whose descriptions you want to retrieve (String).
        public let fileCacheIds: [String]?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?

        @inlinable
        public init(fileCacheIds: [String]? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
            self.fileCacheIds = fileCacheIds
            self.maxResults = maxResults
            self.nextToken = nextToken
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            // Each ID must be "fc-" followed by at least 8 hex digits.
            try self.fileCacheIds?.forEach {
                try validate($0, name: "fileCacheIds[]", parent: name, max: 21)
                try validate($0, name: "fileCacheIds[]", parent: name, min: 11)
                try validate($0, name: "fileCacheIds[]", parent: name, pattern: "^(fc-[0-9a-f]{8,})$")
            }
            // At most 50 cache IDs per request.
            try self.validate(self.fileCacheIds, name: "fileCacheIds", parent: name, max: 50)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case fileCacheIds = "FileCacheIds"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
        }
    }

    public struct DescribeFileCachesResponse: AWSDecodableShape {
        /// The response object for the DescribeFileCaches operation.
        public let fileCaches: [FileCache]?
        /// Pagination token; present when more caches remain than were returned in this page.
        public let nextToken: String?

        @inlinable
        public init(fileCaches: [FileCache]? = nil, nextToken: String? = nil) {
            self.fileCaches = fileCaches
            self.nextToken = nextToken
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case fileCaches = "FileCaches"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the DescribeFileSystemAliases operation.
    public struct DescribeFileSystemAliasesRequest: AWSEncodableShape {
        /// Idempotency token; defaults to an auto-generated value (see the initializer).
        public let clientRequestToken: String?
        /// The ID of the file system to return the associated DNS aliases for (String).
        public let fileSystemId: String?
        /// Maximum number of DNS aliases to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous DescribeFileSystemAliases operation (String). If a token is included in the request, the action continues the list from where the previous returning call left off.
        public let nextToken: String?

        @inlinable
        public init(clientRequestToken: String? = DescribeFileSystemAliasesRequest.idempotencyToken(), fileSystemId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.maxResults = maxResults
            self.nextToken = nextToken
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is copied verbatim from the upstream AWS model; the A-z
            // range also admits a few punctuation characters. Do not "fix" here — it would
            // diverge from the service definition and the code generator.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // File system IDs are "fs-" followed by at least 8 hex digits.
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
        }
    }

    /// The response object returned after a successful DescribeFileSystemAliases operation.
    public struct DescribeFileSystemAliasesResponse: AWSDecodableShape {
        /// An array of one or more DNS aliases currently associated with the specified file system.
        public let aliases: [Alias]?
        /// Present if there are more DNS aliases than returned in the response (String). You can use the NextToken value in a later request to fetch additional descriptions.
        public let nextToken: String?

        @inlinable
        public init(aliases: [Alias]? = nil, nextToken: String? = nil) {
            self.aliases = aliases
            self.nextToken = nextToken
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case aliases = "Aliases"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the DescribeFileSystems operation.
    public struct DescribeFileSystemsRequest: AWSEncodableShape {
        /// IDs of the file systems whose descriptions you want to retrieve (String).
        public let fileSystemIds: [String]?
        /// Maximum number of file systems to return in the response (integer). This parameter value must be greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults parameter specified in the request and the service's internal maximum number of items per page.
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous DescribeFileSystems operation (String). If a token present, the operation continues the list from where the returning call left off.
        public let nextToken: String?

        @inlinable
        public init(fileSystemIds: [String]? = nil, maxResults: Int? = nil, nextToken: String? = nil) {
            self.fileSystemIds = fileSystemIds
            self.maxResults = maxResults
            self.nextToken = nextToken
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            // Each ID must be "fs-" followed by at least 8 hex digits.
            try self.fileSystemIds?.forEach {
                try validate($0, name: "fileSystemIds[]", parent: name, max: 21)
                try validate($0, name: "fileSystemIds[]", parent: name, min: 11)
                try validate($0, name: "fileSystemIds[]", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            }
            // At most 50 file system IDs per request.
            try self.validate(self.fileSystemIds, name: "fileSystemIds", parent: name, max: 50)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case fileSystemIds = "FileSystemIds"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
        }
    }

    /// The response object returned after a successful DescribeFileSystems operation.
    public struct DescribeFileSystemsResponse: AWSDecodableShape {
        /// An array of file system descriptions.
        public let fileSystems: [FileSystem]?
        /// Present if there are more file systems than returned in the response (String). You can use the NextToken value in the later request to fetch the descriptions.
        public let nextToken: String?

        @inlinable
        public init(fileSystems: [FileSystem]? = nil, nextToken: String? = nil) {
            self.fileSystems = fileSystems
            self.nextToken = nextToken
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case fileSystems = "FileSystems"
            case nextToken = "NextToken"
        }
    }

    /// The request object for the DescribeS3AccessPointAttachments operation.
    public struct DescribeS3AccessPointAttachmentsRequest: AWSEncodableShape {
        /// Enter a filter Name and Values pair to view a select set of S3 access point attachments.
        public let filters: [S3AccessPointAttachmentsFilter]?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// The names of the S3 access point attachments whose descriptions you want to retrieve.
        public let names: [String]?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?

        @inlinable
        public init(filters: [S3AccessPointAttachmentsFilter]? = nil, maxResults: Int? = nil, names: [String]? = nil, nextToken: String? = nil) {
            self.filters = filters
            self.maxResults = maxResults
            self.names = names
            self.nextToken = nextToken
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            // At most 2 filters per request.
            try self.validate(self.filters, name: "filters", parent: name, max: 2)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            // Names: 3-50 chars, lowercase alphanumerics and hyphens, must start and end alphanumeric.
            try self.names?.forEach {
                try validate($0, name: "names[]", parent: name, max: 50)
                try validate($0, name: "names[]", parent: name, min: 3)
                try validate($0, name: "names[]", parent: name, pattern: "^(?=[a-z0-9])[a-z0-9-]{1,48}[a-z0-9]$")
            }
            // At most 50 names per request.
            try self.validate(self.names, name: "names", parent: name, max: 50)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case filters = "Filters"
            case maxResults = "MaxResults"
            case names = "Names"
            case nextToken = "NextToken"
        }
    }

    /// The response object returned after a successful DescribeS3AccessPointAttachments operation.
    public struct DescribeS3AccessPointAttachmentsResponse: AWSDecodableShape {
        /// Pagination token; present when more attachments remain than were returned in this page.
        public let nextToken: String?
        /// Array of S3 access point attachments returned after a successful DescribeS3AccessPointAttachments operation.
        public let s3AccessPointAttachments: [S3AccessPointAttachment]?

        @inlinable
        public init(nextToken: String? = nil, s3AccessPointAttachments: [S3AccessPointAttachment]? = nil) {
            self.nextToken = nextToken
            self.s3AccessPointAttachments = s3AccessPointAttachments
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case nextToken = "NextToken"
            case s3AccessPointAttachments = "S3AccessPointAttachments"
        }
    }

    /// The request object for the DescribeSharedVpcConfiguration operation. Takes no parameters.
    public struct DescribeSharedVpcConfigurationRequest: AWSEncodableShape {
        public init() {}
    }

    /// The response object returned after a successful DescribeSharedVpcConfiguration operation.
    public struct DescribeSharedVpcConfigurationResponse: AWSDecodableShape {
        /// Indicates whether participant accounts can create FSx for ONTAP Multi-AZ file systems in shared subnets.
        /// NOTE(review): modeled as a String (not Bool) in the service definition.
        public let enableFsxRouteTableUpdatesFromParticipantAccounts: String?

        @inlinable
        public init(enableFsxRouteTableUpdatesFromParticipantAccounts: String? = nil) {
            self.enableFsxRouteTableUpdatesFromParticipantAccounts = enableFsxRouteTableUpdatesFromParticipantAccounts
        }

        /// Maps the Swift property name to the PascalCase key used on the wire.
        private enum CodingKeys: String, CodingKey {
            case enableFsxRouteTableUpdatesFromParticipantAccounts = "EnableFsxRouteTableUpdatesFromParticipantAccounts"
        }
    }

    /// The request object for the DescribeSnapshots operation.
    public struct DescribeSnapshotsRequest: AWSEncodableShape {
        /// The filters structure. The supported names are file-system-id or volume-id.
        public let filters: [SnapshotFilter]?
        /// Set to false (default) if you want to only see the snapshots owned by your Amazon Web Services account. Set to true if you want to see the snapshots in your account and the ones shared with you from another account.
        public let includeShared: Bool?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?
        /// The IDs of the snapshots that you want to retrieve. This parameter value overrides any filters. If any IDs aren't found, a SnapshotNotFound error occurs.
        public let snapshotIds: [String]?

        @inlinable
        public init(filters: [SnapshotFilter]? = nil, includeShared: Bool? = nil, maxResults: Int? = nil, nextToken: String? = nil, snapshotIds: [String]? = nil) {
            self.filters = filters
            self.includeShared = includeShared
            self.maxResults = maxResults
            self.nextToken = nextToken
            self.snapshotIds = snapshotIds
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            // At most 2 filters per request.
            try self.validate(self.filters, name: "filters", parent: name, max: 2)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
            // Each ID is "volsnap-" or "fsvolsnap-" followed by at least 8 hex digits.
            try self.snapshotIds?.forEach {
                try validate($0, name: "snapshotIds[]", parent: name, max: 28)
                try validate($0, name: "snapshotIds[]", parent: name, min: 11)
                try validate($0, name: "snapshotIds[]", parent: name, pattern: "^((fs)?volsnap-[0-9a-f]{8,})$")
            }
            // At most 50 snapshot IDs per request.
            try self.validate(self.snapshotIds, name: "snapshotIds", parent: name, max: 50)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case filters = "Filters"
            case includeShared = "IncludeShared"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
            case snapshotIds = "SnapshotIds"
        }
    }

    /// The response object returned after a successful DescribeSnapshots operation.
    public struct DescribeSnapshotsResponse: AWSDecodableShape {
        /// Pagination token; present when more snapshots remain than were returned in this page.
        public let nextToken: String?
        /// An array of snapshots.
        public let snapshots: [Snapshot]?

        @inlinable
        public init(nextToken: String? = nil, snapshots: [Snapshot]? = nil) {
            self.nextToken = nextToken
            self.snapshots = snapshots
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case nextToken = "NextToken"
            case snapshots = "Snapshots"
        }
    }

    /// The request object for the DescribeStorageVirtualMachines operation.
    public struct DescribeStorageVirtualMachinesRequest: AWSEncodableShape {
        /// Enter a filter name:value pair to view a select set of SVMs.
        public let filters: [StorageVirtualMachineFilter]?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?
        /// Enter the ID of one or more SVMs that you want to view.
        public let storageVirtualMachineIds: [String]?

        @inlinable
        public init(filters: [StorageVirtualMachineFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, storageVirtualMachineIds: [String]? = nil) {
            self.filters = filters
            self.maxResults = maxResults
            self.nextToken = nextToken
            self.storageVirtualMachineIds = storageVirtualMachineIds
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            // At most 1 filter per request.
            try self.validate(self.filters, name: "filters", parent: name, max: 1)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
            // Each ID is exactly 21 characters: "svm-" plus 17 hex digits (min == max == 21).
            try self.storageVirtualMachineIds?.forEach {
                try validate($0, name: "storageVirtualMachineIds[]", parent: name, max: 21)
                try validate($0, name: "storageVirtualMachineIds[]", parent: name, min: 21)
                try validate($0, name: "storageVirtualMachineIds[]", parent: name, pattern: "^(svm-[0-9a-f]{17,})$")
            }
            // At most 50 SVM IDs per request.
            try self.validate(self.storageVirtualMachineIds, name: "storageVirtualMachineIds", parent: name, max: 50)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case filters = "Filters"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
            case storageVirtualMachineIds = "StorageVirtualMachineIds"
        }
    }

    /// The response object returned after a successful DescribeStorageVirtualMachines operation.
    public struct DescribeStorageVirtualMachinesResponse: AWSDecodableShape {
        /// Pagination token; present when more SVMs remain than were returned in this page.
        public let nextToken: String?
        /// Returned after a successful DescribeStorageVirtualMachines operation, describing each SVM.
        public let storageVirtualMachines: [StorageVirtualMachine]?

        @inlinable
        public init(nextToken: String? = nil, storageVirtualMachines: [StorageVirtualMachine]? = nil) {
            self.nextToken = nextToken
            self.storageVirtualMachines = storageVirtualMachines
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case nextToken = "NextToken"
            case storageVirtualMachines = "StorageVirtualMachines"
        }
    }

    /// The request object for the DescribeVolumes operation.
    public struct DescribeVolumesRequest: AWSEncodableShape {
        /// Enter a filter Name and Values pair to view a select set of volumes.
        public let filters: [VolumeFilter]?
        /// Maximum number of items to return in the response (must be at least 1; see validate()).
        public let maxResults: Int?
        /// Opaque pagination token returned from a previous call; pass it back to continue listing.
        public let nextToken: String?
        /// The IDs of the volumes whose descriptions you want to retrieve.
        public let volumeIds: [String]?

        @inlinable
        public init(filters: [VolumeFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, volumeIds: [String]? = nil) {
            self.filters = filters
            self.maxResults = maxResults
            self.nextToken = nextToken
            self.volumeIds = volumeIds
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.filters?.forEach {
                try $0.validate(name: "\(name).filters[]")
            }
            // At most 2 filters per request.
            try self.validate(self.filters, name: "filters", parent: name, max: 2)
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            // Token must be base64-shaped (pattern taken from the service model).
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
            // Each ID is exactly 23 characters: "fsvol-" plus 17 hex digits (min == max == 23).
            try self.volumeIds?.forEach {
                try validate($0, name: "volumeIds[]", parent: name, max: 23)
                try validate($0, name: "volumeIds[]", parent: name, min: 23)
                try validate($0, name: "volumeIds[]", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
            }
            // At most 50 volume IDs per request.
            try self.validate(self.volumeIds, name: "volumeIds", parent: name, max: 50)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case filters = "Filters"
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
            case volumeIds = "VolumeIds"
        }
    }

    /// The response object returned after a successful DescribeVolumes operation.
    public struct DescribeVolumesResponse: AWSDecodableShape {
        /// Pagination token; present when more volumes remain than were returned in this page.
        public let nextToken: String?
        /// Returned after a successful DescribeVolumes operation, describing each volume.
        public let volumes: [Volume]?

        @inlinable
        public init(nextToken: String? = nil, volumes: [Volume]? = nil) {
            self.nextToken = nextToken
            self.volumes = volumes
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case nextToken = "NextToken"
            case volumes = "Volumes"
        }
    }

    /// The request object for the DetachAndDeleteS3AccessPoint operation.
    public struct DetachAndDeleteS3AccessPointRequest: AWSEncodableShape {
        /// Idempotency token; defaults to an auto-generated value (see the initializer).
        public let clientRequestToken: String?
        /// The name of the S3 access point attachment that you want to delete.
        public let name: String?

        @inlinable
        public init(clientRequestToken: String? = DetachAndDeleteS3AccessPointRequest.idempotencyToken(), name: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.name = name
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is copied verbatim from the upstream AWS model; the A-z
            // range also admits a few punctuation characters. Do not "fix" here — it would
            // diverge from the service definition and the code generator.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // Attachment name: 3-50 chars, lowercase alphanumerics and hyphens, alphanumeric at both ends.
            try self.validate(self.name, name: "name", parent: name, max: 50)
            try self.validate(self.name, name: "name", parent: name, min: 3)
            try self.validate(self.name, name: "name", parent: name, pattern: "^(?=[a-z0-9])[a-z0-9-]{1,48}[a-z0-9]$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
        }
    }

    /// The response object returned after a successful DetachAndDeleteS3AccessPoint operation.
    public struct DetachAndDeleteS3AccessPointResponse: AWSDecodableShape {
        /// The lifecycle status of the S3 access point attachment.
        public let lifecycle: S3AccessPointAttachmentLifecycle?
        /// The name of the S3 access point attachment being deleted.
        public let name: String?

        @inlinable
        public init(lifecycle: S3AccessPointAttachmentLifecycle? = nil, name: String? = nil) {
            self.lifecycle = lifecycle
            self.name = name
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case lifecycle = "Lifecycle"
            case name = "Name"
        }
    }

    /// The request object for the DisassociateFileSystemAliases operation.
    public struct DisassociateFileSystemAliasesRequest: AWSEncodableShape {
        /// An array of one or more DNS alias names to disassociate, or remove, from the file system.
        public let aliases: [String]?
        /// Idempotency token; defaults to an auto-generated value (see the initializer).
        public let clientRequestToken: String?
        /// Specifies the file system from which to disassociate the DNS aliases.
        public let fileSystemId: String?

        @inlinable
        public init(aliases: [String]? = nil, clientRequestToken: String? = DisassociateFileSystemAliasesRequest.idempotencyToken(), fileSystemId: String? = nil) {
            self.aliases = aliases
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
        }

        /// Client-side validation of the AWS service-model constraints, performed before the request is sent.
        public func validate(name: String) throws {
            // Aliases: 4-253 chars; pattern excludes NUL, line/paragraph separators and CR/LF.
            try self.aliases?.forEach {
                try validate($0, name: "aliases[]", parent: name, max: 253)
                try validate($0, name: "aliases[]", parent: name, min: 4)
                try validate($0, name: "aliases[]", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,253}$")
            }
            // At most 50 aliases per request.
            try self.validate(self.aliases, name: "aliases", parent: name, max: 50)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is copied verbatim from the upstream AWS model; the A-z
            // range also admits a few punctuation characters. Do not "fix" here — it would
            // diverge from the service definition and the code generator.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // File system IDs are "fs-" followed by at least 8 hex digits.
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case aliases = "Aliases"
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
        }
    }

    /// The response object returned after a successful DisassociateFileSystemAliases operation.
    public struct DisassociateFileSystemAliasesResponse: AWSDecodableShape {
        /// An array of one or more DNS aliases that Amazon FSx is attempting to disassociate from the file system.
        public let aliases: [Alias]?

        @inlinable
        public init(aliases: [Alias]? = nil) {
            self.aliases = aliases
        }

        /// Maps the Swift property name to the PascalCase key used on the wire.
        private enum CodingKeys: String, CodingKey {
            case aliases = "Aliases"
        }
    }

    /// The SSD IOPS (input/output operations per second) configuration; used in both requests and responses.
    public struct DiskIopsConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// The total number of SSD IOPS provisioned for the file system. The minimum and maximum values for this property depend on the value of HAPairs and StorageCapacity. The minimum value is calculated as StorageCapacity * 3 * HAPairs (3 IOPS per GB of StorageCapacity). The maximum value is calculated as 200,000 * HAPairs. Amazon FSx responds with an HTTP status code 400 (Bad Request) if the value of Iops is outside of the minimum or maximum values.
        public let iops: Int64?
        /// Specifies whether the file system is  using the AUTOMATIC setting of SSD IOPS of 3 IOPS per GB of storage capacity, or  if it using a USER_PROVISIONED value.
        public let mode: DiskIopsConfigurationMode?

        @inlinable
        public init(iops: Int64? = nil, mode: DiskIopsConfigurationMode? = nil) {
            self.iops = iops
            self.mode = mode
        }

        /// Client-side validation: iops must be in the range 0...2_400_000 (HAPairs-dependent
        /// bounds described above are enforced server-side, not here).
        public func validate(name: String) throws {
            try self.validate(self.iops, name: "iops", parent: name, max: 2400000)
            try self.validate(self.iops, name: "iops", parent: name, min: 0)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case iops = "Iops"
            case mode = "Mode"
        }
    }

    /// Defines how long a file must have been unaccessed before a release task may release it.
    public struct DurationSinceLastAccess: AWSEncodableShape & AWSDecodableShape {
        /// The unit of time used by the Value parameter to determine if a file can be released, based on when it was last accessed. DAYS is the only supported value. This is a required parameter.
        public let unit: Unit?
        /// An integer that represents the minimum amount of time (in days) since a file was last accessed in the file system. Only exported files with a MAX(atime, ctime, mtime) timestamp that is more than this amount of time in the past (relative to the task create time) will be released. The default of Value is 0. This is a required parameter.  If an exported file meets the last accessed time criteria, its file or directory path must also be specified in the Paths parameter of the  operation in order for the file to be released.
        public let value: Int64?

        @inlinable
        public init(unit: Unit? = nil, value: Int64? = nil) {
            self.unit = unit
            self.value = value
        }

        /// Client-side validation: value must be non-negative (no upper bound in the model).
        public func validate(name: String) throws {
            try self.validate(self.value, name: "value", parent: name, min: 0)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case unit = "Unit"
            case value = "Value"
        }
    }

    public struct FileCache: AWSDecodableShape {
        /// The time the cache was created.
        public let creationTime: Date?
        /// A list of IDs of data repository associations that are associated with this cache.
        public let dataRepositoryAssociationIds: [String]?
        /// The Domain Name System (DNS) name for the cache.
        public let dnsName: String?
        /// A structure providing details of any failures that occurred.
        public let failureDetails: FileCacheFailureDetails?
        /// The system-generated, unique ID of the cache.
        public let fileCacheId: String?
        /// The type of cache, which must be LUSTRE.
        public let fileCacheType: FileCacheType?
        /// The Lustre version of the cache, which must be 2.12.
        public let fileCacheTypeVersion: String?
        /// Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.
        public let kmsKeyId: String?
        /// The lifecycle status of the cache. The following are the possible values and what they mean:    AVAILABLE - The cache is in a healthy state, and is reachable and available for use.    CREATING - The new cache is being created.    DELETING - An existing cache is being deleted.    UPDATING - The cache is undergoing a customer-initiated update.    FAILED - An existing cache has experienced an unrecoverable failure.  When creating a new cache, the cache was unable to be created.
        public let lifecycle: FileCacheLifecycle?
        /// The configuration for the Amazon File Cache resource.
        public let lustreConfiguration: FileCacheLustreConfiguration?
        /// The IDs of the network interfaces associated with the cache.
        public let networkInterfaceIds: [String]?
        /// The Amazon Web Services account that owns the cache.
        public let ownerId: String?
        /// The Amazon Resource Name (ARN) of the cache resource.
        public let resourceARN: String?
        /// The storage capacity of the cache in gibibytes (GiB).
        public let storageCapacity: Int?
        /// The IDs of the subnets the cache is accessible from.
        public let subnetIds: [String]?
        /// The ID of the virtual private cloud (VPC) the cache was created in.
        public let vpcId: String?

        @inlinable
        public init(creationTime: Date? = nil, dataRepositoryAssociationIds: [String]? = nil, dnsName: String? = nil, failureDetails: FileCacheFailureDetails? = nil, fileCacheId: String? = nil, fileCacheType: FileCacheType? = nil, fileCacheTypeVersion: String? = nil, kmsKeyId: String? = nil, lifecycle: FileCacheLifecycle? = nil, lustreConfiguration: FileCacheLustreConfiguration? = nil, networkInterfaceIds: [String]? = nil, ownerId: String? = nil, resourceARN: String? = nil, storageCapacity: Int? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) {
            self.creationTime = creationTime
            self.dataRepositoryAssociationIds = dataRepositoryAssociationIds
            self.dnsName = dnsName
            self.failureDetails = failureDetails
            self.fileCacheId = fileCacheId
            self.fileCacheType = fileCacheType
            self.fileCacheTypeVersion = fileCacheTypeVersion
            self.kmsKeyId = kmsKeyId
            self.lifecycle = lifecycle
            self.lustreConfiguration = lustreConfiguration
            self.networkInterfaceIds = networkInterfaceIds
            self.ownerId = ownerId
            self.resourceARN = resourceARN
            self.storageCapacity = storageCapacity
            self.subnetIds = subnetIds
            self.vpcId = vpcId
        }

        private enum CodingKeys: String, CodingKey {
            case creationTime = "CreationTime"
            case dataRepositoryAssociationIds = "DataRepositoryAssociationIds"
            case dnsName = "DNSName"
            case failureDetails = "FailureDetails"
            case fileCacheId = "FileCacheId"
            case fileCacheType = "FileCacheType"
            case fileCacheTypeVersion = "FileCacheTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lifecycle = "Lifecycle"
            case lustreConfiguration = "LustreConfiguration"
            case networkInterfaceIds = "NetworkInterfaceIds"
            case ownerId = "OwnerId"
            case resourceARN = "ResourceARN"
            case storageCapacity = "StorageCapacity"
            case subnetIds = "SubnetIds"
            case vpcId = "VpcId"
        }
    }

    public struct FileCacheCreating: AWSDecodableShape {
        /// A boolean flag indicating whether tags for the cache should be copied to data repository associations.
        public let copyTagsToDataRepositoryAssociations: Bool?
        /// The time the cache was created.
        public let creationTime: Date?
        /// A list of IDs of data repository associations that are associated with this cache.
        public let dataRepositoryAssociationIds: [String]?
        /// The Domain Name System (DNS) name for the cache.
        public let dnsName: String?
        /// A structure providing details of any failures that occurred in creating a cache.
        public let failureDetails: FileCacheFailureDetails?
        /// The system-generated, unique ID of the cache.
        public let fileCacheId: String?
        /// The type of cache, which must be LUSTRE.
        public let fileCacheType: FileCacheType?
        /// The Lustre version of the cache, which must be 2.12.
        public let fileCacheTypeVersion: String?
        /// Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.
        public let kmsKeyId: String?
        /// The lifecycle status of the cache. The following are the possible values and what they mean:    AVAILABLE - The cache is in a healthy state, and is reachable and available for use.    CREATING - The new cache is being created.    DELETING - An existing cache is being deleted.    UPDATING - The cache is undergoing a customer-initiated update.    FAILED - An existing cache has experienced an unrecoverable failure.  When creating a new cache, the cache was unable to be created.
        public let lifecycle: FileCacheLifecycle?
        /// The configuration for the Amazon File Cache resource.
        public let lustreConfiguration: FileCacheLustreConfiguration?
        /// The IDs of the network interfaces associated with the cache.
        public let networkInterfaceIds: [String]?
        /// The Amazon Web Services account that owns the cache.
        public let ownerId: String?
        /// The Amazon Resource Name (ARN) of the cache resource.
        public let resourceARN: String?
        /// The storage capacity of the cache in gibibytes (GiB).
        public let storageCapacity: Int?
        /// The IDs of the subnets the cache is accessible from.
        public let subnetIds: [String]?
        /// The tags associated with the cache.
        public let tags: [Tag]?
        /// The ID of the virtual private cloud (VPC) the cache was created in.
        public let vpcId: String?

        @inlinable
        public init(copyTagsToDataRepositoryAssociations: Bool? = nil, creationTime: Date? = nil, dataRepositoryAssociationIds: [String]? = nil, dnsName: String? = nil, failureDetails: FileCacheFailureDetails? = nil, fileCacheId: String? = nil, fileCacheType: FileCacheType? = nil, fileCacheTypeVersion: String? = nil, kmsKeyId: String? = nil, lifecycle: FileCacheLifecycle? = nil, lustreConfiguration: FileCacheLustreConfiguration? = nil, networkInterfaceIds: [String]? = nil, ownerId: String? = nil, resourceARN: String? = nil, storageCapacity: Int? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, vpcId: String? = nil) {
            self.copyTagsToDataRepositoryAssociations = copyTagsToDataRepositoryAssociations
            self.creationTime = creationTime
            self.dataRepositoryAssociationIds = dataRepositoryAssociationIds
            self.dnsName = dnsName
            self.failureDetails = failureDetails
            self.fileCacheId = fileCacheId
            self.fileCacheType = fileCacheType
            self.fileCacheTypeVersion = fileCacheTypeVersion
            self.kmsKeyId = kmsKeyId
            self.lifecycle = lifecycle
            self.lustreConfiguration = lustreConfiguration
            self.networkInterfaceIds = networkInterfaceIds
            self.ownerId = ownerId
            self.resourceARN = resourceARN
            self.storageCapacity = storageCapacity
            self.subnetIds = subnetIds
            self.tags = tags
            self.vpcId = vpcId
        }

        private enum CodingKeys: String, CodingKey {
            case copyTagsToDataRepositoryAssociations = "CopyTagsToDataRepositoryAssociations"
            case creationTime = "CreationTime"
            case dataRepositoryAssociationIds = "DataRepositoryAssociationIds"
            case dnsName = "DNSName"
            case failureDetails = "FailureDetails"
            case fileCacheId = "FileCacheId"
            case fileCacheType = "FileCacheType"
            case fileCacheTypeVersion = "FileCacheTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lifecycle = "Lifecycle"
            case lustreConfiguration = "LustreConfiguration"
            case networkInterfaceIds = "NetworkInterfaceIds"
            case ownerId = "OwnerId"
            case resourceARN = "ResourceARN"
            case storageCapacity = "StorageCapacity"
            case subnetIds = "SubnetIds"
            case tags = "Tags"
            case vpcId = "VpcId"
        }
    }

    public struct FileCacheDataRepositoryAssociation: AWSEncodableShape {
        /// The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:   The path can be an NFS data repository that links to the cache. The path can be in one of two formats:   If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association.   If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter.     The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional).
        public let dataRepositoryPath: String?
        /// A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories.
        public let dataRepositorySubdirectories: [String]?
        /// A path on the cache that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/, then you cannot link another data repository with cache path /ns1/ns2. This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.  The cache path can only be set to root (/) on an NFS DRA when DataRepositorySubdirectories is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache. The cache path cannot be set to root (/) for an S3 DRA.
        public let fileCachePath: String?
        /// The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.
        public let nfs: FileCacheNFSConfiguration?

        @inlinable
        public init(dataRepositoryPath: String? = nil, dataRepositorySubdirectories: [String]? = nil, fileCachePath: String? = nil, nfs: FileCacheNFSConfiguration? = nil) {
            self.nfs = nfs
            self.fileCachePath = fileCachePath
            self.dataRepositorySubdirectories = dataRepositorySubdirectories
            self.dataRepositoryPath = dataRepositoryPath
        }

        /// Validates path lengths/patterns, the subdirectory list, and the nested NFS configuration.
        public func validate(name: String) throws {
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, max: 4357)
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, min: 3)
            try self.validate(self.dataRepositoryPath, name: "dataRepositoryPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$")
            if let subdirectories = self.dataRepositorySubdirectories {
                for subdirectory in subdirectories {
                    try validate(subdirectory, name: "dataRepositorySubdirectories[]", parent: name, max: 4096)
                    try validate(subdirectory, name: "dataRepositorySubdirectories[]", parent: name, min: 1)
                    try validate(subdirectory, name: "dataRepositorySubdirectories[]", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$")
                }
            }
            try self.validate(self.dataRepositorySubdirectories, name: "dataRepositorySubdirectories", parent: name, max: 500)
            try self.validate(self.fileCachePath, name: "fileCachePath", parent: name, max: 4096)
            try self.validate(self.fileCachePath, name: "fileCachePath", parent: name, min: 1)
            try self.validate(self.fileCachePath, name: "fileCachePath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,4096}$")
            try self.nfs?.validate(name: "\(name).nfs")
        }

        private enum CodingKeys: String, CodingKey {
            case nfs = "NFS"
            case fileCachePath = "FileCachePath"
            case dataRepositorySubdirectories = "DataRepositorySubdirectories"
            case dataRepositoryPath = "DataRepositoryPath"
        }
    }

    public struct FileCacheFailureDetails: AWSDecodableShape {
        /// A message describing any failures that occurred.
        public let message: String?

        @inlinable
        public init(message: String? = nil) { self.message = message }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    public struct FileCacheLustreConfiguration: AWSDecodableShape {
        /// The deployment type of the Amazon File Cache resource, which must be CACHE_1.
        public let deploymentType: FileCacheLustreDeploymentType?
        /// The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.
        public let logConfiguration: LustreLogConfiguration?
        /// The configuration for a Lustre MDT (Metadata Target) storage volume.
        public let metadataConfiguration: FileCacheLustreMetadataConfiguration?
        /// You use the MountName value when mounting the cache. If you pass a cache ID to the DescribeFileCaches operation, it returns the MountName value as part of the cache's description.
        public let mountName: String?
        /// Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is 1000.
        public let perUnitStorageThroughput: Int?
        /// The preferred start time for weekly maintenance.
        public let weeklyMaintenanceStartTime: String?

        @inlinable
        public init(deploymentType: FileCacheLustreDeploymentType? = nil, logConfiguration: LustreLogConfiguration? = nil, metadataConfiguration: FileCacheLustreMetadataConfiguration? = nil, mountName: String? = nil, perUnitStorageThroughput: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
            self.perUnitStorageThroughput = perUnitStorageThroughput
            self.mountName = mountName
            self.metadataConfiguration = metadataConfiguration
            self.logConfiguration = logConfiguration
            self.deploymentType = deploymentType
        }

        private enum CodingKeys: String, CodingKey {
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
            case perUnitStorageThroughput = "PerUnitStorageThroughput"
            case mountName = "MountName"
            case metadataConfiguration = "MetadataConfiguration"
            case logConfiguration = "LogConfiguration"
            case deploymentType = "DeploymentType"
        }
    }

    public struct FileCacheLustreMetadataConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is 2400 GiB.
        public let storageCapacity: Int?

        @inlinable
        public init(storageCapacity: Int? = nil) { self.storageCapacity = storageCapacity }

        /// Checks that `storageCapacity` fits in the non-negative Int32 range.
        /// Note: a value cannot violate both bounds at once, so check order does not affect behavior.
        public func validate(name: String) throws {
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, min: 0)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, max: 2147483647)
        }

        private enum CodingKeys: String, CodingKey {
            case storageCapacity = "StorageCapacity"
        }
    }

    public struct FileCacheNFSConfiguration: AWSEncodableShape {
        /// A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.
        public let dnsIps: [String]?
        /// The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is NFS3, which indicates that the data repository must support the NFSv3 protocol.
        public let version: NfsVersion?

        @inlinable
        public init(dnsIps: [String]? = nil, version: NfsVersion? = nil) {
            self.version = version
            self.dnsIps = dnsIps
        }

        /// Validates each DNS server address (length and IPv4/IPv6 pattern) and the list size.
        public func validate(name: String) throws {
            // Matches either a dotted-quad IPv4 address or the full range of IPv6 textual forms.
            let ipAddressPattern = "^(^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$|^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)$"
            if let addresses = self.dnsIps {
                for address in addresses {
                    try validate(address, name: "dnsIps[]", parent: name, max: 45)
                    try validate(address, name: "dnsIps[]", parent: name, min: 1)
                    try validate(address, name: "dnsIps[]", parent: name, pattern: ipAddressPattern)
                }
            }
            try self.validate(self.dnsIps, name: "dnsIps", parent: name, max: 10)
        }

        private enum CodingKeys: String, CodingKey {
            case version = "Version"
            case dnsIps = "DnsIps"
        }
    }

    public struct FileSystem: AWSDecodableShape {
        /// A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you have initiated using the UpdateFileSystem operation.
        public let administrativeActions: [AdministrativeAction]?
        /// The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.
        public let creationTime: Date?
        /// The Domain Name System (DNS) name for the file system.
        public let dnsName: String?
        /// A structure providing details of any failures that occurred.
        public let failureDetails: FileSystemFailureDetails?
        /// The system-generated, unique 17-digit ID of the file system.
        public let fileSystemId: String?
        /// The type of Amazon FSx file system, which can be LUSTRE, WINDOWS, ONTAP, or OPENZFS.
        public let fileSystemType: FileSystemType?
        /// The Lustre version of the Amazon FSx for Lustre file system, which can be 2.10, 2.12, or 2.15.
        public let fileSystemTypeVersion: String?
        /// The ID of the Key Management Service (KMS) key used to encrypt Amazon FSx file system data. Used as follows with Amazon FSx file system types:   Amazon FSx for Lustre PERSISTENT_1 and PERSISTENT_2 deployment types only.  SCRATCH_1 and SCRATCH_2 types are encrypted using  the Amazon FSx service KMS key for your account.   Amazon FSx for NetApp ONTAP   Amazon FSx for OpenZFS   Amazon FSx for Windows File Server
        public let kmsKeyId: String?
        /// The lifecycle status of the file system. The following are the possible values and what they mean:    AVAILABLE - The file system is in a healthy state, and is reachable and available for use.    CREATING - Amazon FSx is creating the new file system.    DELETING - Amazon FSx is deleting an existing file system.    FAILED - An existing file system has experienced an unrecoverable failure.  When creating a new file system, Amazon FSx was unable to create the file system.    MISCONFIGURED - The file system is in a failed but recoverable state.    MISCONFIGURED_UNAVAILABLE - (Amazon FSx for Windows File Server only) The file system is currently unavailable due to a change in your Active Directory configuration.    UPDATING - The file system is undergoing a customer-initiated update.
        public let lifecycle: FileSystemLifecycle?
        /// The configuration for this Amazon FSx for Lustre file system.
        public let lustreConfiguration: LustreFileSystemConfiguration?
        /// The IDs of the elastic network interfaces from which a specific file system is accessible. The elastic network interface is automatically created in the same virtual private cloud (VPC) that the Amazon FSx file system was created in. For more information, see Elastic Network Interfaces in the Amazon EC2 User Guide.  For an Amazon FSx for Windows File Server file system, you can have one network interface ID. For an Amazon FSx for Lustre file system, you can have more than one.
        public let networkInterfaceIds: [String]?
        /// The network type of the file system.
        public let networkType: NetworkType?
        /// The configuration for this Amazon FSx for NetApp ONTAP file system.
        public let ontapConfiguration: OntapFileSystemConfiguration?
        /// The configuration for this Amazon FSx for OpenZFS file system.
        public let openZFSConfiguration: OpenZFSFileSystemConfiguration?
        /// The Amazon Web Services account that created the file system. If the file system was created by a user in IAM Identity Center, the Amazon Web Services account to which the IAM user belongs is the owner.
        public let ownerId: String?
        /// The Amazon Resource Name (ARN) of the file system resource.
        public let resourceARN: String?
        /// The storage capacity of the file system in gibibytes (GiB). Amazon FSx responds with an HTTP status code 400 (Bad Request) if the value of StorageCapacity is outside of the minimum or maximum values.
        public let storageCapacity: Int?
        /// The type of storage the file system is using.   If set to SSD, the file system uses solid state drive storage.   If set to HDD, the file system uses hard disk drive storage.   If set to INTELLIGENT_TIERING, the file system uses fully elastic, intelligently-tiered storage.
        public let storageType: StorageType?
        /// Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and ONTAP MULTI_AZ_1 file system deployment type, there are two subnet IDs, one for the preferred file server and one for the standby file server. The preferred file server subnet identified in the PreferredSubnetID property. All other file systems have only one subnet ID. For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of  the subnet that contains the file system's endpoint. For MULTI_AZ_1 Windows and ONTAP file systems, the file system endpoint is available in the PreferredSubnetID.
        public let subnetIds: [String]?
        /// The tags to associate with the file system. For more information, see Tagging your Amazon FSx resources in the Amazon FSx for Lustre User Guide.
        public let tags: [Tag]?
        /// The ID of the primary virtual private cloud (VPC) for the file system.
        public let vpcId: String?
        /// The configuration for this Amazon FSx for Windows File Server file system.
        public let windowsConfiguration: WindowsFileSystemConfiguration?

        @inlinable
        public init(administrativeActions: [AdministrativeAction]? = nil, creationTime: Date? = nil, dnsName: String? = nil, failureDetails: FileSystemFailureDetails? = nil, fileSystemId: String? = nil, fileSystemType: FileSystemType? = nil, fileSystemTypeVersion: String? = nil, kmsKeyId: String? = nil, lifecycle: FileSystemLifecycle? = nil, lustreConfiguration: LustreFileSystemConfiguration? = nil, networkInterfaceIds: [String]? = nil, networkType: NetworkType? = nil, ontapConfiguration: OntapFileSystemConfiguration? = nil, openZFSConfiguration: OpenZFSFileSystemConfiguration? = nil, ownerId: String? = nil, resourceARN: String? = nil, storageCapacity: Int? = nil, storageType: StorageType? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, vpcId: String? = nil, windowsConfiguration: WindowsFileSystemConfiguration? = nil) {
            self.administrativeActions = administrativeActions
            self.creationTime = creationTime
            self.dnsName = dnsName
            self.failureDetails = failureDetails
            self.fileSystemId = fileSystemId
            self.fileSystemType = fileSystemType
            self.fileSystemTypeVersion = fileSystemTypeVersion
            self.kmsKeyId = kmsKeyId
            self.lifecycle = lifecycle
            self.lustreConfiguration = lustreConfiguration
            self.networkInterfaceIds = networkInterfaceIds
            self.networkType = networkType
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.ownerId = ownerId
            self.resourceARN = resourceARN
            self.storageCapacity = storageCapacity
            self.storageType = storageType
            self.subnetIds = subnetIds
            self.tags = tags
            self.vpcId = vpcId
            self.windowsConfiguration = windowsConfiguration
        }

        private enum CodingKeys: String, CodingKey {
            case administrativeActions = "AdministrativeActions"
            case creationTime = "CreationTime"
            case dnsName = "DNSName"
            case failureDetails = "FailureDetails"
            case fileSystemId = "FileSystemId"
            case fileSystemType = "FileSystemType"
            case fileSystemTypeVersion = "FileSystemTypeVersion"
            case kmsKeyId = "KmsKeyId"
            case lifecycle = "Lifecycle"
            case lustreConfiguration = "LustreConfiguration"
            case networkInterfaceIds = "NetworkInterfaceIds"
            case networkType = "NetworkType"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case ownerId = "OwnerId"
            case resourceARN = "ResourceARN"
            case storageCapacity = "StorageCapacity"
            case storageType = "StorageType"
            case subnetIds = "SubnetIds"
            case tags = "Tags"
            case vpcId = "VpcId"
            case windowsConfiguration = "WindowsConfiguration"
        }
    }

    public struct FileSystemEndpoint: AWSDecodableShape {
        /// The DNS name of the endpoint.
        public let dnsName: String?
        /// The IPv4 addresses of the file system endpoint.
        public let ipAddresses: [String]?
        /// The IPv6 addresses of the file system endpoint.
        public let ipv6Addresses: [String]?

        @inlinable
        public init(dnsName: String? = nil, ipAddresses: [String]? = nil, ipv6Addresses: [String]? = nil) {
            self.ipv6Addresses = ipv6Addresses
            self.ipAddresses = ipAddresses
            self.dnsName = dnsName
        }

        private enum CodingKeys: String, CodingKey {
            case ipv6Addresses = "Ipv6Addresses"
            case ipAddresses = "IpAddresses"
            case dnsName = "DNSName"
        }
    }

    public struct FileSystemEndpoints: AWSDecodableShape {
        /// An endpoint for managing your file system by setting up NetApp SnapMirror with other ONTAP systems.
        public let intercluster: FileSystemEndpoint?
        /// An endpoint for managing your file system using the NetApp ONTAP CLI and NetApp ONTAP API.
        public let management: FileSystemEndpoint?

        @inlinable
        public init(intercluster: FileSystemEndpoint? = nil, management: FileSystemEndpoint? = nil) {
            self.management = management
            self.intercluster = intercluster
        }

        private enum CodingKeys: String, CodingKey {
            case management = "Management"
            case intercluster = "Intercluster"
        }
    }

    public struct FileSystemFailureDetails: AWSDecodableShape {
        /// A message describing any failures that occurred.
        public let message: String?

        @inlinable
        public init(message: String? = nil) { self.message = message }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    public struct FileSystemLustreMetadataConfiguration: AWSDecodableShape {
        /// The number of Metadata IOPS provisioned for the file system.   For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.   For Intelligent-Tiering file systems, valid values are 6000 and 12000.
        public let iops: Int?
        /// The metadata configuration mode for provisioning Metadata IOPS for the file system.   In AUTOMATIC mode (supported only on SSD file systems), FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.   In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.
        public let mode: MetadataConfigurationMode?

        @inlinable
        public init(iops: Int? = nil, mode: MetadataConfigurationMode? = nil) {
            self.mode = mode
            self.iops = iops
        }

        private enum CodingKeys: String, CodingKey {
            case mode = "Mode"
            case iops = "Iops"
        }
    }

    public struct Filter: AWSEncodableShape {
        /// The name for this filter.
        public let name: FilterName?
        /// The values of the filter. These are all the values for any of the applied filters.
        public let values: [String]?

        @inlinable
        public init(name: FilterName? = nil, values: [String]? = nil) {
            self.values = values
            self.name = name
        }

        /// Validates each filter value (length and character set) and the list size.
        public func validate(name: String) throws {
            if let filterValues = self.values {
                for filterValue in filterValues {
                    try validate(filterValue, name: "values[]", parent: name, max: 128)
                    try validate(filterValue, name: "values[]", parent: name, min: 1)
                    try validate(filterValue, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
                }
            }
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        private enum CodingKeys: String, CodingKey {
            case values = "Values"
            case name = "Name"
        }
    }

    public struct IncompatibleParameterError: AWSErrorShape {
        /// A message describing the error.
        public let message: String?
        /// A parameter that is incompatible with the earlier request.
        public let parameter: String?

        @inlinable
        public init(message: String? = nil, parameter: String? = nil) {
            self.parameter = parameter
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case parameter = "Parameter"
            case message = "Message"
        }
    }

    public struct InvalidAccessPoint: AWSErrorShape {
        /// An error code reporting that the specified access point doesn't exist.
        public let errorCode: String?
        /// A human-readable description of the error.
        public let message: String?

        @inlinable
        public init(errorCode: String? = nil, message: String? = nil) {
            self.message = message
            self.errorCode = errorCode
        }

        private enum CodingKeys: String, CodingKey {
            case errorCode = "ErrorCode"
            case message = "Message"
        }
    }

    public struct InvalidNetworkSettings: AWSErrorShape {
        /// A route table ID that is either invalid or does not belong to the specified VPC.
        public let invalidRouteTableId: String?
        /// A security group ID that is either invalid or does not belong to the specified VPC.
        public let invalidSecurityGroupId: String?
        /// A subnet ID that is either invalid or does not belong to the specified VPC.
        public let invalidSubnetId: String?
        /// A description of what is wrong with the supplied network settings.
        public let message: String?

        @inlinable
        public init(invalidRouteTableId: String? = nil, invalidSecurityGroupId: String? = nil, invalidSubnetId: String? = nil, message: String? = nil) {
            self.message = message
            self.invalidRouteTableId = invalidRouteTableId
            self.invalidSecurityGroupId = invalidSecurityGroupId
            self.invalidSubnetId = invalidSubnetId
        }

        private enum CodingKeys: String, CodingKey {
            case invalidRouteTableId = "InvalidRouteTableId"
            case invalidSecurityGroupId = "InvalidSecurityGroupId"
            case invalidSubnetId = "InvalidSubnetId"
            case message = "Message"
        }
    }

    public struct InvalidRequest: AWSErrorShape {
        /// An error code reporting that the requested action or operation is invalid.
        public let errorCode: String?
        /// A human-readable description of the error.
        public let message: String?

        @inlinable
        public init(errorCode: String? = nil, message: String? = nil) {
            self.message = message
            self.errorCode = errorCode
        }

        private enum CodingKeys: String, CodingKey {
            case errorCode = "ErrorCode"
            case message = "Message"
        }
    }

    public struct LifecycleTransitionReason: AWSDecodableShape {
        /// A message describing why the resource transitioned to its current lifecycle state.
        public let message: String?

        @inlinable
        public init(message: String? = nil) {
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
        }
    }

    public struct ListTagsForResourceRequest: AWSEncodableShape {
        /// The maximum number of tags to return per page. Must be greater than 0; Amazon FSx returns at most the minimum of this value and the service's internal page-size limit.
        public let maxResults: Int?
        /// An opaque pagination token from a previous ListTagsForResource call. When present, listing resumes where the previous call left off.
        public let nextToken: String?
        /// The ARN of the Amazon FSx resource whose tags should be listed.
        public let resourceARN: String?

        @inlinable
        public init(maxResults: Int? = nil, nextToken: String? = nil, resourceARN: String? = nil) {
            self.resourceARN = resourceARN
            self.nextToken = nextToken
            self.maxResults = maxResults
        }

        public func validate(name: String) throws {
            // maxResults must be a positive 32-bit integer.
            try self.validate(self.maxResults, name: "maxResults", parent: name, max: 2147483647)
            try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1)
            // nextToken must be 1-255 characters of valid base64.
            try self.validate(self.nextToken, name: "nextToken", parent: name, max: 255)
            try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1)
            try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$")
            // resourceARN must be 8-512 characters and match the FSx ARN format.
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 512)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 8)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?<!-)):){4}(?!/).{0,1024}$")
        }

        private enum CodingKeys: String, CodingKey {
            case maxResults = "MaxResults"
            case nextToken = "NextToken"
            case resourceARN = "ResourceARN"
        }
    }

    public struct ListTagsForResourceResponse: AWSDecodableShape {
        /// Present when more tags exist than were returned in this response. Pass this value as NextToken in a subsequent request to fetch the remaining tags.
        public let nextToken: String?
        /// The tags attached to the resource.
        public let tags: [Tag]?

        @inlinable
        public init(nextToken: String? = nil, tags: [Tag]? = nil) {
            self.tags = tags
            self.nextToken = nextToken
        }

        private enum CodingKeys: String, CodingKey {
            case nextToken = "NextToken"
            case tags = "Tags"
        }
    }

    public struct LustreFileSystemConfiguration: AWSDecodableShape {
        /// The number of days automatic backups are retained.
        public let automaticBackupRetentionDays: Int?
        /// Whether tags on the file system are copied to backups. When true, file-system tags are copied to all automatic backups and to user-initiated backups where the user doesn't specify tags; if the user specifies one or more tags on a user-initiated backup, no file-system tags are copied regardless of this value. (Default = false)
        public let copyTagsToBackups: Bool?
        /// The daily start time for automatic backups.
        public let dailyAutomaticBackupStartTime: String?
        /// The data compression setting: NONE (compression off) or LZ4 (compression on with the LZ4 algorithm). See "Lustre data compression" for details.
        public let dataCompressionType: DataCompressionType?
        /// The optional provisioned SSD read cache. Required when StorageType is INTELLIGENT_TIERING.
        public let dataReadCacheConfiguration: LustreReadCacheConfiguration?
        /// The data repository configuration for the file system.
        public let dataRepositoryConfiguration: DataRepositoryConfiguration?
        /// The deployment type of the FSx for Lustre file system. SCRATCH_1 and SCRATCH_2 suit temporary storage and shorter-term processing (SCRATCH_2 adds in-transit encryption and higher burst throughput). PERSISTENT_1 and PERSISTENT_2 suit longer-term storage with in-transit encryption; PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) and a lower minimum storage capacity (600 GiB). See "Deployment and storage class options for FSx for Lustre file systems". The default is SCRATCH_1.
        public let deploymentType: LustreDeploymentType?
        /// The drive cache type for PERSISTENT_1 file systems with HDD storage. READ gives the file system an SSD cache sized to 20% of storage capacity, improving performance for frequently accessed files. Required when StorageType is HDD.
        public let driveCacheType: DriveCacheType?
        /// Whether Elastic Fabric Adapter (EFA) and GPUDirect Storage (GDS) support is enabled for the file system.
        public let efaEnabled: Bool?
        /// The Lustre logging configuration, which writes enabled log events to Amazon CloudWatch Logs.
        public let logConfiguration: LustreLogConfiguration?
        /// The Lustre metadata performance configuration, for file systems using a PERSISTENT_2 deployment type.
        public let metadataConfiguration: FileSystemLustreMetadataConfiguration?
        /// The value used when mounting the file system. Always "fsx" for SCRATCH_1; for SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 it is a string unique within an Amazon Web Services Region.
        public let mountName: String?
        /// Read/write throughput in MB/s per TiB of provisioned storage (file system throughput = storage capacity in TiB * this value). Valid only for PERSISTENT_1 and PERSISTENT_2. Valid values: PERSISTENT_1 SSD: 50, 100, 200; PERSISTENT_1 HDD: 12, 40; PERSISTENT_2 SSD: 125, 250, 500, 1000.
        public let perUnitStorageThroughput: Int?
        /// The root squash configuration. When enabled, root squash restricts root-level access from clients that access the file system as root.
        public let rootSquashConfiguration: LustreRootSquashConfiguration?
        /// The throughput, in MBps, of a file system using the Intelligent-Tiering storage class.
        public let throughputCapacity: Int?
        /// The preferred weekly maintenance start time, formatted d:HH:MM in UTC, where d is the weekday number from 1 (Monday) through 7 (Sunday).
        public let weeklyMaintenanceStartTime: String?

        @inlinable
        public init(
            automaticBackupRetentionDays: Int? = nil,
            copyTagsToBackups: Bool? = nil,
            dailyAutomaticBackupStartTime: String? = nil,
            dataCompressionType: DataCompressionType? = nil,
            dataReadCacheConfiguration: LustreReadCacheConfiguration? = nil,
            dataRepositoryConfiguration: DataRepositoryConfiguration? = nil,
            deploymentType: LustreDeploymentType? = nil,
            driveCacheType: DriveCacheType? = nil,
            efaEnabled: Bool? = nil,
            logConfiguration: LustreLogConfiguration? = nil,
            metadataConfiguration: FileSystemLustreMetadataConfiguration? = nil,
            mountName: String? = nil,
            perUnitStorageThroughput: Int? = nil,
            rootSquashConfiguration: LustreRootSquashConfiguration? = nil,
            throughputCapacity: Int? = nil,
            weeklyMaintenanceStartTime: String? = nil
        ) {
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.dataCompressionType = dataCompressionType
            self.dataReadCacheConfiguration = dataReadCacheConfiguration
            self.dataRepositoryConfiguration = dataRepositoryConfiguration
            self.deploymentType = deploymentType
            self.driveCacheType = driveCacheType
            self.efaEnabled = efaEnabled
            self.logConfiguration = logConfiguration
            self.metadataConfiguration = metadataConfiguration
            self.mountName = mountName
            self.perUnitStorageThroughput = perUnitStorageThroughput
            self.rootSquashConfiguration = rootSquashConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        private enum CodingKeys: String, CodingKey {
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case dataCompressionType = "DataCompressionType"
            case dataReadCacheConfiguration = "DataReadCacheConfiguration"
            case dataRepositoryConfiguration = "DataRepositoryConfiguration"
            case deploymentType = "DeploymentType"
            case driveCacheType = "DriveCacheType"
            case efaEnabled = "EfaEnabled"
            case logConfiguration = "LogConfiguration"
            case metadataConfiguration = "MetadataConfiguration"
            case mountName = "MountName"
            case perUnitStorageThroughput = "PerUnitStorageThroughput"
            case rootSquashConfiguration = "RootSquashConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct LustreLogConfiguration: AWSDecodableShape {
        /// The ARN of the logs destination — any Amazon CloudWatch Logs log group ARN in the same Amazon Web Services partition, Region, and account as the Amazon FSx file system.
        public let destination: String?
        /// Which data repository events Amazon FSx logs: WARN_ONLY (warnings only), ERROR_ONLY (errors only), WARN_ERROR (both), or DISABLED (logging off). Amazon File Cache uses a fixed default of WARN_ERROR, which can't be changed.
        public let level: LustreAccessAuditLogLevel?

        @inlinable
        public init(destination: String? = nil, level: LustreAccessAuditLogLevel? = nil) {
            self.level = level
            self.destination = destination
        }

        private enum CodingKeys: String, CodingKey {
            case destination = "Destination"
            case level = "Level"
        }
    }

    public struct LustreLogCreateConfiguration: AWSEncodableShape {
        /// The ARN of the logs destination — any Amazon CloudWatch Logs log group ARN, subject to: it must be in the same Amazon Web Services partition, Region, and account as the file system; the log group name must begin with the /aws/fsx prefix; if omitted, Amazon FSx creates and uses a log stream in the /aws/fsx/lustre log group (FSx for Lustre) or /aws/fsx/filecache (Amazon File Cache); if provided but the resource does not exist, the request fails with a BadRequest error; and it may not be specified when Level is DISABLED.
        public let destination: String?
        /// Which data repository events Amazon FSx logs: WARN_ONLY (warnings only), ERROR_ONLY (errors only), WARN_ERROR (both), or DISABLED (logging off).
        public let level: LustreAccessAuditLogLevel?

        @inlinable
        public init(destination: String? = nil, level: LustreAccessAuditLogLevel? = nil) {
            self.level = level
            self.destination = destination
        }

        public func validate(name: String) throws {
            // The destination must be 8-1024 characters in valid ARN format.
            try self.validate(self.destination, name: "destination", parent: name, max: 1024)
            try self.validate(self.destination, name: "destination", parent: name, min: 8)
            try self.validate(self.destination, name: "destination", parent: name, pattern: "^arn:[^:]{1,63}:[^:]{0,63}:[^:]{0,63}:(?:|\\d{12}):[^/].{0,1023}$")
        }

        private enum CodingKeys: String, CodingKey {
            case destination = "Destination"
            case level = "Level"
        }
    }

    public struct LustreReadCacheConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// The size of the file system's SSD read cache, in gibibytes (GiB); required when SizingMode is USER_PROVISIONED. The cache is distributed across the file system's provisioned file servers. Intelligent-Tiering file systems support 32-131072 GiB of SSD read cache per 4,000 MB/s of provisioned throughput capacity.
        public let sizeGiB: Int?
        /// How the provisioned SSD read cache is sized: NO_CACHE for no SSD read cache, USER_PROVISIONED to specify the exact cache size, or PROPORTIONAL_TO_THROUGHPUT_CAPACITY to size the cache automatically from throughput capacity.
        public let sizingMode: LustreReadCacheSizingMode?

        @inlinable
        public init(sizeGiB: Int? = nil, sizingMode: LustreReadCacheSizingMode? = nil) {
            self.sizingMode = sizingMode
            self.sizeGiB = sizeGiB
        }

        public func validate(name: String) throws {
            // sizeGiB must be a non-negative 32-bit integer.
            try self.validate(self.sizeGiB, name: "sizeGiB", parent: name, max: 2147483647)
            try self.validate(self.sizeGiB, name: "sizeGiB", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case sizeGiB = "SizeGiB"
            case sizingMode = "SizingMode"
        }
    }

    public struct LustreRootSquashConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// Optional array of client NIDs exempt from root squash when it is enabled. A NID (Lustre Network Identifier) uniquely identifies a client, either as a single address in standard NID format — the client's IP address followed by the Lustre network ID (for example, 10.0.1.6@tcp) — or as a range using dashes (for example, 10.0.[2-10].[1-255]@tcp).
        public let noSquashNids: [String]?
        /// Enables root squash via a user ID and group ID in UID:GID format (for example, 365534:65534); each can range from 0 to 4294967294. Non-zero UID and GID values (they may differ) enable root squash; a value of 0 for both indicates root and disables it. While enabled, a root user's UID and GID are re-mapped to the values provided here.
        public let rootSquash: String?

        @inlinable
        public init(noSquashNids: [String]? = nil, rootSquash: String? = nil) {
            self.rootSquash = rootSquash
            self.noSquashNids = noSquashNids
        }

        public func validate(name: String) throws {
            // Each NID must be 11-43 characters in Lustre NID format.
            if let nids = self.noSquashNids {
                for nid in nids {
                    try validate(nid, name: "noSquashNids[]", parent: name, max: 43)
                    try validate(nid, name: "noSquashNids[]", parent: name, min: 11)
                    try validate(nid, name: "noSquashNids[]", parent: name, pattern: "^([0-9\\[\\]\\-]*\\.){3}([0-9\\[\\]\\-]*)@tcp$")
                }
            }
            // At most 64 NIDs may be listed; rootSquash must be 3-21 characters in UID:GID form.
            try self.validate(self.noSquashNids, name: "noSquashNids", parent: name, max: 64)
            try self.validate(self.rootSquash, name: "rootSquash", parent: name, max: 21)
            try self.validate(self.rootSquash, name: "rootSquash", parent: name, min: 3)
            try self.validate(self.rootSquash, name: "rootSquash", parent: name, pattern: "^([0-9]{1,10}):([0-9]{1,10})$")
        }

        private enum CodingKeys: String, CodingKey {
            case noSquashNids = "NoSquashNids"
            case rootSquash = "RootSquash"
        }
    }

    public struct NFSDataRepositoryConfiguration: AWSDecodableShape {
        /// The automatic export policy. This parameter is not supported for Amazon File Cache.
        public let autoExportPolicy: AutoExportPolicy?
        /// Up to 2 IP addresses of DNS servers used to resolve the NFS file system's domain name — either addresses of a customer-managed DNS forwarder or resolver inside the customer VPC, or addresses of on-premises DNS servers.
        public let dnsIps: [String]?
        /// The NFS (Network File System) protocol version of the data repository. Currently only NFS3 is supported, meaning the repository must support the NFSv3 protocol.
        public let version: NfsVersion?

        @inlinable
        public init(autoExportPolicy: AutoExportPolicy? = nil, dnsIps: [String]? = nil, version: NfsVersion? = nil) {
            self.version = version
            self.dnsIps = dnsIps
            self.autoExportPolicy = autoExportPolicy
        }

        private enum CodingKeys: String, CodingKey {
            case autoExportPolicy = "AutoExportPolicy"
            case dnsIps = "DnsIps"
            case version = "Version"
        }
    }

    public struct NotServiceResourceError: AWSErrorShape {
        /// A human-readable description of the error.
        public let message: String?
        /// The Amazon Resource Name (ARN) of the resource that is not an Amazon FSx resource.
        public let resourceARN: String?

        @inlinable
        public init(message: String? = nil, resourceARN: String? = nil) {
            self.resourceARN = resourceARN
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
            case resourceARN = "ResourceARN"
        }
    }

    public struct OntapFileSystemConfiguration: AWSDecodableShape {
        /// The number of days automatic backups are retained.
        public let automaticBackupRetentionDays: Int?
        /// The daily start time for automatic backups.
        public let dailyAutomaticBackupStartTime: String?
        /// The FSx for ONTAP deployment type in use: MULTI_AZ_1 (first-generation Multi-AZ high availability), MULTI_AZ_2 (second-generation Multi-AZ high availability), SINGLE_AZ_1 (first-generation Single-AZ), or SINGLE_AZ_2 (second-generation Single-AZ with multiple high-availability pairs). See "Choosing Multi-AZ or Single-AZ file system deployment" for use cases.
        public let deploymentType: OntapDeploymentType?
        /// The SSD IOPS configuration: the number of provisioned IOPS and the provisioning mode.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// (Multi-AZ only) The IPv4 address range used for the file system's endpoints. The Amazon FSx API defaults to an unused range from 198.19.*; the console defaults to the last 64 addresses of the VPC's primary CIDR range. Endpoint IP addresses may overlap for file systems in the same VPC/route tables.
        public let endpointIpAddressRange: String?
        /// (Multi-AZ only) The IPv6 address range used for the file system's endpoints. The Amazon FSx API and console default to an available /118 range from one of the VPC's CIDR ranges. Endpoint addresses may overlap for file systems in the same VPC/route tables as long as they don't overlap any subnet.
        public let endpointIpv6AddressRange: String?
        /// The Management and Intercluster endpoints used to access data or manage the file system via the NetApp ONTAP CLI, REST API, or NetApp SnapMirror.
        public let endpoints: FileSystemEndpoints?
        /// The password for the fsxadmin account, which accesses the NetApp ONTAP CLI and REST API. Always redacted in responses.
        public let fsxAdminPassword: String?
        /// The number of high-availability (HA) file server pairs (default 1); affects StorageCapacity, Iops, and ThroughputCapacity. See "High-availability (HA) pairs" in the FSx for ONTAP user guide. Amazon FSx returns HTTP 400 (Bad Request) when the value is less than 1 or greater than 12, or when it is greater than 1 with a DeploymentType of SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2.
        public let haPairs: Int?
        /// The preferred subnet for the file system.
        public let preferredSubnetId: String?
        /// (Multi-AZ only) The VPC route tables in which the file system's endpoints are created.
        public let routeTableIds: [String]?
        /// The file system's throughput capacity.
        public let throughputCapacity: Int?
        /// The throughput capacity per HA pair (equal to the file system total when HAPairs is 1). Mutually exclusive with ThroughputCapacity in the same API call, but one of the two is required; the two are the same for one-HA-pair file systems. Valid values: 128, 256, 512, 1024, 2048, or 4096 MBps for SINGLE_AZ_1 and MULTI_AZ_1; 1536, 3072, or 6144 MBps for SINGLE_AZ_2; 384, 768, 1536, 3072, or 6144 MBps for MULTI_AZ_2. Amazon FSx returns HTTP 400 (Bad Request) when ThroughputCapacity and ThroughputCapacityPerHAPair disagree, when DeploymentType is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair count (1-12), or when the value itself is invalid.
        public let throughputCapacityPerHAPair: Int?
        /// The preferred weekly maintenance start time.
        public let weeklyMaintenanceStartTime: String?

        @inlinable
        public init(
            automaticBackupRetentionDays: Int? = nil,
            dailyAutomaticBackupStartTime: String? = nil,
            deploymentType: OntapDeploymentType? = nil,
            diskIopsConfiguration: DiskIopsConfiguration? = nil,
            endpointIpAddressRange: String? = nil,
            endpointIpv6AddressRange: String? = nil,
            endpoints: FileSystemEndpoints? = nil,
            fsxAdminPassword: String? = nil,
            haPairs: Int? = nil,
            preferredSubnetId: String? = nil,
            routeTableIds: [String]? = nil,
            throughputCapacity: Int? = nil,
            throughputCapacityPerHAPair: Int? = nil,
            weeklyMaintenanceStartTime: String? = nil
        ) {
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.deploymentType = deploymentType
            self.diskIopsConfiguration = diskIopsConfiguration
            self.endpointIpAddressRange = endpointIpAddressRange
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.endpoints = endpoints
            self.fsxAdminPassword = fsxAdminPassword
            self.haPairs = haPairs
            self.preferredSubnetId = preferredSubnetId
            self.routeTableIds = routeTableIds
            self.throughputCapacity = throughputCapacity
            self.throughputCapacityPerHAPair = throughputCapacityPerHAPair
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        private enum CodingKeys: String, CodingKey {
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpAddressRange = "EndpointIpAddressRange"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case endpoints = "Endpoints"
            case fsxAdminPassword = "FsxAdminPassword"
            case haPairs = "HAPairs"
            case preferredSubnetId = "PreferredSubnetId"
            case routeTableIds = "RouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case throughputCapacityPerHAPair = "ThroughputCapacityPerHAPair"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct OntapVolumeConfiguration: AWSDecodableShape {
        /// This structure specifies configuration options for a volume’s storage aggregate or aggregates.
        public let aggregateConfiguration: AggregateConfiguration?
        /// A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// Specifies the FlexCache endpoint type of the volume. Valid values are the following:    NONE specifies that the volume doesn't have a FlexCache configuration. NONE is the default.    ORIGIN specifies that the volume is the origin volume for a FlexCache volume.    CACHE specifies that the volume is a FlexCache volume.
        public let flexCacheEndpointType: FlexCacheEndpointType?
        /// Specifies the directory that network-attached storage (NAS) clients use to mount the volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP address. You can create a JunctionPath directly below a parent volume junction or on a directory within a volume. A JunctionPath for a volume named vol3 might be /vol1/vol2/vol3, or /vol1/dir2/vol3, or even /dir1/dir2/vol3.
        public let junctionPath: String?
        /// Specifies the type of volume. Valid values are the following:    RW specifies a read/write volume. RW is the default.    DP specifies a data-protection volume. You can protect data by replicating it to data-protection mirror copies. If a disaster occurs, you can use these data-protection mirror copies to recover data.    LS specifies a load-sharing mirror volume. A load-sharing mirror reduces the network traffic to a FlexVol volume by providing additional read-only access to clients.
        public let ontapVolumeType: OntapVolumeType?
        /// The security style for the volume, which can be UNIX, NTFS, or MIXED.
        public let securityStyle: SecurityStyle?
        /// The configured size of the volume, in bytes.
        public let sizeInBytes: Int64?
        /// The configured size of the volume, in megabytes (MBs).
        public let sizeInMegabytes: Int?
        /// The SnapLock configuration object for an FSx for ONTAP SnapLock volume.
        public let snaplockConfiguration: SnaplockConfiguration?
        /// Specifies the snapshot policy for the volume. There are three built-in snapshot policies:    default: This is the default policy. A maximum of six hourly snapshots taken five minutes past  the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.    default-1weekly: This policy is the same as the default policy except  that it only retains one snapshot from the weekly schedule.    none: This policy does not take any snapshots. This policy can be assigned to volumes to  prevent automatic snapshots from being taken.   You can also provide the name of a custom policy that you created with the ONTAP CLI or REST API. For more information, see Snapshot policies  in the Amazon FSx for NetApp ONTAP User Guide.
        public let snapshotPolicy: String?
        /// The volume's storage efficiency setting.
        public let storageEfficiencyEnabled: Bool?
        /// The ID of the volume's storage virtual machine.
        public let storageVirtualMachineId: String?
        /// A Boolean flag indicating whether this volume is the root volume for its storage virtual machine (SVM). Only one volume on an SVM can be the root volume. This value defaults to false. If this value is true, then this is the SVM root volume. This flag is useful when you're deleting an SVM, because you must first delete all non-root volumes. This flag, when set to false, helps you identify which volumes to delete before you can delete the SVM.
        public let storageVirtualMachineRoot: Bool?
        /// The volume's TieringPolicy setting.
        public let tieringPolicy: TieringPolicy?
        /// The volume's universally unique identifier (UUID).
        public let uuid: String?
        /// Use to specify the style of an ONTAP volume. For more information about FlexVols and FlexGroups, see Volume types in Amazon FSx for NetApp ONTAP User Guide.
        public let volumeStyle: VolumeStyle?

        @inlinable
        public init(aggregateConfiguration: AggregateConfiguration? = nil, copyTagsToBackups: Bool? = nil, flexCacheEndpointType: FlexCacheEndpointType? = nil, junctionPath: String? = nil, ontapVolumeType: OntapVolumeType? = nil, securityStyle: SecurityStyle? = nil, sizeInBytes: Int64? = nil, sizeInMegabytes: Int? = nil, snaplockConfiguration: SnaplockConfiguration? = nil, snapshotPolicy: String? = nil, storageEfficiencyEnabled: Bool? = nil, storageVirtualMachineId: String? = nil, storageVirtualMachineRoot: Bool? = nil, tieringPolicy: TieringPolicy? = nil, uuid: String? = nil, volumeStyle: VolumeStyle? = nil) {
            self.aggregateConfiguration = aggregateConfiguration
            self.copyTagsToBackups = copyTagsToBackups
            self.flexCacheEndpointType = flexCacheEndpointType
            self.junctionPath = junctionPath
            self.ontapVolumeType = ontapVolumeType
            self.securityStyle = securityStyle
            self.sizeInBytes = sizeInBytes
            self.sizeInMegabytes = sizeInMegabytes
            self.snaplockConfiguration = snaplockConfiguration
            self.snapshotPolicy = snapshotPolicy
            self.storageEfficiencyEnabled = storageEfficiencyEnabled
            self.storageVirtualMachineId = storageVirtualMachineId
            self.storageVirtualMachineRoot = storageVirtualMachineRoot
            self.tieringPolicy = tieringPolicy
            self.uuid = uuid
            self.volumeStyle = volumeStyle
        }

        private enum CodingKeys: String, CodingKey {
            case aggregateConfiguration = "AggregateConfiguration"
            case copyTagsToBackups = "CopyTagsToBackups"
            case flexCacheEndpointType = "FlexCacheEndpointType"
            case junctionPath = "JunctionPath"
            case ontapVolumeType = "OntapVolumeType"
            case securityStyle = "SecurityStyle"
            case sizeInBytes = "SizeInBytes"
            case sizeInMegabytes = "SizeInMegabytes"
            case snaplockConfiguration = "SnaplockConfiguration"
            case snapshotPolicy = "SnapshotPolicy"
            case storageEfficiencyEnabled = "StorageEfficiencyEnabled"
            case storageVirtualMachineId = "StorageVirtualMachineId"
            case storageVirtualMachineRoot = "StorageVirtualMachineRoot"
            case tieringPolicy = "TieringPolicy"
            case uuid = "UUID"
            case volumeStyle = "VolumeStyle"
        }
    }

    public struct OpenZFSClientConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// Specifies who can mount the file system: a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24). By default, Amazon FSx uses the wildcard character when specifying the client.
        public let clients: String?
        /// The options used when mounting the file system. For the options usable with Network File System (NFS), see the exports(5) Linux man page. Note that crossmnt is used by default (omitting it when changing the client configuration hides the snapshot directory), and sync is used by default (specifying async instead acknowledges writes before they reach disk, so a crash can lose unwritten data).
        public let options: [String]?

        @inlinable
        public init(clients: String? = nil, options: [String]? = nil) {
            self.options = options
            self.clients = clients
        }

        public func validate(name: String) throws {
            // The client specification must be 1-128 printable ASCII characters.
            try self.validate(self.clients, name: "clients", parent: name, max: 128)
            try self.validate(self.clients, name: "clients", parent: name, min: 1)
            try self.validate(self.clients, name: "clients", parent: name, pattern: "^[ -~]{1,128}$")
            // Each mount option must itself be 1-128 printable ASCII characters.
            for option in self.options ?? [] {
                try self.validate(option, name: "options[]", parent: name, max: 128)
                try self.validate(option, name: "options[]", parent: name, min: 1)
                try self.validate(option, name: "options[]", parent: name, pattern: "^[ -~]{1,128}$")
            }
            // At most 20 options may be supplied, and an options list, if present, may not be empty.
            try self.validate(self.options, name: "options", parent: name, max: 20)
            try self.validate(self.options, name: "options", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case clients = "Clients"
            case options = "Options"
        }
    }

    public struct OpenZFSCreateRootVolumeConfiguration: AWSEncodableShape {
        /// Whether tags on the volume are copied to its snapshots. Defaults to false. When true, all volume tags propagate to snapshots for which the user doesn't specify tags; when true and one or more tags are specified, only those tags are copied. Tags given at snapshot creation always suppress copying.
        public let copyTagsToSnapshots: Bool?
        /// The compression applied to data on the volume: NONE (the default, no compression), ZSTD (Zstandard; better compression ratio than LZ4, minimizing on-disk storage), or LZ4 (less compute-intensive than Z-Standard, with higher write throughput).
        public let dataCompressionType: OpenZFSDataCompressionType?
        /// The configuration object for mounting the file system.
        public let nfsExports: [OpenZFSNfsExport]?
        /// Whether the volume is read-only. Setting this to true can be useful once changes to the volume are complete and further modification is unwanted.
        public let readOnly: Bool?
        /// The record size of the OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024; the default (128) suits most workloads. Databases can benefit from a smaller record size, streaming workloads from a larger one — see "Tips for maximizing performance" in the Amazon FSx for OpenZFS User Guide.
        public let recordSizeKiB: Int?
        /// Per-user or per-group storage quotas for the volume.
        public let userAndGroupQuotas: [OpenZFSUserOrGroupQuota]?

        @inlinable
        public init(copyTagsToSnapshots: Bool? = nil, dataCompressionType: OpenZFSDataCompressionType? = nil, nfsExports: [OpenZFSNfsExport]? = nil, readOnly: Bool? = nil, recordSizeKiB: Int? = nil, userAndGroupQuotas: [OpenZFSUserOrGroupQuota]? = nil) {
            self.userAndGroupQuotas = userAndGroupQuotas
            self.recordSizeKiB = recordSizeKiB
            self.readOnly = readOnly
            self.nfsExports = nfsExports
            self.dataCompressionType = dataCompressionType
            self.copyTagsToSnapshots = copyTagsToSnapshots
        }

        public func validate(name: String) throws {
            // Each export validates its own client configurations; at most one export is allowed.
            for export in self.nfsExports ?? [] {
                try export.validate(name: "\(name).nfsExports[]")
            }
            try self.validate(self.nfsExports, name: "nfsExports", parent: name, max: 1)
            // Record size is bounded to 4-4096 KiB here; the service further restricts it to powers of two.
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, max: 4096)
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, min: 4)
            // Each quota validates its own ID/size ranges; at most 500 quotas are allowed.
            for quota in self.userAndGroupQuotas ?? [] {
                try quota.validate(name: "\(name).userAndGroupQuotas[]")
            }
            try self.validate(self.userAndGroupQuotas, name: "userAndGroupQuotas", parent: name, max: 500)
        }

        private enum CodingKeys: String, CodingKey {
            case copyTagsToSnapshots = "CopyTagsToSnapshots"
            case dataCompressionType = "DataCompressionType"
            case nfsExports = "NfsExports"
            case readOnly = "ReadOnly"
            case recordSizeKiB = "RecordSizeKiB"
            case userAndGroupQuotas = "UserAndGroupQuotas"
        }
    }

    /// The configuration of an Amazon FSx for OpenZFS file system (decode-only response shape).
    public struct OpenZFSFileSystemConfiguration: AWSDecodableShape {
        /// The number of days automatic backups are retained — assumed from the field name; confirm against the FSx API reference.
        public let automaticBackupRetentionDays: Int?
        /// A Boolean value indicating whether tags on the file system should be copied to backups. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.
        public let copyTagsToVolumes: Bool?
        /// The daily start time for automatic backups — presumably a recurring "HH:MM" time; confirm against the FSx API reference.
        public let dailyAutomaticBackupStartTime: String?
        /// Specifies the file-system deployment type. Amazon FSx for OpenZFS supports  MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1.
        public let deploymentType: OpenZFSDeploymentType?
        /// The SSD IOPS configuration for the file system — see DiskIopsConfiguration.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// The IPv4 address of the endpoint that is used to access data or to manage the file system.
        public let endpointIpAddress: String?
        /// (Multi-AZ only) Specifies the IPv4 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx  API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
        public let endpointIpAddressRange: String?
        /// The IPv6 address of the endpoint that is used to access data or to manage the file system.
        public let endpointIpv6Address: String?
        /// (Multi-AZ only) Specifies the IPv6 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /118 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpv6AddressRange: String?
        /// Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located.
        public let preferredSubnetId: String?
        ///  Required when StorageType is set to INTELLIGENT_TIERING. Specifies the optional provisioned SSD read cache.
        public let readCacheConfiguration: OpenZFSReadCacheConfiguration?
        /// The ID of the root volume of the OpenZFS file system.
        public let rootVolumeId: String?
        /// (Multi-AZ only) The VPC route tables in which your file system's endpoints are created.
        public let routeTableIds: [String]?
        /// The throughput of an Amazon FSx file system, measured in megabytes per second (MBps).
        public let throughputCapacity: Int?
        /// The weekly maintenance window start time — presumably a recurring "d:HH:MM" (day:hour:minute) time; confirm against the FSx API reference.
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer. Every field is optional because the service may omit any attribute in a response.
        @inlinable
        public init(automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, copyTagsToVolumes: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, deploymentType: OpenZFSDeploymentType? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, endpointIpAddress: String? = nil, endpointIpAddressRange: String? = nil, endpointIpv6Address: String? = nil, endpointIpv6AddressRange: String? = nil, preferredSubnetId: String? = nil, readCacheConfiguration: OpenZFSReadCacheConfiguration? = nil, rootVolumeId: String? = nil, routeTableIds: [String]? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.copyTagsToVolumes = copyTagsToVolumes
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.deploymentType = deploymentType
            self.diskIopsConfiguration = diskIopsConfiguration
            self.endpointIpAddress = endpointIpAddress
            self.endpointIpAddressRange = endpointIpAddressRange
            self.endpointIpv6Address = endpointIpv6Address
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.preferredSubnetId = preferredSubnetId
            self.readCacheConfiguration = readCacheConfiguration
            self.rootVolumeId = rootVolumeId
            self.routeTableIds = routeTableIds
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        // Maps each property to its PascalCase wire key in the AWS JSON protocol.
        private enum CodingKeys: String, CodingKey {
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case copyTagsToVolumes = "CopyTagsToVolumes"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpAddress = "EndpointIpAddress"
            case endpointIpAddressRange = "EndpointIpAddressRange"
            case endpointIpv6Address = "EndpointIpv6Address"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case preferredSubnetId = "PreferredSubnetId"
            case readCacheConfiguration = "ReadCacheConfiguration"
            case rootVolumeId = "RootVolumeId"
            case routeTableIds = "RouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct OpenZFSFileSystemIdentity: AWSEncodableShape & AWSDecodableShape {
        /// The UID and GIDs of the POSIX file system user.
        public let posixUser: OpenZFSPosixFileSystemUser?
        /// The FSx for OpenZFS user identity type; only POSIX is accepted.
        public let type: OpenZFSFileSystemUserType?

        @inlinable
        public init(posixUser: OpenZFSPosixFileSystemUser? = nil, type: OpenZFSFileSystemUserType? = nil) {
            self.type = type
            self.posixUser = posixUser
        }

        public func validate(name: String) throws {
            // Validation is delegated to the nested POSIX user, which enforces the UID/GID ranges.
            try self.posixUser?.validate(name: "\(name).posixUser")
        }

        private enum CodingKeys: String, CodingKey {
            case posixUser = "PosixUser"
            case type = "Type"
        }
    }

    public struct OpenZFSNfsExport: AWSEncodableShape & AWSDecodableShape {
        /// The client/options pairs that control how the OpenZFS file system may be mounted.
        public let clientConfigurations: [OpenZFSClientConfiguration]?

        @inlinable
        public init(clientConfigurations: [OpenZFSClientConfiguration]? = nil) {
            self.clientConfigurations = clientConfigurations
        }

        public func validate(name: String) throws {
            // Each client configuration validates itself; at most 25 are allowed per export.
            for configuration in self.clientConfigurations ?? [] {
                try configuration.validate(name: "\(name).clientConfigurations[]")
            }
            try self.validate(self.clientConfigurations, name: "clientConfigurations", parent: name, max: 25)
        }

        private enum CodingKeys: String, CodingKey {
            case clientConfigurations = "ClientConfigurations"
        }
    }

    public struct OpenZFSOriginSnapshotConfiguration: AWSDecodableShape {
        /// The strategy used when copying data from the snapshot to the new volume. CLONE means the new volume references the origin snapshot's data — faster than copying and free of disk throughput, but the origin snapshot can't be deleted while a volume uses its copied data. FULL_COPY copies all data from the snapshot to the new volume. The INCREMENTAL_COPY option applies only when updating an existing volume from a snapshot on another FSx for OpenZFS file system; see CopySnapshotAndUpdateVolume.
        public let copyStrategy: OpenZFSCopyStrategy?
        /// The Amazon Resource Name (ARN) of the origin snapshot — assumed from the field name; confirm against the FSx API reference.
        public let snapshotARN: String?

        @inlinable
        public init(copyStrategy: OpenZFSCopyStrategy? = nil, snapshotARN: String? = nil) {
            self.snapshotARN = snapshotARN
            self.copyStrategy = copyStrategy
        }

        private enum CodingKeys: String, CodingKey {
            case copyStrategy = "CopyStrategy"
            case snapshotARN = "SnapshotARN"
        }
    }

    public struct OpenZFSPosixFileSystemUser: AWSEncodableShape & AWSDecodableShape {
        /// The GID of the file system user.
        public let gid: Int64?
        /// The secondary GIDs of the file system user.
        public let secondaryGids: [Int64]?
        /// The UID of the file system user.
        public let uid: Int64?

        @inlinable
        public init(gid: Int64? = nil, secondaryGids: [Int64]? = nil, uid: Int64? = nil) {
            self.uid = uid
            self.secondaryGids = secondaryGids
            self.gid = gid
        }

        public func validate(name: String) throws {
            // Each ID must fit in an unsigned 32-bit range (0...4294967295).
            try self.validate(self.gid, name: "gid", parent: name, max: 4294967295)
            try self.validate(self.gid, name: "gid", parent: name, min: 0)
            for secondaryGid in self.secondaryGids ?? [] {
                try self.validate(secondaryGid, name: "secondaryGids[]", parent: name, max: 4294967295)
                try self.validate(secondaryGid, name: "secondaryGids[]", parent: name, min: 0)
            }
            // No more than 15 secondary GIDs may be supplied.
            try self.validate(self.secondaryGids, name: "secondaryGids", parent: name, max: 15)
            try self.validate(self.uid, name: "uid", parent: name, max: 4294967295)
            try self.validate(self.uid, name: "uid", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case gid = "Gid"
            case secondaryGids = "SecondaryGids"
            case uid = "Uid"
        }
    }

    public struct OpenZFSReadCacheConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// The size of the file system's SSD read cache, in gibibytes (GiB). Required if SizingMode is USER_PROVISIONED.
        public let sizeGiB: Int?
        /// How the provisioned SSD read cache is sized: NO_CACHE (no SSD read cache for the Intelligent-Tiering file system), USER_PROVISIONED (exact size given via SizeGiB), or PROPORTIONAL_TO_THROUGHPUT_CAPACITY (sized automatically from the throughput capacity).
        public let sizingMode: OpenZFSReadCacheSizingMode?

        @inlinable
        public init(sizeGiB: Int? = nil, sizingMode: OpenZFSReadCacheSizingMode? = nil) {
            self.sizingMode = sizingMode
            self.sizeGiB = sizeGiB
        }

        public func validate(name: String) throws {
            // Cache size must be a non-negative signed 32-bit value; sizingMode needs no client-side check.
            try self.validate(self.sizeGiB, name: "sizeGiB", parent: name, max: 2147483647)
            try self.validate(self.sizeGiB, name: "sizeGiB", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case sizeGiB = "SizeGiB"
            case sizingMode = "SizingMode"
        }
    }

    public struct OpenZFSUserOrGroupQuota: AWSEncodableShape & AWSDecodableShape {
        /// The ID of the user or group the quota applies to.
        public let id: Int?
        /// The storage quota for the user or group, in gibibytes (GiB).
        public let storageCapacityQuotaGiB: Int?
        /// Whether the quota applies to a user or a group.
        public let type: OpenZFSQuotaType?

        @inlinable
        public init(id: Int? = nil, storageCapacityQuotaGiB: Int? = nil, type: OpenZFSQuotaType? = nil) {
            self.type = type
            self.storageCapacityQuotaGiB = storageCapacityQuotaGiB
            self.id = id
        }

        public func validate(name: String) throws {
            // Both the principal ID and the quota must be non-negative signed 32-bit values.
            try self.validate(self.id, name: "id", parent: name, max: 2147483647)
            try self.validate(self.id, name: "id", parent: name, min: 0)
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, max: 2147483647)
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case id = "Id"
            case storageCapacityQuotaGiB = "StorageCapacityQuotaGiB"
            case type = "Type"
        }
    }

    /// The configuration of an Amazon FSx for OpenZFS volume (decode-only response shape).
    public struct OpenZFSVolumeConfiguration: AWSDecodableShape {
        /// Specifies the strategy used when copying data from the snapshot to the new volume.     CLONE - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying data from the snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data.    FULL_COPY - Copies all data from the snapshot to the new volume. Specify this option to create the volume from a snapshot on another FSx for OpenZFS file system.    The INCREMENTAL_COPY option is only for updating an existing volume by using a snapshot from another FSx for OpenZFS file system. For more information, see CopySnapshotAndUpdateVolume.
        public let copyStrategy: OpenZFSCopyStrategy?
        /// A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.
        public let copyTagsToSnapshots: Bool?
        /// Specifies the method used to compress the data on the volume. The compression type is NONE by default.    NONE - Doesn't compress the data on the volume. NONE is the default.    ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.    LZ4 - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.
        public let dataCompressionType: OpenZFSDataCompressionType?
        /// A Boolean value indicating whether dependent clone volumes  created from intermediate snapshots should be deleted when a volume is restored from snapshot.
        public let deleteClonedVolumes: Bool?
        /// A Boolean value indicating whether snapshot data that differs between the current state and the specified snapshot should be overwritten when a volume is restored from a snapshot.
        public let deleteIntermediateData: Bool?
        /// A Boolean value indicating whether snapshots between the current state and the specified snapshot should be deleted when a volume is restored from snapshot.
        /// ("Snaphots" (sic) is carried over from the AWS API model — the wire key below is "DeleteIntermediateSnaphots" — so the property name must stay as-is.)
        public let deleteIntermediateSnaphots: Bool?
        /// The ID of the snapshot that's being copied or was most recently copied to the destination volume.
        public let destinationSnapshot: String?
        /// The configuration object for mounting a Network File System (NFS) file system.
        public let nfsExports: [OpenZFSNfsExport]?
        /// The configuration object that specifies the snapshot to use as the origin of the data for the volume.
        public let originSnapshot: OpenZFSOriginSnapshotConfiguration?
        /// The ID of the parent volume.
        public let parentVolumeId: String?
        /// A Boolean value indicating whether the volume is read-only.
        public let readOnly: Bool?
        /// The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. For guidance on when to set a custom record size, see the Amazon FSx for OpenZFS User Guide.
        public let recordSizeKiB: Int?
        /// Specifies the ID of the snapshot to which the volume was restored.
        public let restoreToSnapshot: String?
        /// The Amazon Resource Name (ARN) of the source snapshot — assumed from the field name; confirm against the FSx API reference.
        public let sourceSnapshotARN: String?
        /// The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume.
        public let storageCapacityQuotaGiB: Int?
        /// The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved.
        public let storageCapacityReservationGiB: Int?
        /// An object specifying how much storage users or groups can use on the volume.
        public let userAndGroupQuotas: [OpenZFSUserOrGroupQuota]?
        /// The path to the volume from the root volume. For example, fsx/parentVolume/volume1.
        public let volumePath: String?

        /// Memberwise initializer. Every field is optional because the service may omit any attribute in a response.
        @inlinable
        public init(copyStrategy: OpenZFSCopyStrategy? = nil, copyTagsToSnapshots: Bool? = nil, dataCompressionType: OpenZFSDataCompressionType? = nil, deleteClonedVolumes: Bool? = nil, deleteIntermediateData: Bool? = nil, deleteIntermediateSnaphots: Bool? = nil, destinationSnapshot: String? = nil, nfsExports: [OpenZFSNfsExport]? = nil, originSnapshot: OpenZFSOriginSnapshotConfiguration? = nil, parentVolumeId: String? = nil, readOnly: Bool? = nil, recordSizeKiB: Int? = nil, restoreToSnapshot: String? = nil, sourceSnapshotARN: String? = nil, storageCapacityQuotaGiB: Int? = nil, storageCapacityReservationGiB: Int? = nil, userAndGroupQuotas: [OpenZFSUserOrGroupQuota]? = nil, volumePath: String? = nil) {
            self.copyStrategy = copyStrategy
            self.copyTagsToSnapshots = copyTagsToSnapshots
            self.dataCompressionType = dataCompressionType
            self.deleteClonedVolumes = deleteClonedVolumes
            self.deleteIntermediateData = deleteIntermediateData
            self.deleteIntermediateSnaphots = deleteIntermediateSnaphots
            self.destinationSnapshot = destinationSnapshot
            self.nfsExports = nfsExports
            self.originSnapshot = originSnapshot
            self.parentVolumeId = parentVolumeId
            self.readOnly = readOnly
            self.recordSizeKiB = recordSizeKiB
            self.restoreToSnapshot = restoreToSnapshot
            self.sourceSnapshotARN = sourceSnapshotARN
            self.storageCapacityQuotaGiB = storageCapacityQuotaGiB
            self.storageCapacityReservationGiB = storageCapacityReservationGiB
            self.userAndGroupQuotas = userAndGroupQuotas
            self.volumePath = volumePath
        }

        // Maps each property to its PascalCase wire key in the AWS JSON protocol.
        private enum CodingKeys: String, CodingKey {
            case copyStrategy = "CopyStrategy"
            case copyTagsToSnapshots = "CopyTagsToSnapshots"
            case dataCompressionType = "DataCompressionType"
            case deleteClonedVolumes = "DeleteClonedVolumes"
            case deleteIntermediateData = "DeleteIntermediateData"
            case deleteIntermediateSnaphots = "DeleteIntermediateSnaphots"
            case destinationSnapshot = "DestinationSnapshot"
            case nfsExports = "NfsExports"
            case originSnapshot = "OriginSnapshot"
            case parentVolumeId = "ParentVolumeId"
            case readOnly = "ReadOnly"
            case recordSizeKiB = "RecordSizeKiB"
            case restoreToSnapshot = "RestoreToSnapshot"
            case sourceSnapshotARN = "SourceSnapshotARN"
            case storageCapacityQuotaGiB = "StorageCapacityQuotaGiB"
            case storageCapacityReservationGiB = "StorageCapacityReservationGiB"
            case userAndGroupQuotas = "UserAndGroupQuotas"
            case volumePath = "VolumePath"
        }
    }

    public struct ReleaseConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// The point in time since last access that makes an exported file eligible for release; only files last accessed before this point in time can be released from the file system.
        public let durationSinceLastAccess: DurationSinceLastAccess?

        @inlinable
        public init(durationSinceLastAccess: DurationSinceLastAccess? = nil) {
            self.durationSinceLastAccess = durationSinceLastAccess
        }

        public func validate(name: String) throws {
            // Validation is entirely delegated to the nested duration shape.
            try self.durationSinceLastAccess?.validate(name: "\(name).durationSinceLastAccess")
        }

        private enum CodingKeys: String, CodingKey {
            case durationSinceLastAccess = "DurationSinceLastAccess"
        }
    }

    /// Request shape for the ReleaseFileSystemNfsV3Locks operation.
    public struct ReleaseFileSystemNfsV3LocksRequest: AWSEncodableShape {
        /// An idempotency token for the request; a fresh token is generated by default.
        public let clientRequestToken: String?
        /// The ID of the file system ("fs-" prefixed) whose NFSv3 locks should be released.
        public let fileSystemId: String?

        @inlinable
        public init(clientRequestToken: String? = ReleaseFileSystemNfsV3LocksRequest.idempotencyToken(), fileSystemId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
        }

        /// Validates length and format constraints from the AWS service model.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): the "A-za-z" character range is copied verbatim from the upstream AWS
            // service model; it matches a superset of [A-Za-z]. Do not hand-correct generated code.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
        }
    }

    /// Response shape for the ReleaseFileSystemNfsV3Locks operation.
    public struct ReleaseFileSystemNfsV3LocksResponse: AWSDecodableShape {
        /// The description of the file system returned by the operation.
        public let fileSystem: FileSystem?

        @inlinable
        public init(fileSystem: FileSystem? = nil) {
            self.fileSystem = fileSystem
        }

        private enum CodingKeys: String, CodingKey {
            case fileSystem = "FileSystem"
        }
    }

    /// Error shape returned when the specified resource doesn't support tagging.
    public struct ResourceDoesNotSupportTagging: AWSErrorShape {
        /// A detailed error message.
        public let message: String?
        /// The Amazon Resource Name (ARN) of the resource that doesn't support tagging.
        public let resourceARN: String?

        @inlinable
        public init(message: String? = nil, resourceARN: String? = nil) {
            self.message = message
            self.resourceARN = resourceARN
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
            case resourceARN = "ResourceARN"
        }
    }

    /// Error shape returned when the specified resource can't be found.
    public struct ResourceNotFound: AWSErrorShape {
        /// A detailed error message.
        public let message: String?
        /// The resource ARN of the resource that can't be found.
        public let resourceARN: String?

        @inlinable
        public init(message: String? = nil, resourceARN: String? = nil) {
            self.message = message
            self.resourceARN = resourceARN
        }

        private enum CodingKeys: String, CodingKey {
            case message = "Message"
            case resourceARN = "ResourceARN"
        }
    }

    /// Request shape for the RestoreVolumeFromSnapshot operation.
    public struct RestoreVolumeFromSnapshotRequest: AWSEncodableShape {
        /// An idempotency token for the request; a fresh token is generated by default.
        public let clientRequestToken: String?
        /// The settings used when restoring the specified volume from snapshot.    DELETE_INTERMEDIATE_SNAPSHOTS - Deletes snapshots between the current state and the specified snapshot. If there are intermediate snapshots and this option isn't used, RestoreVolumeFromSnapshot fails.    DELETE_CLONED_VOLUMES - Deletes any dependent clone volumes  created from intermediate snapshots. If there are any dependent clone volumes and this  option isn't used, RestoreVolumeFromSnapshot fails.
        public let options: [RestoreOpenZFSVolumeOption]?
        /// The ID of the source snapshot. Specifies the snapshot that you are restoring from.
        public let snapshotId: String?
        /// The ID of the volume that you are restoring.
        public let volumeId: String?

        @inlinable
        public init(clientRequestToken: String? = RestoreVolumeFromSnapshotRequest.idempotencyToken(), options: [RestoreOpenZFSVolumeOption]? = nil, snapshotId: String? = nil, volumeId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.options = options
            self.snapshotId = snapshotId
            self.volumeId = volumeId
        }

        /// Validates length, count, and format constraints from the AWS service model.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" is as published in the upstream AWS model; leave as generated.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            // At most two restore options may be supplied (one of each kind).
            try self.validate(self.options, name: "options", parent: name, max: 2)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, max: 28)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, min: 11)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, pattern: "^((fs)?volsnap-[0-9a-f]{8,})$")
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case options = "Options"
            case snapshotId = "SnapshotId"
            case volumeId = "VolumeId"
        }
    }

    /// Response shape for the RestoreVolumeFromSnapshot operation.
    public struct RestoreVolumeFromSnapshotResponse: AWSDecodableShape {
        /// A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.
        public let administrativeActions: [AdministrativeAction]?
        /// The lifecycle state of the volume being restored.
        public let lifecycle: VolumeLifecycle?
        /// The ID of the volume that you restored.
        public let volumeId: String?

        @inlinable
        public init(administrativeActions: [AdministrativeAction]? = nil, lifecycle: VolumeLifecycle? = nil, volumeId: String? = nil) {
            self.administrativeActions = administrativeActions
            self.lifecycle = lifecycle
            self.volumeId = volumeId
        }

        private enum CodingKeys: String, CodingKey {
            case administrativeActions = "AdministrativeActions"
            case lifecycle = "Lifecycle"
            case volumeId = "VolumeId"
        }
    }

    /// A retention period for an FSx for ONTAP SnapLock volume, expressed as a type of time unit plus a value.
    public struct RetentionPeriod: AWSEncodableShape & AWSDecodableShape {
        /// Defines the type of time for the retention period of an FSx for ONTAP SnapLock volume.  Set it to  one of the valid types. If you set it to INFINITE, the files are retained forever. If you set it to  UNSPECIFIED, the files are retained until you set an explicit retention period.
        public let type: RetentionPeriodType?
        /// Defines the amount of time for the retention period of an FSx for ONTAP SnapLock volume.  You can't set a value for INFINITE or UNSPECIFIED. For all other options, the  following ranges are valid:     Seconds: 0 - 65,535    Minutes: 0 - 65,535    Hours: 0 - 24    Days: 0 - 365    Months: 0 - 12    Years: 0 - 100
        public let value: Int?

        @inlinable
        public init(type: RetentionPeriodType? = nil, value: Int? = nil) {
            self.type = type
            self.value = value
        }

        /// Validates the overall numeric bounds; per-unit ranges (see `value` docs) are enforced server-side.
        public func validate(name: String) throws {
            try self.validate(self.value, name: "value", parent: name, max: 65535)
            try self.validate(self.value, name: "value", parent: name, min: 0)
        }

        private enum CodingKeys: String, CodingKey {
            case type = "Type"
            case value = "Value"
        }
    }

    /// Describes an S3 access point.
    public struct S3AccessPoint: AWSDecodableShape {
        /// The S3 access point's alias.
        public let alias: String?
        /// The S3 access point's ARN.
        public let resourceARN: String?
        /// The S3 access point's virtual private cloud (VPC) configuration.
        public let vpcConfiguration: S3AccessPointVpcConfiguration?

        @inlinable
        public init(alias: String? = nil, resourceARN: String? = nil, vpcConfiguration: S3AccessPointVpcConfiguration? = nil) {
            self.alias = alias
            self.resourceARN = resourceARN
            self.vpcConfiguration = vpcConfiguration
        }

        private enum CodingKeys: String, CodingKey {
            case alias = "Alias"
            case resourceARN = "ResourceARN"
            case vpcConfiguration = "VpcConfiguration"
        }
    }

    /// Describes the attachment of an S3 access point to an Amazon FSx volume.
    public struct S3AccessPointAttachment: AWSDecodableShape {
        /// The time the S3 access point attachment was created.
        public let creationTime: Date?
        /// The lifecycle status of the S3 access point attachment. The lifecycle can have the following values:   AVAILABLE - the S3 access point attachment is available for use   CREATING - Amazon FSx is creating the S3 access point and attachment   DELETING - Amazon FSx is deleting the S3 access point and attachment   FAILED - The S3 access point attachment is in a failed state. Delete and detach the S3 access  point attachment, and create a new one.   UPDATING - Amazon FSx is updating the S3 access point attachment
        public let lifecycle: S3AccessPointAttachmentLifecycle?
        /// The reason for the attachment's most recent lifecycle transition, if any.
        public let lifecycleTransitionReason: LifecycleTransitionReason?
        /// The name of the S3 access point attachment; also used for the name of the S3 access point.
        public let name: String?
        /// The OpenZFSConfiguration of the S3 access point attachment.
        public let openZFSConfiguration: S3AccessPointOpenZFSConfiguration?
        /// The S3 access point configuration of the S3 access point attachment.
        public let s3AccessPoint: S3AccessPoint?
        /// The type of Amazon FSx volume that the S3 access point is attached to.
        public let type: S3AccessPointAttachmentType?

        @inlinable
        public init(creationTime: Date? = nil, lifecycle: S3AccessPointAttachmentLifecycle? = nil, lifecycleTransitionReason: LifecycleTransitionReason? = nil, name: String? = nil, openZFSConfiguration: S3AccessPointOpenZFSConfiguration? = nil, s3AccessPoint: S3AccessPoint? = nil, type: S3AccessPointAttachmentType? = nil) {
            self.creationTime = creationTime
            self.lifecycle = lifecycle
            self.lifecycleTransitionReason = lifecycleTransitionReason
            self.name = name
            self.openZFSConfiguration = openZFSConfiguration
            self.s3AccessPoint = s3AccessPoint
            self.type = type
        }

        private enum CodingKeys: String, CodingKey {
            case creationTime = "CreationTime"
            case lifecycle = "Lifecycle"
            case lifecycleTransitionReason = "LifecycleTransitionReason"
            case name = "Name"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case s3AccessPoint = "S3AccessPoint"
            case type = "Type"
        }
    }

    /// A name/values filter used when listing S3 access point attachments.
    public struct S3AccessPointAttachmentsFilter: AWSEncodableShape {
        /// The name of the filter.
        public let name: S3AccessPointAttachmentsFilterName?
        /// The values of the filter.
        public let values: [String]?

        /// Validates each filter value's length/character set, and the overall value count.
        @inlinable
        public init(name: S3AccessPointAttachmentsFilterName? = nil, values: [String]? = nil) {
            self.name = name
            self.values = values
        }

        public func validate(name: String) throws {
            // Each value: 1-128 characters from the model's allowed character set.
            try self.values?.forEach {
                try validate($0, name: "values[]", parent: name, max: 128)
                try validate($0, name: "values[]", parent: name, min: 1)
                try validate($0, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
            }
            // At most 20 values per filter.
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        private enum CodingKeys: String, CodingKey {
            case name = "Name"
            case values = "Values"
        }
    }

    /// The OpenZFS-specific configuration of an S3 access point attachment.
    public struct S3AccessPointOpenZFSConfiguration: AWSDecodableShape {
        /// The file system identity used to authorize file access requests made using the S3 access point.
        public let fileSystemIdentity: OpenZFSFileSystemIdentity?
        /// The ID of the FSx for OpenZFS volume that the S3 access point is attached to.
        public let volumeId: String?

        @inlinable
        public init(fileSystemIdentity: OpenZFSFileSystemIdentity? = nil, volumeId: String? = nil) {
            self.fileSystemIdentity = fileSystemIdentity
            self.volumeId = volumeId
        }

        private enum CodingKeys: String, CodingKey {
            case fileSystemIdentity = "FileSystemIdentity"
            case volumeId = "VolumeId"
        }
    }

    /// The virtual private cloud (VPC) configuration for an S3 access point.
    public struct S3AccessPointVpcConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// Specifies the virtual private cloud (VPC) for the S3 access point VPC configuration, if one exists.
        public let vpcId: String?

        /// Validates that the VPC ID has the "vpc-" prefixed hexadecimal format required by the service.
        public func validate(name: String) throws {
            try self.validate(self.vpcId, name: "vpcId", parent: name, max: 21)
            try self.validate(self.vpcId, name: "vpcId", parent: name, min: 12)
            try self.validate(self.vpcId, name: "vpcId", parent: name, pattern: "^(vpc-[0-9a-f]{8,})$")
        }

        @inlinable
        public init(vpcId: String? = nil) {
            self.vpcId = vpcId
        }

        private enum CodingKeys: String, CodingKey {
            case vpcId = "VpcId"
        }
    }

    /// Automatic import/export policies for a file system linked to an S3 bucket.
    public struct S3DataRepositoryConfiguration: AWSEncodableShape & AWSDecodableShape {
        /// Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.
        public let autoExportPolicy: AutoExportPolicy?
        /// Specifies the type of updated objects (new, changed, deleted) that will be automatically imported from the linked S3 bucket to your file system.
        public let autoImportPolicy: AutoImportPolicy?

        @inlinable
        public init(autoExportPolicy: AutoExportPolicy? = nil, autoImportPolicy: AutoImportPolicy? = nil) {
            self.autoExportPolicy = autoExportPolicy
            self.autoImportPolicy = autoImportPolicy
        }

        /// Validates client-side constraints by delegating to each nested policy, if present.
        public func validate(name: String) throws {
            try self.autoExportPolicy?.validate(name: "\(name).autoExportPolicy")
            try self.autoImportPolicy?.validate(name: "\(name).autoImportPolicy")
        }

        private enum CodingKeys: String, CodingKey {
            case autoExportPolicy = "AutoExportPolicy"
            case autoImportPolicy = "AutoImportPolicy"
        }
    }

    /// Describes the self-managed Active Directory configuration of a resource (read-only response shape).
    public struct SelfManagedActiveDirectoryAttributes: AWSDecodableShape {
        /// A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
        public let dnsIps: [String]?
        /// The fully qualified domain name of the self-managed AD directory.
        public let domainName: String?
        /// The name of the domain group whose members have administrative privileges for the FSx file system.
        public let fileSystemAdministratorsGroup: String?
        /// The fully qualified distinguished name of the organizational unit within the self-managed AD directory to which the Windows File Server  or ONTAP storage virtual machine (SVM) instance is joined.
        public let organizationalUnitDistinguishedName: String?
        /// The user name for the service account on your self-managed AD domain that FSx uses to join to your AD domain.
        public let userName: String?

        @inlinable
        public init(dnsIps: [String]? = nil, domainName: String? = nil, fileSystemAdministratorsGroup: String? = nil, organizationalUnitDistinguishedName: String? = nil, userName: String? = nil) {
            self.dnsIps = dnsIps
            self.domainName = domainName
            self.fileSystemAdministratorsGroup = fileSystemAdministratorsGroup
            self.organizationalUnitDistinguishedName = organizationalUnitDistinguishedName
            self.userName = userName
        }

        private enum CodingKeys: String, CodingKey {
            case dnsIps = "DnsIps"
            case domainName = "DomainName"
            case fileSystemAdministratorsGroup = "FileSystemAdministratorsGroup"
            case organizationalUnitDistinguishedName = "OrganizationalUnitDistinguishedName"
            case userName = "UserName"
        }
    }

    /// The configuration Amazon FSx uses to join a resource to your self-managed Active Directory (request shape; includes credentials).
    public struct SelfManagedActiveDirectoryConfiguration: AWSEncodableShape {
        /// A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
        public let dnsIps: [String]?
        /// The fully qualified domain name of the self-managed AD directory, such as corp.example.com.
        public let domainName: String?
        /// (Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and              administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.
        public let fileSystemAdministratorsGroup: String?
        /// (Optional) The fully qualified distinguished name of the organizational unit within your self-managed AD directory. Amazon FSx only accepts OU as the direct parent of the file system. An example is OU=FSx,DC=yourdomain,DC=corp,DC=com. To learn more, see RFC 2253. If none is provided, the FSx file system is created in the default location of your self-managed AD directory.   Only Organizational Unit (OU) objects can be the direct parent of the file system that you're creating.
        public let organizationalUnitDistinguishedName: String?
        /// The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.
        public let password: String?
        /// The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName, or in the default location of your AD domain.
        public let userName: String?

        @inlinable
        public init(dnsIps: [String]? = nil, domainName: String? = nil, fileSystemAdministratorsGroup: String? = nil, organizationalUnitDistinguishedName: String? = nil, password: String? = nil, userName: String? = nil) {
            self.dnsIps = dnsIps
            self.domainName = domainName
            self.fileSystemAdministratorsGroup = fileSystemAdministratorsGroup
            self.organizationalUnitDistinguishedName = organizationalUnitDistinguishedName
            self.password = password
            self.userName = userName
        }

        /// Validates length, count, and format constraints from the AWS service model.
        public func validate(name: String) throws {
            // Each DNS IP must be a dotted-quad IPv4 address or a textual IPv6 form
            // (including zone-indexed link-local and IPv4-mapped addresses).
            try self.dnsIps?.forEach {
                try validate($0, name: "dnsIps[]", parent: name, max: 45)
                try validate($0, name: "dnsIps[]", parent: name, min: 1)
                try validate($0, name: "dnsIps[]", parent: name, pattern: "^(^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$|^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)$")
            }
            try self.validate(self.dnsIps, name: "dnsIps", parent: name, max: 3)
            try self.validate(self.dnsIps, name: "dnsIps", parent: name, min: 1)
            try self.validate(self.domainName, name: "domainName", parent: name, max: 255)
            try self.validate(self.domainName, name: "domainName", parent: name, min: 1)
            try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, max: 256)
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, min: 1)
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$")
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, max: 2000)
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, min: 1)
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,2000}$")
            try self.validate(self.password, name: "password", parent: name, max: 256)
            try self.validate(self.password, name: "password", parent: name, min: 1)
            try self.validate(self.password, name: "password", parent: name, pattern: "^.{1,256}$")
            try self.validate(self.userName, name: "userName", parent: name, max: 256)
            try self.validate(self.userName, name: "userName", parent: name, min: 1)
            try self.validate(self.userName, name: "userName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$")
        }

        private enum CodingKeys: String, CodingKey {
            case dnsIps = "DnsIps"
            case domainName = "DomainName"
            case fileSystemAdministratorsGroup = "FileSystemAdministratorsGroup"
            case organizationalUnitDistinguishedName = "OrganizationalUnitDistinguishedName"
            case password = "Password"
            case userName = "UserName"
        }
    }

    /// Updates to the self-managed Active Directory configuration of a resource; all fields are optional.
    public struct SelfManagedActiveDirectoryConfigurationUpdates: AWSEncodableShape {
        /// A list of up to three DNS server or domain controller IP addresses in your self-managed Active Directory domain.
        public let dnsIps: [String]?
        /// Specifies an updated fully qualified domain name of your self-managed Active Directory configuration.
        public let domainName: String?
        /// For FSx for ONTAP file systems only - Specifies the updated name of the self-managed Active Directory domain group whose members are granted administrative privileges for the Amazon FSx resource.
        public let fileSystemAdministratorsGroup: String?
        /// Specifies an updated fully qualified distinguished name of the organization unit within your self-managed Active Directory.
        public let organizationalUnitDistinguishedName: String?
        /// Specifies the updated password for the service account on your self-managed Active Directory domain.  Amazon FSx uses this account to join to your self-managed Active Directory domain.
        public let password: String?
        /// Specifies the updated user name for the service account on your self-managed Active Directory domain. Amazon FSx uses this account to join to your self-managed Active Directory domain. This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.
        public let userName: String?

        @inlinable
        public init(dnsIps: [String]? = nil, domainName: String? = nil, fileSystemAdministratorsGroup: String? = nil, organizationalUnitDistinguishedName: String? = nil, password: String? = nil, userName: String? = nil) {
            self.dnsIps = dnsIps
            self.domainName = domainName
            self.fileSystemAdministratorsGroup = fileSystemAdministratorsGroup
            self.organizationalUnitDistinguishedName = organizationalUnitDistinguishedName
            self.password = password
            self.userName = userName
        }

        /// Validates length, count, and format constraints from the AWS service model
        /// (mirrors `SelfManagedActiveDirectoryConfiguration.validate`).
        public func validate(name: String) throws {
            // Each DNS IP must be a dotted-quad IPv4 address or a textual IPv6 form
            // (including zone-indexed link-local and IPv4-mapped addresses).
            try self.dnsIps?.forEach {
                try validate($0, name: "dnsIps[]", parent: name, max: 45)
                try validate($0, name: "dnsIps[]", parent: name, min: 1)
                try validate($0, name: "dnsIps[]", parent: name, pattern: "^(^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$|^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)$")
            }
            try self.validate(self.dnsIps, name: "dnsIps", parent: name, max: 3)
            try self.validate(self.dnsIps, name: "dnsIps", parent: name, min: 1)
            try self.validate(self.domainName, name: "domainName", parent: name, max: 255)
            try self.validate(self.domainName, name: "domainName", parent: name, min: 1)
            try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, max: 256)
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, min: 1)
            try self.validate(self.fileSystemAdministratorsGroup, name: "fileSystemAdministratorsGroup", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$")
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, max: 2000)
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, min: 1)
            try self.validate(self.organizationalUnitDistinguishedName, name: "organizationalUnitDistinguishedName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,2000}$")
            try self.validate(self.password, name: "password", parent: name, max: 256)
            try self.validate(self.password, name: "password", parent: name, min: 1)
            try self.validate(self.password, name: "password", parent: name, pattern: "^.{1,256}$")
            try self.validate(self.userName, name: "userName", parent: name, max: 256)
            try self.validate(self.userName, name: "userName", parent: name, min: 1)
            try self.validate(self.userName, name: "userName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$")
        }

        private enum CodingKeys: String, CodingKey {
            case dnsIps = "DnsIps"
            case domainName = "DomainName"
            case fileSystemAdministratorsGroup = "FileSystemAdministratorsGroup"
            case organizationalUnitDistinguishedName = "OrganizationalUnitDistinguishedName"
            case password = "Password"
            case userName = "UserName"
        }
    }

    /// Error shape returned when a request would exceed a service limit.
    public struct ServiceLimitExceeded: AWSErrorShape {
        /// Enumeration of the service limit that was exceeded.
        public let limit: ServiceLimit?
        /// A detailed error message.
        public let message: String?

        @inlinable
        public init(limit: ServiceLimit? = nil, message: String? = nil) {
            self.limit = limit
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case limit = "Limit"
            case message = "Message"
        }
    }

    /// Describes the SnapLock configuration of an FSx for ONTAP SnapLock volume.
    public struct SnaplockConfiguration: AWSDecodableShape {
        /// Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. The default  value is false. If you set AuditLogVolume to true, the SnapLock volume is  created as an audit log volume. The minimum retention period for an audit log volume is six months.  For more information, see   SnapLock audit log volumes.
        public let auditLogVolume: Bool?
        /// The configuration object for setting the autocommit period of files in an FSx for ONTAP SnapLock volume.
        public let autocommitPeriod: AutocommitPeriod?
        /// Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock  Enterprise volume. Enabling privileged delete allows SnapLock administrators to delete write once, read  many (WORM) files even  if they have active retention periods. PERMANENTLY_DISABLED is a terminal state.  If privileged delete is permanently disabled on a SnapLock volume, you can't re-enable it. The default  value is DISABLED.  For more information, see Privileged delete.
        public let privilegedDelete: PrivilegedDelete?
        /// Specifies the retention period of an FSx for ONTAP SnapLock volume.
        public let retentionPeriod: SnaplockRetentionPeriod?
        /// Specifies the retention mode of an FSx for ONTAP SnapLock volume.  After it is set, it can't be changed.  You can choose one of the following retention modes:     COMPLIANCE: Files transitioned to write once, read many (WORM) on a Compliance volume can't be deleted  until their retention periods expire. This retention mode is used to address government or industry-specific mandates or to protect  against ransomware attacks. For more information,  see SnapLock Compliance.     ENTERPRISE: Files transitioned to WORM on an Enterprise volume can be deleted by authorized users  before their retention periods expire using privileged delete. This retention mode is used to advance an organization's data integrity  and internal compliance or to test retention settings before using SnapLock Compliance. For more information, see  SnapLock Enterprise.
        public let snaplockType: SnaplockType?
        /// Enables or disables volume-append mode  on an FSx for ONTAP SnapLock volume. Volume-append mode allows you to  create WORM-appendable files and write data to them incrementally.  The default value is false.  For more information, see Volume-append mode.
        public let volumeAppendModeEnabled: Bool?

        @inlinable
        public init(auditLogVolume: Bool? = nil, autocommitPeriod: AutocommitPeriod? = nil, privilegedDelete: PrivilegedDelete? = nil, retentionPeriod: SnaplockRetentionPeriod? = nil, snaplockType: SnaplockType? = nil, volumeAppendModeEnabled: Bool? = nil) {
            self.auditLogVolume = auditLogVolume
            self.autocommitPeriod = autocommitPeriod
            self.privilegedDelete = privilegedDelete
            self.retentionPeriod = retentionPeriod
            self.snaplockType = snaplockType
            self.volumeAppendModeEnabled = volumeAppendModeEnabled
        }

        private enum CodingKeys: String, CodingKey {
            case auditLogVolume = "AuditLogVolume"
            case autocommitPeriod = "AutocommitPeriod"
            case privilegedDelete = "PrivilegedDelete"
            case retentionPeriod = "RetentionPeriod"
            case snaplockType = "SnaplockType"
            case volumeAppendModeEnabled = "VolumeAppendModeEnabled"
        }
    }

    /// The retention-period triple (default / maximum / minimum) for a SnapLock volume.
    /// Encodable and decodable: used in both requests and responses.
    public struct SnaplockRetentionPeriod: AWSEncodableShape & AWSDecodableShape {
        /// The retention period assigned to a write once, read many (WORM) file by default if an explicit retention period is not set for an  FSx for ONTAP SnapLock volume. The default retention period must be greater than or equal to  the minimum retention period and less than or equal to the maximum retention period.
        public let defaultRetention: RetentionPeriod?
        /// The longest retention period that can be assigned to a WORM file on  an FSx for ONTAP SnapLock volume.
        public let maximumRetention: RetentionPeriod?
        /// The shortest retention period that can be assigned to a WORM file on an FSx for ONTAP SnapLock volume.
        public let minimumRetention: RetentionPeriod?

        @inlinable
        public init(defaultRetention: RetentionPeriod? = nil, maximumRetention: RetentionPeriod? = nil, minimumRetention: RetentionPeriod? = nil) {
            self.defaultRetention = defaultRetention
            self.maximumRetention = maximumRetention
            self.minimumRetention = minimumRetention
        }

        /// Validates each nested retention period; the default>=min<=max ordering
        /// relationship described above is enforced server-side, not here.
        public func validate(name: String) throws {
            try self.defaultRetention?.validate(name: "\(name).defaultRetention")
            try self.maximumRetention?.validate(name: "\(name).maximumRetention")
            try self.minimumRetention?.validate(name: "\(name).minimumRetention")
        }

        private enum CodingKeys: String, CodingKey {
            case defaultRetention = "DefaultRetention"
            case maximumRetention = "MaximumRetention"
            case minimumRetention = "MinimumRetention"
        }
    }

    /// A snapshot of an Amazon FSx volume. Decode-only: returned by the service, never sent.
    public struct Snapshot: AWSDecodableShape {
        /// A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system.
        public let administrativeActions: [AdministrativeAction]?
        // Undocumented in the service model; presumably the snapshot's creation timestamp — confirm against the FSx API reference.
        public let creationTime: Date?
        /// The lifecycle status of the snapshot.    PENDING - Amazon FSx hasn't started creating the snapshot.    CREATING - Amazon FSx is creating the snapshot.    DELETING - Amazon FSx is deleting the snapshot.    AVAILABLE - The snapshot is fully available.
        public let lifecycle: SnapshotLifecycle?
        // Undocumented in the service model; presumably describes why the lifecycle state changed — confirm against the FSx API reference.
        public let lifecycleTransitionReason: LifecycleTransitionReason?
        /// The name of the snapshot.
        public let name: String?
        // Undocumented in the service model; presumably the Amazon Resource Name of the snapshot.
        public let resourceARN: String?
        /// The ID of the snapshot.
        public let snapshotId: String?
        // Undocumented in the service model; presumably the tags attached to the snapshot.
        public let tags: [Tag]?
        /// The ID of the volume that the snapshot is of.
        public let volumeId: String?

        @inlinable
        public init(administrativeActions: [AdministrativeAction]? = nil, creationTime: Date? = nil, lifecycle: SnapshotLifecycle? = nil, lifecycleTransitionReason: LifecycleTransitionReason? = nil, name: String? = nil, resourceARN: String? = nil, snapshotId: String? = nil, tags: [Tag]? = nil, volumeId: String? = nil) {
            self.administrativeActions = administrativeActions
            self.creationTime = creationTime
            self.lifecycle = lifecycle
            self.lifecycleTransitionReason = lifecycleTransitionReason
            self.name = name
            self.resourceARN = resourceARN
            self.snapshotId = snapshotId
            self.tags = tags
            self.volumeId = volumeId
        }

        private enum CodingKeys: String, CodingKey {
            case administrativeActions = "AdministrativeActions"
            case creationTime = "CreationTime"
            case lifecycle = "Lifecycle"
            case lifecycleTransitionReason = "LifecycleTransitionReason"
            case name = "Name"
            case resourceARN = "ResourceARN"
            case snapshotId = "SnapshotId"
            case tags = "Tags"
            case volumeId = "VolumeId"
        }
    }

    /// A name/values filter used to narrow DescribeSnapshots-style requests. Encode-only.
    public struct SnapshotFilter: AWSEncodableShape {
        /// The name of the filter to use. You can filter by the file-system-id or by volume-id.
        public let name: SnapshotFilterName?
        /// The file-system-id or volume-id that you are filtering for.
        public let values: [String]?

        @inlinable
        public init(name: SnapshotFilterName? = nil, values: [String]? = nil) {
            self.name = name
            self.values = values
        }

        /// Validates client-side constraints copied from the AWS service model:
        /// each value is 1-128 chars of the allowed ID/wildcard character set; at most 20 values.
        public func validate(name: String) throws {
            try self.values?.forEach {
                try validate($0, name: "values[]", parent: name, max: 128)
                try validate($0, name: "values[]", parent: name, min: 1)
                try validate($0, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
            }
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        private enum CodingKeys: String, CodingKey {
            case name = "Name"
            case values = "Values"
        }
    }

    /// Error shape: the service could not use the source backup (exact semantics per the
    /// FSx API reference; this struct only carries the decoded error payload).
    public struct SourceBackupUnavailable: AWSErrorShape {
        // The ID of the backup the error refers to (undocumented in the service model).
        public let backupId: String?
        // Human-readable error message returned by the service.
        public let message: String?

        @inlinable
        public init(backupId: String? = nil, message: String? = nil) {
            self.backupId = backupId
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case backupId = "BackupId"
            case message = "Message"
        }
    }

    /// Request shape for StartMisconfiguredStateRecovery. Encode-only.
    public struct StartMisconfiguredStateRecoveryRequest: AWSEncodableShape {
        // Idempotency token; auto-generated by default via idempotencyToken() below.
        public let clientRequestToken: String?
        // The ID of the file system (fs-...) to recover.
        public let fileSystemId: String?

        @inlinable
        public init(clientRequestToken: String? = StartMisconfiguredStateRecoveryRequest.idempotencyToken(), fileSystemId: String? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
        }

        /// Validates client-side constraints copied from the AWS service model.
        /// NOTE(review): the token pattern's "A-za-z" range and {0,63} quantifier come
        /// verbatim from the upstream model (min length 1 is enforced separately); do not "fix" here.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
        }
    }

    /// Response shape for StartMisconfiguredStateRecovery. Decode-only.
    public struct StartMisconfiguredStateRecoveryResponse: AWSDecodableShape {
        // The description of the file system after the recovery was started.
        public let fileSystem: FileSystem?

        @inlinable
        public init(fileSystem: FileSystem? = nil) {
            self.fileSystem = fileSystem
        }

        private enum CodingKeys: String, CodingKey {
            case fileSystem = "FileSystem"
        }
    }

    /// Describes an FSx for ONTAP storage virtual machine (SVM). Decode-only.
    public struct StorageVirtualMachine: AWSDecodableShape {
        /// Describes the Microsoft Active Directory configuration to which the SVM is joined, if applicable.
        public let activeDirectoryConfiguration: SvmActiveDirectoryConfiguration?
        // Undocumented in the service model; presumably the SVM's creation timestamp — confirm against the FSx API reference.
        public let creationTime: Date?
        /// The endpoints  that are used to access data or to manage the SVM using the NetApp ONTAP CLI, REST API, or NetApp CloudManager. They are the Iscsi, Management, Nfs, and Smb endpoints.
        public let endpoints: SvmEndpoints?
        // The ID of the file system that hosts this SVM (undocumented in the service model).
        public let fileSystemId: String?
        /// Describes the SVM's lifecycle status.    CREATED - The SVM is fully available for use.    CREATING - Amazon FSx is creating the new SVM.    DELETING - Amazon FSx is deleting an existing SVM.    FAILED - Amazon FSx was unable to create the SVM.    MISCONFIGURED - The SVM is in a failed but recoverable state.    PENDING - Amazon FSx has not started creating the SVM.
        public let lifecycle: StorageVirtualMachineLifecycle?
        /// Describes why the SVM lifecycle state changed.
        public let lifecycleTransitionReason: LifecycleTransitionReason?
        /// The name of the SVM, if provisioned.
        public let name: String?
        // Undocumented in the service model; presumably the Amazon Resource Name of the SVM.
        public let resourceARN: String?
        /// The security style of the root volume of the SVM.
        public let rootVolumeSecurityStyle: StorageVirtualMachineRootVolumeSecurityStyle?
        /// The SVM's system generated unique ID.
        public let storageVirtualMachineId: String?
        /// Describes the SVM's subtype.
        public let subtype: StorageVirtualMachineSubtype?
        // Undocumented in the service model; presumably the tags attached to the SVM.
        public let tags: [Tag]?
        /// The SVM's UUID (universally unique identifier).
        public let uuid: String?

        @inlinable
        public init(activeDirectoryConfiguration: SvmActiveDirectoryConfiguration? = nil, creationTime: Date? = nil, endpoints: SvmEndpoints? = nil, fileSystemId: String? = nil, lifecycle: StorageVirtualMachineLifecycle? = nil, lifecycleTransitionReason: LifecycleTransitionReason? = nil, name: String? = nil, resourceARN: String? = nil, rootVolumeSecurityStyle: StorageVirtualMachineRootVolumeSecurityStyle? = nil, storageVirtualMachineId: String? = nil, subtype: StorageVirtualMachineSubtype? = nil, tags: [Tag]? = nil, uuid: String? = nil) {
            self.activeDirectoryConfiguration = activeDirectoryConfiguration
            self.creationTime = creationTime
            self.endpoints = endpoints
            self.fileSystemId = fileSystemId
            self.lifecycle = lifecycle
            self.lifecycleTransitionReason = lifecycleTransitionReason
            self.name = name
            self.resourceARN = resourceARN
            self.rootVolumeSecurityStyle = rootVolumeSecurityStyle
            self.storageVirtualMachineId = storageVirtualMachineId
            self.subtype = subtype
            self.tags = tags
            self.uuid = uuid
        }

        private enum CodingKeys: String, CodingKey {
            case activeDirectoryConfiguration = "ActiveDirectoryConfiguration"
            case creationTime = "CreationTime"
            case endpoints = "Endpoints"
            case fileSystemId = "FileSystemId"
            case lifecycle = "Lifecycle"
            case lifecycleTransitionReason = "LifecycleTransitionReason"
            case name = "Name"
            case resourceARN = "ResourceARN"
            case rootVolumeSecurityStyle = "RootVolumeSecurityStyle"
            case storageVirtualMachineId = "StorageVirtualMachineId"
            case subtype = "Subtype"
            case tags = "Tags"
            // Note: wire key is "UUID", not "Uuid" — keep as generated.
            case uuid = "UUID"
        }
    }

    /// A name/values filter used to narrow DescribeStorageVirtualMachines-style requests. Encode-only.
    public struct StorageVirtualMachineFilter: AWSEncodableShape {
        /// The name for this filter.
        public let name: StorageVirtualMachineFilterName?
        /// The values of the filter. These are all the values for any of the applied filters.
        public let values: [String]?

        @inlinable
        public init(name: StorageVirtualMachineFilterName? = nil, values: [String]? = nil) {
            self.name = name
            self.values = values
        }

        /// Validates client-side constraints copied from the AWS service model:
        /// each value is 1-128 chars of the allowed ID/wildcard character set; at most 20 values.
        public func validate(name: String) throws {
            try self.values?.forEach {
                try validate($0, name: "values[]", parent: name, max: 128)
                try validate($0, name: "values[]", parent: name, min: 1)
                try validate($0, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
            }
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        private enum CodingKeys: String, CodingKey {
            case name = "Name"
            case values = "Values"
        }
    }

    /// The Active Directory configuration reported for an SVM. Decode-only.
    public struct SvmActiveDirectoryConfiguration: AWSDecodableShape {
        /// The NetBIOS name of the AD computer object to which the SVM is joined.
        public let netBiosName: String?
        // The self-managed AD attributes of the directory the SVM is joined to (undocumented in the service model).
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryAttributes?

        @inlinable
        public init(netBiosName: String? = nil, selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryAttributes? = nil) {
            self.netBiosName = netBiosName
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
        }

        private enum CodingKeys: String, CodingKey {
            case netBiosName = "NetBiosName"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
        }
    }

    /// A single SVM endpoint: its DNS name plus IPv4/IPv6 addresses. Decode-only.
    public struct SvmEndpoint: AWSDecodableShape {
        // The endpoint's DNS name (undocumented in the service model).
        public let dnsName: String?
        /// The SVM endpoint's IPv4 addresses.
        public let ipAddresses: [String]?
        /// The SVM endpoint's IPv6 addresses.
        public let ipv6Addresses: [String]?

        @inlinable
        public init(dnsName: String? = nil, ipAddresses: [String]? = nil, ipv6Addresses: [String]? = nil) {
            self.dnsName = dnsName
            self.ipAddresses = ipAddresses
            self.ipv6Addresses = ipv6Addresses
        }

        private enum CodingKeys: String, CodingKey {
            // Note: wire key is all-caps "DNSName" — keep as generated.
            case dnsName = "DNSName"
            case ipAddresses = "IpAddresses"
            case ipv6Addresses = "Ipv6Addresses"
        }
    }

    /// The set of endpoints (iSCSI, management, NFS, SMB) exposed by an SVM. Decode-only.
    public struct SvmEndpoints: AWSDecodableShape {
        /// An endpoint for connecting using the Internet Small Computer Systems Interface (iSCSI)  protocol.
        public let iscsi: SvmEndpoint?
        /// An endpoint for managing SVMs using the NetApp ONTAP CLI, NetApp ONTAP API, or NetApp CloudManager.
        public let management: SvmEndpoint?
        /// An endpoint for connecting using the Network File System (NFS) protocol.
        public let nfs: SvmEndpoint?
        /// An endpoint for connecting using the Server Message Block (SMB) protocol.
        public let smb: SvmEndpoint?

        @inlinable
        public init(iscsi: SvmEndpoint? = nil, management: SvmEndpoint? = nil, nfs: SvmEndpoint? = nil, smb: SvmEndpoint? = nil) {
            self.iscsi = iscsi
            self.management = management
            self.nfs = nfs
            self.smb = smb
        }

        private enum CodingKeys: String, CodingKey {
            case iscsi = "Iscsi"
            case management = "Management"
            case nfs = "Nfs"
            case smb = "Smb"
        }
    }

    /// A key/value tag attached to an Amazon FSx resource. Used in both requests and responses.
    public struct Tag: AWSEncodableShape & AWSDecodableShape {
        /// A value that specifies the TagKey, the name of the tag. Tag keys must be unique for the resource to which they are attached.
        public let key: String?
        /// A value that specifies the TagValue, the value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key-value pair in a tag set of finances : April and also of payroll : April.
        public let value: String?

        @inlinable
        public init(key: String? = nil, value: String? = nil) {
            self.key = key
            self.value = value
        }

        /// Validates client-side constraints copied from the AWS service model.
        /// Keys are 1-128 chars; values are up to 256 chars (empty allowed — no min check by design).
        public func validate(name: String) throws {
            try self.validate(self.key, name: "key", parent: name, max: 128)
            try self.validate(self.key, name: "key", parent: name, min: 1)
            try self.validate(self.key, name: "key", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")
            try self.validate(self.value, name: "value", parent: name, max: 256)
            try self.validate(self.value, name: "value", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")
        }

        private enum CodingKeys: String, CodingKey {
            case key = "Key"
            case value = "Value"
        }
    }

    /// Request shape for TagResource. Encode-only.
    public struct TagResourceRequest: AWSEncodableShape {
        /// The Amazon Resource Name (ARN) of the Amazon FSx resource that you want to tag.
        public let resourceARN: String?
        /// A list of tags for the resource. If a tag with a given key already exists, the value is replaced by the one specified in this parameter.
        public let tags: [Tag]?

        @inlinable
        public init(resourceARN: String? = nil, tags: [Tag]? = nil) {
            self.resourceARN = resourceARN
            self.tags = tags
        }

        /// Validates client-side constraints copied from the AWS service model:
        /// ARN length/shape, each tag's own constraints, and 1-50 tags per call.
        public func validate(name: String) throws {
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 512)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 8)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?<!-)):){4}(?!/).{0,1024}$")
            try self.tags?.forEach {
                try $0.validate(name: "\(name).tags[]")
            }
            try self.validate(self.tags, name: "tags", parent: name, max: 50)
            try self.validate(self.tags, name: "tags", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case resourceARN = "ResourceARN"
            case tags = "Tags"
        }
    }

    /// Response shape for TagResource; the operation returns no payload fields.
    public struct TagResourceResponse: AWSDecodableShape {
        public init() {}
    }

    /// The data-tiering policy for an FSx for ONTAP volume. Used in both requests and responses.
    public struct TieringPolicy: AWSEncodableShape & AWSDecodableShape {
        /// Specifies the number of days that user data in a volume must remain inactive before it is considered "cold"  and moved to the capacity pool. Used with the AUTO and SNAPSHOT_ONLY tiering policies.  Enter a whole number between 2 and 183. Default values are 31 days for AUTO and 2 days for  SNAPSHOT_ONLY.
        public let coolingPeriod: Int?
        /// Specifies the tiering policy used to transition data. Default value is SNAPSHOT_ONLY.    SNAPSHOT_ONLY - moves cold snapshots to the capacity pool storage tier.    AUTO - moves cold user data and snapshots to the capacity pool storage tier based on your access patterns.    ALL - moves all user data blocks in both the active file system and Snapshot copies to the storage pool tier.    NONE - keeps a volume's data in the primary storage tier, preventing it from being moved to the capacity pool tier.
        public let name: TieringPolicyName?

        @inlinable
        public init(coolingPeriod: Int? = nil, name: TieringPolicyName? = nil) {
            self.coolingPeriod = coolingPeriod
            self.name = name
        }

        /// Validates the 2-183 day cooling-period bound from the AWS service model.
        public func validate(name: String) throws {
            try self.validate(self.coolingPeriod, name: "coolingPeriod", parent: name, max: 183)
            try self.validate(self.coolingPeriod, name: "coolingPeriod", parent: name, min: 2)
        }

        private enum CodingKeys: String, CodingKey {
            case coolingPeriod = "CoolingPeriod"
            case name = "Name"
        }
    }

    /// Error shape: the S3 access point attachment limit was reached.
    public struct TooManyAccessPoints: AWSErrorShape {
        /// An error code indicating that you have reached the maximum number of S3 access points attachments allowed for your account in this Amazon Web Services Region, or for the file system.
        public let errorCode: String?
        // Human-readable error message returned by the service.
        public let message: String?

        @inlinable
        public init(errorCode: String? = nil, message: String? = nil) {
            self.errorCode = errorCode
            self.message = message
        }

        private enum CodingKeys: String, CodingKey {
            case errorCode = "ErrorCode"
            case message = "Message"
        }
    }

    /// Request shape for UntagResource. Encode-only.
    public struct UntagResourceRequest: AWSEncodableShape {
        /// The ARN of the Amazon FSx resource to untag.
        public let resourceARN: String?
        /// A list of keys of tags on the resource to untag. In case the tag key doesn't exist, the call will still succeed to be idempotent.
        public let tagKeys: [String]?

        @inlinable
        public init(resourceARN: String? = nil, tagKeys: [String]? = nil) {
            self.resourceARN = resourceARN
            self.tagKeys = tagKeys
        }

        /// Validates client-side constraints copied from the AWS service model:
        /// ARN length/shape, each key 1-128 chars of the tag character set, 1-50 keys per call.
        public func validate(name: String) throws {
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 512)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 8)
            try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?<!-)):){4}(?!/).{0,1024}$")
            try self.tagKeys?.forEach {
                try validate($0, name: "tagKeys[]", parent: name, max: 128)
                try validate($0, name: "tagKeys[]", parent: name, min: 1)
                try validate($0, name: "tagKeys[]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")
            }
            try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50)
            try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1)
        }

        private enum CodingKeys: String, CodingKey {
            case resourceARN = "ResourceARN"
            case tagKeys = "TagKeys"
        }
    }

    /// Response shape for UntagResource; the operation returns no payload fields.
    public struct UntagResourceResponse: AWSDecodableShape {
        public init() {}
    }

    /// Request shape for UpdateDataRepositoryAssociation. Encode-only.
    public struct UpdateDataRepositoryAssociationRequest: AWSEncodableShape {
        /// The ID of the data repository association that you are updating.
        public let associationId: String?
        // Idempotency token; auto-generated by default via idempotencyToken() below.
        public let clientRequestToken: String?
        /// For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.
        public let importedFileChunkSize: Int?
        /// The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
        public let s3: S3DataRepositoryConfiguration?

        @inlinable
        public init(associationId: String? = nil, clientRequestToken: String? = UpdateDataRepositoryAssociationRequest.idempotencyToken(), importedFileChunkSize: Int? = nil, s3: S3DataRepositoryConfiguration? = nil) {
            self.associationId = associationId
            self.clientRequestToken = clientRequestToken
            self.importedFileChunkSize = importedFileChunkSize
            self.s3 = s3
        }

        /// Validates client-side constraints copied from the AWS service model
        /// (dra-... association ID shape, token shape, 1-512000 MiB chunk size),
        /// then delegates to the nested S3 configuration's own validation.
        public func validate(name: String) throws {
            try self.validate(self.associationId, name: "associationId", parent: name, max: 23)
            try self.validate(self.associationId, name: "associationId", parent: name, min: 13)
            try self.validate(self.associationId, name: "associationId", parent: name, pattern: "^(dra-[0-9a-f]{8,})$")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, max: 512000)
            try self.validate(self.importedFileChunkSize, name: "importedFileChunkSize", parent: name, min: 1)
            try self.s3?.validate(name: "\(name).s3")
        }

        private enum CodingKeys: String, CodingKey {
            case associationId = "AssociationId"
            case clientRequestToken = "ClientRequestToken"
            case importedFileChunkSize = "ImportedFileChunkSize"
            case s3 = "S3"
        }
    }

    /// Response shape for UpdateDataRepositoryAssociation. Decode-only.
    public struct UpdateDataRepositoryAssociationResponse: AWSDecodableShape {
        /// The response object returned after the data repository association is updated.
        public let association: DataRepositoryAssociation?

        @inlinable
        public init(association: DataRepositoryAssociation? = nil) {
            self.association = association
        }

        private enum CodingKeys: String, CodingKey {
            case association = "Association"
        }
    }

    /// Lustre configuration updates for an Amazon File Cache resource. Encode-only.
    public struct UpdateFileCacheLustreConfiguration: AWSEncodableShape {
        // Preferred weekly maintenance window, "d:HH:MM" in UTC (d = 1-7) per the pattern below.
        public let weeklyMaintenanceStartTime: String?

        @inlinable
        public init(weeklyMaintenanceStartTime: String? = nil) {
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Validates the "d:HH:MM" maintenance-window format (exactly 7 characters).
        public func validate(name: String) throws {
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        private enum CodingKeys: String, CodingKey {
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    /// Request shape for UpdateFileCache. Encode-only.
    public struct UpdateFileCacheRequest: AWSEncodableShape {
        // Idempotency token; auto-generated by default via idempotencyToken() below.
        public let clientRequestToken: String?
        /// The ID of the cache that you are updating.
        public let fileCacheId: String?
        /// The configuration updates for an Amazon File Cache resource.
        public let lustreConfiguration: UpdateFileCacheLustreConfiguration?

        @inlinable
        public init(clientRequestToken: String? = UpdateFileCacheRequest.idempotencyToken(), fileCacheId: String? = nil, lustreConfiguration: UpdateFileCacheLustreConfiguration? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileCacheId = fileCacheId
            self.lustreConfiguration = lustreConfiguration
        }

        /// Validates client-side constraints copied from the AWS service model
        /// (token shape, fc-... cache ID shape), then the nested Lustre configuration.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, max: 21)
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, min: 11)
            try self.validate(self.fileCacheId, name: "fileCacheId", parent: name, pattern: "^(fc-[0-9a-f]{8,})$")
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
        }

        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileCacheId = "FileCacheId"
            case lustreConfiguration = "LustreConfiguration"
        }
    }

    /// Response shape for UpdateFileCache. Decode-only.
    public struct UpdateFileCacheResponse: AWSDecodableShape {
        /// A description of the cache that was updated.
        public let fileCache: FileCache?

        @inlinable
        public init(fileCache: FileCache? = nil) {
            self.fileCache = fileCache
        }

        private enum CodingKeys: String, CodingKey {
            case fileCache = "FileCache"
        }
    }

    public struct UpdateFileSystemLustreConfiguration: AWSEncodableShape {
        ///  (Optional) When you create your file system, your existing S3 objects appear as file and directory listings.  Use this property to choose how Amazon FSx keeps your file and directory listing up to date  as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:    NONE - (Default) AutoImport is off. Amazon FSx only updates  file and directory listings from the linked S3 bucket  when the file system is created. FSx does not update the file and directory  listing for any new or changed objects after choosing this option.    NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that  do not currently exist in the FSx file system.     NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports  file and directory listings of any new objects added to the S3 bucket and any  existing objects that are changed in the S3 bucket after you choose this option.    NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any  existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.   This parameter is not supported for file systems with a data repository association.
        public let autoImportPolicy: AutoImportPolicyType?
        /// The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
        public let automaticBackupRetentionDays: Int?
        /// The preferred time to start the daily automatic backup, formatted HH:MM in the UTC time zone (format enforced in `validate(name:)`).
        public let dailyAutomaticBackupStartTime: String?
        /// Sets the data compression configuration for the file system. DataCompressionType can have the following values:    NONE - Data compression is turned off for the file system.    LZ4 - Data compression is turned on with the LZ4 algorithm.   If you don't use DataCompressionType, the file system retains its current data compression configuration. For more information, see Lustre data compression.
        public let dataCompressionType: DataCompressionType?
        /// Specifies the optional provisioned SSD read cache on Amazon FSx for Lustre file systems that use the Intelligent-Tiering storage class.
        public let dataReadCacheConfiguration: LustreReadCacheConfiguration?
        /// The Lustre logging configuration used when updating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs.
        public let logConfiguration: LustreLogCreateConfiguration?
        /// The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance.
        public let metadataConfiguration: UpdateFileSystemLustreMetadataConfiguration?
        /// The throughput of an Amazon FSx for Lustre Persistent SSD-based file system, measured in megabytes per second per tebibyte (MB/s/TiB). You can increase or decrease your file system's throughput. Valid values depend on the deployment type of the file system, as follows:   For PERSISTENT_1 SSD-based deployment types, valid values are 50, 100, and 200 MB/s/TiB.   For PERSISTENT_2 SSD-based deployment types, valid values are 125, 250, 500, and 1000 MB/s/TiB.   For more information, see   Managing throughput capacity.
        public let perUnitStorageThroughput: Int?
        /// The Lustre root squash configuration used when updating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user.
        public let rootSquashConfiguration: LustreRootSquashConfiguration?
        /// The throughput of an Amazon FSx for Lustre file system using an Intelligent-Tiering storage class, measured in megabytes per second (MBps). You can only increase your file system's throughput. Valid values are 4000 MBps or multiples of 4000 MBps.
        public let throughputCapacity: Int?
        /// (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer. Every property is optional and defaults to `nil`,
        /// so callers supply only the settings they want to change in the update request.
        @inlinable
        public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, dataReadCacheConfiguration: LustreReadCacheConfiguration? = nil, logConfiguration: LustreLogCreateConfiguration? = nil, metadataConfiguration: UpdateFileSystemLustreMetadataConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.autoImportPolicy = autoImportPolicy
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.dataCompressionType = dataCompressionType
            self.dataReadCacheConfiguration = dataReadCacheConfiguration
            self.logConfiguration = logConfiguration
            self.metadataConfiguration = metadataConfiguration
            self.perUnitStorageThroughput = perUnitStorageThroughput
            self.rootSquashConfiguration = rootSquashConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Checks the modeled client-side constraints (numeric ranges, string lengths, and
        /// regex patterns) and throws if any provided value is out of bounds or malformed.
        /// Checks run in declaration order, so the first failing field determines the error.
        public func validate(name: String) throws {
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            // Exactly 5 characters matching HH:MM.
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.dataReadCacheConfiguration?.validate(name: "\(name).dataReadCacheConfiguration")
            try self.logConfiguration?.validate(name: "\(name).logConfiguration")
            try self.metadataConfiguration?.validate(name: "\(name).metadataConfiguration")
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, max: 1000)
            try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, min: 12)
            try self.rootSquashConfiguration?.validate(name: "\(name).rootSquashConfiguration")
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 2000000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 4000)
            // Exactly 7 characters matching d:HH:MM.
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case autoImportPolicy = "AutoImportPolicy"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case dataCompressionType = "DataCompressionType"
            case dataReadCacheConfiguration = "DataReadCacheConfiguration"
            case logConfiguration = "LogConfiguration"
            case metadataConfiguration = "MetadataConfiguration"
            case perUnitStorageThroughput = "PerUnitStorageThroughput"
            case rootSquashConfiguration = "RootSquashConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct UpdateFileSystemLustreMetadataConfiguration: AWSEncodableShape {
        /// (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system.   For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.   For Intelligent-Tiering file systems, valid values are 6000 and 12000.   The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.
        public let iops: Int?
        /// The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.   To increase the Metadata IOPS or to switch an SSD file system from AUTOMATIC, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.   To switch from USER_PROVISIONED mode on an SSD file system, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.    If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.   AUTOMATIC mode is not supported on Intelligent-Tiering file systems. For Intelligent-Tiering file systems, use USER_PROVISIONED mode.
        public let mode: MetadataConfigurationMode?

        /// Memberwise initializer; both fields default to `nil`.
        @inlinable
        public init(iops: Int? = nil, mode: MetadataConfigurationMode? = nil) {
            self.iops = iops
            self.mode = mode
        }

        /// Checks the modeled client-side bounds on `iops` and throws when out of range.
        public func validate(name: String) throws {
            // Only the min/max bounds are enforced here; the discrete valid values listed in
            // the `iops` documentation (e.g. multiples of 12000) are presumably enforced
            // server-side — confirm against the FSx API reference.
            try self.validate(self.iops, name: "iops", parent: name, max: 192000)
            try self.validate(self.iops, name: "iops", parent: name, min: 1500)
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case iops = "Iops"
            case mode = "Mode"
        }
    }

    public struct UpdateFileSystemOntapConfiguration: AWSEncodableShape {
        /// (Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system.
        public let addRouteTableIds: [String]?
        /// The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
        public let automaticBackupRetentionDays: Int?
        /// The preferred time to start the daily automatic backup, formatted HH:MM in the UTC time zone (format enforced in `validate(name:)`).
        public let dailyAutomaticBackupStartTime: String?
        /// The SSD IOPS (input output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned.  For more information, see  File system storage capacity and IOPS.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// (Multi-AZ only) Specifies the IPv6 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /118 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpv6AddressRange: String?
        /// Update the password for the fsxadmin user by entering a new password.  You use the fsxadmin user to access the NetApp ONTAP CLI and REST API to manage your file system resources.  For more information, see  Managing resources using NetApp Application.
        public let fsxAdminPassword: String?
        /// Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system.  If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity,  Iops, and ThroughputCapacity. For more information, see  High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support  (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see  Using block storage protocols.
        public let haPairs: Int?
        /// (Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file system. You can use the  API operation to retrieve the list of VPC route table IDs for a file system.
        public let removeRouteTableIds: [String]?
        /// Enter a new value to change the amount of throughput capacity for the file system in megabytes per second (MBps). For more information, see  Managing throughput capacity  in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value.   The value of ThroughputCapacity when divided by the value of HAPairs is outside of the valid range for ThroughputCapacity.
        public let throughputCapacity: Int?
        /// Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.  This field and ThroughputCapacity cannot be defined in the same API call, but one is required. This field and ThroughputCapacity are the same for file systems with one HA pair.   For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.   For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.   For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.   Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:   The value of ThroughputCapacity and ThroughputCapacityPerHAPair are not the same value for file systems with one HA pair.   The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 and 12).   The value of ThroughputCapacityPerHAPair is not a valid value.
        public let throughputCapacityPerHAPair: Int?
        /// The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone (format enforced in `validate(name:)`).
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer. Every property is optional and defaults to `nil`,
        /// so callers supply only the settings they want to change in the update request.
        @inlinable
        public init(addRouteTableIds: [String]? = nil, automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, endpointIpv6AddressRange: String? = nil, fsxAdminPassword: String? = nil, haPairs: Int? = nil, removeRouteTableIds: [String]? = nil, throughputCapacity: Int? = nil, throughputCapacityPerHAPair: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.addRouteTableIds = addRouteTableIds
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.diskIopsConfiguration = diskIopsConfiguration
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.fsxAdminPassword = fsxAdminPassword
            self.haPairs = haPairs
            self.removeRouteTableIds = removeRouteTableIds
            self.throughputCapacity = throughputCapacity
            self.throughputCapacityPerHAPair = throughputCapacityPerHAPair
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Checks the modeled client-side constraints (numeric ranges, string lengths, and
        /// regex patterns) and throws if any provided value is out of bounds or malformed.
        /// Checks run in declaration order, so the first failing field determines the error.
        public func validate(name: String) throws {
            // Each route table ID must look like "rtb-" followed by at least 8 hex digits.
            try self.addRouteTableIds?.forEach {
                try validate($0, name: "addRouteTableIds[]", parent: name, max: 21)
                try validate($0, name: "addRouteTableIds[]", parent: name, min: 12)
                try validate($0, name: "addRouteTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.addRouteTableIds, name: "addRouteTableIds", parent: name, max: 50)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            // Exactly 5 characters matching HH:MM.
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, max: 43)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, min: 4)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,43}$")
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, max: 50)
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, min: 8)
            try self.validate(self.fsxAdminPassword, name: "fsxAdminPassword", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{8,50}$")
            try self.validate(self.haPairs, name: "haPairs", parent: name, max: 12)
            try self.validate(self.haPairs, name: "haPairs", parent: name, min: 1)
            try self.removeRouteTableIds?.forEach {
                try validate($0, name: "removeRouteTableIds[]", parent: name, max: 21)
                try validate($0, name: "removeRouteTableIds[]", parent: name, min: 12)
                try validate($0, name: "removeRouteTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.removeRouteTableIds, name: "removeRouteTableIds", parent: name, max: 50)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            try self.validate(self.throughputCapacityPerHAPair, name: "throughputCapacityPerHAPair", parent: name, max: 6144)
            try self.validate(self.throughputCapacityPerHAPair, name: "throughputCapacityPerHAPair", parent: name, min: 128)
            // Exactly 7 characters matching d:HH:MM.
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case addRouteTableIds = "AddRouteTableIds"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case fsxAdminPassword = "FsxAdminPassword"
            case haPairs = "HAPairs"
            case removeRouteTableIds = "RemoveRouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case throughputCapacityPerHAPair = "ThroughputCapacityPerHAPair"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct UpdateFileSystemOpenZFSConfiguration: AWSEncodableShape {
        /// (Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for OpenZFS file system.
        public let addRouteTableIds: [String]?
        /// The number of days to retain automatic backups. Setting this property to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
        public let automaticBackupRetentionDays: Int?
        /// A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.
        public let copyTagsToVolumes: Bool?
        /// The preferred time to start the daily automatic backup, formatted HH:MM in the UTC time zone (format enforced in `validate(name:)`).
        public let dailyAutomaticBackupStartTime: String?
        /// The SSD IOPS (input/output operations per second) configuration for the file system.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// (Multi-AZ only) Specifies the IPv6 address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /118 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
        public let endpointIpv6AddressRange: String?
        ///  The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
        public let readCacheConfiguration: OpenZFSReadCacheConfiguration?
        /// (Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route tables to disassociate (remove) from your Amazon FSx for OpenZFS file system. You can use the  API operation to retrieve the list of VPC route table IDs for a file system.
        public let removeRouteTableIds: [String]?
        /// The throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second  (MB/s). Valid values depend on the DeploymentType you choose, as follows:   For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s.   For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s.
        public let throughputCapacity: Int?
        /// The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone (format enforced in `validate(name:)`).
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer. Every property is optional and defaults to `nil`,
        /// so callers supply only the settings they want to change in the update request.
        @inlinable
        public init(addRouteTableIds: [String]? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, copyTagsToVolumes: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, endpointIpv6AddressRange: String? = nil, readCacheConfiguration: OpenZFSReadCacheConfiguration? = nil, removeRouteTableIds: [String]? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.addRouteTableIds = addRouteTableIds
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.copyTagsToVolumes = copyTagsToVolumes
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.diskIopsConfiguration = diskIopsConfiguration
            self.endpointIpv6AddressRange = endpointIpv6AddressRange
            self.readCacheConfiguration = readCacheConfiguration
            self.removeRouteTableIds = removeRouteTableIds
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Checks the modeled client-side constraints (numeric ranges, string lengths, and
        /// regex patterns) and throws if any provided value is out of bounds or malformed.
        /// Checks run in declaration order, so the first failing field determines the error.
        public func validate(name: String) throws {
            // Each route table ID must look like "rtb-" followed by at least 8 hex digits.
            try self.addRouteTableIds?.forEach {
                try validate($0, name: "addRouteTableIds[]", parent: name, max: 21)
                try validate($0, name: "addRouteTableIds[]", parent: name, min: 12)
                try validate($0, name: "addRouteTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.addRouteTableIds, name: "addRouteTableIds", parent: name, max: 50)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            // Exactly 5 characters matching HH:MM.
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, max: 43)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, min: 4)
            try self.validate(self.endpointIpv6AddressRange, name: "endpointIpv6AddressRange", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{4,43}$")
            try self.readCacheConfiguration?.validate(name: "\(name).readCacheConfiguration")
            try self.removeRouteTableIds?.forEach {
                try validate($0, name: "removeRouteTableIds[]", parent: name, max: 21)
                try validate($0, name: "removeRouteTableIds[]", parent: name, min: 12)
                try validate($0, name: "removeRouteTableIds[]", parent: name, pattern: "^(rtb-[0-9a-f]{8,})$")
            }
            try self.validate(self.removeRouteTableIds, name: "removeRouteTableIds", parent: name, max: 50)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            // Exactly 7 characters matching d:HH:MM.
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case addRouteTableIds = "AddRouteTableIds"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case copyTagsToVolumes = "CopyTagsToVolumes"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case endpointIpv6AddressRange = "EndpointIpv6AddressRange"
            case readCacheConfiguration = "ReadCacheConfiguration"
            case removeRouteTableIds = "RemoveRouteTableIds"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct UpdateFileSystemRequest: AWSEncodableShape {
        /// A string of up to 63 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.
        public let clientRequestToken: String?
        /// The ID of the file system that you are updating.
        public let fileSystemId: String?
        /// The Lustre version you are updating an FSx for Lustre file system to. Valid values are 2.12 and 2.15. The value you choose must be newer than the file system's current Lustre version.
        public let fileSystemTypeVersion: String?
        /// The configuration updates for an FSx for Lustre file system.
        public let lustreConfiguration: UpdateFileSystemLustreConfiguration?
        /// Changes the network type of an FSx for OpenZFS file system.
        public let networkType: NetworkType?
        /// The configuration updates for an FSx for NetApp ONTAP file system.
        public let ontapConfiguration: UpdateFileSystemOntapConfiguration?
        /// The configuration updates for an FSx for OpenZFS file system.
        public let openZFSConfiguration: UpdateFileSystemOpenZFSConfiguration?
        /// Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. For second-generation FSx for ONTAP file systems, you can also decrease the storage capacity. Specifies the storage capacity target value, in GiB, for the file system that you're updating.   You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress.  For Lustre file systems, the storage capacity target value can be the following:   For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity.   For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity.   For SCRATCH_1 file systems, you can't increase the storage capacity.   For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide. For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide. For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide. For ONTAP file systems, when increasing storage capacity, the storage capacity target value must be at least 10 percent greater than the current storage capacity value.
        /// When decreasing storage capacity on second-generation file systems, the target value must be at least 9 percent smaller than the current SSD storage capacity. For more information, see File system storage capacity and IOPS in the Amazon FSx for NetApp ONTAP User Guide.
        public let storageCapacity: Int?
        /// The storage type for the file system.
        public let storageType: StorageType?
        /// The configuration updates for an Amazon FSx for Windows File Server file system.
        public let windowsConfiguration: UpdateFileSystemWindowsConfiguration?

        /// Memberwise initializer. `clientRequestToken` defaults to a freshly generated
        /// idempotency token; all other properties default to `nil`, so callers supply
        /// only the settings they want to change.
        @inlinable
        public init(clientRequestToken: String? = UpdateFileSystemRequest.idempotencyToken(), fileSystemId: String? = nil, fileSystemTypeVersion: String? = nil, lustreConfiguration: UpdateFileSystemLustreConfiguration? = nil, networkType: NetworkType? = nil, ontapConfiguration: UpdateFileSystemOntapConfiguration? = nil, openZFSConfiguration: UpdateFileSystemOpenZFSConfiguration? = nil, storageCapacity: Int? = nil, storageType: StorageType? = nil, windowsConfiguration: UpdateFileSystemWindowsConfiguration? = nil) {
            self.clientRequestToken = clientRequestToken
            self.fileSystemId = fileSystemId
            self.fileSystemTypeVersion = fileSystemTypeVersion
            self.lustreConfiguration = lustreConfiguration
            self.networkType = networkType
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.storageCapacity = storageCapacity
            self.storageType = storageType
            self.windowsConfiguration = windowsConfiguration
        }

        /// Checks the modeled client-side constraints on this request and recursively
        /// validates each nested per-file-system-type configuration. Throws on the first
        /// out-of-range or malformed value, in declaration order.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            // NOTE(review): "A-za-z" (instead of "A-Za-z") additionally admits '[', '\', ']',
            // '^', and '`'. This pattern appears verbatim in the AWS service model/API
            // reference, so it is preserved here to keep client-side validation in sync
            // with the service — confirm before tightening.
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, max: 21)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, min: 11)
            try self.validate(self.fileSystemId, name: "fileSystemId", parent: name, pattern: "^(fs-[0-9a-f]{8,})$")
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, max: 20)
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, min: 1)
            // NOTE(review): the unescaped '.' matches any character, not only a literal dot;
            // this also mirrors the published service model, so it is preserved as-is.
            try self.validate(self.fileSystemTypeVersion, name: "fileSystemTypeVersion", parent: name, pattern: "^[0-9](.[0-9]*)*$")
            try self.lustreConfiguration?.validate(name: "\(name).lustreConfiguration")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, max: 2147483647)
            try self.validate(self.storageCapacity, name: "storageCapacity", parent: name, min: 0)
            try self.windowsConfiguration?.validate(name: "\(name).windowsConfiguration")
        }

        /// Maps Swift property names to the PascalCase keys used on the wire.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case fileSystemId = "FileSystemId"
            case fileSystemTypeVersion = "FileSystemTypeVersion"
            case lustreConfiguration = "LustreConfiguration"
            case networkType = "NetworkType"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case storageCapacity = "StorageCapacity"
            case storageType = "StorageType"
            case windowsConfiguration = "WindowsConfiguration"
        }
    }

    public struct UpdateFileSystemResponse: AWSDecodableShape {
        /// A description of the file system that was updated.
        public let fileSystem: FileSystem?

        /// Memberwise initializer; `fileSystem` defaults to `nil`.
        @inlinable
        public init(fileSystem: FileSystem? = nil) {
            self.fileSystem = fileSystem
        }

        /// Maps the Swift property name to the PascalCase key used on the wire.
        private enum CodingKeys: String, CodingKey {
            case fileSystem = "FileSystem"
        }
    }

    public struct UpdateFileSystemWindowsConfiguration: AWSEncodableShape {
        /// Audit log settings controlling how Amazon FSx for Windows File Server records end-user access to files, folders, and file shares on the file system.
        public let auditLogConfiguration: WindowsAuditLogCreateConfiguration?
        /// How many days to keep automatic backups. Setting this to 0 disables automatic backups; the maximum retention is 90 days and the default is 30. For more information, see Working with Automatic Daily Backups.
        public let automaticBackupRetentionDays: Int?
        /// The preferred start time for the daily automatic backup, in the UTC time zone, for example 02:00.
        public let dailyAutomaticBackupStartTime: String?
        /// The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx provisions 3 IOPS per GiB of storage capacity; additional IOPS per GiB can be provisioned up to the maximum associated with the chosen throughput capacity.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// Updates for joining the Windows File Server instance to a self-managed Microsoft AD directory. A self-managed Microsoft AD update request cannot be made while another one is already in progress.
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfigurationUpdates?
        /// The target throughput capacity for the file system, in MB/s. Valid values are 8, 16, 32, 64, 128, 256, 512, 1024, 2048. A throughput capacity update request cannot be made while another one is already in progress. For more information, see Managing Throughput Capacity.
        public let throughputCapacity: Int?
        /// The preferred weekly maintenance window start, formatted d:HH:MM in the UTC time zone, where d is a weekday number from 1 through 7 (1 = Monday, 7 = Sunday).
        public let weeklyMaintenanceStartTime: String?

        @inlinable
        public init(
            auditLogConfiguration: WindowsAuditLogCreateConfiguration? = nil,
            automaticBackupRetentionDays: Int? = nil,
            dailyAutomaticBackupStartTime: String? = nil,
            diskIopsConfiguration: DiskIopsConfiguration? = nil,
            selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfigurationUpdates? = nil,
            throughputCapacity: Int? = nil,
            weeklyMaintenanceStartTime: String? = nil
        ) {
            self.auditLogConfiguration = auditLogConfiguration
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.diskIopsConfiguration = diskIopsConfiguration
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        /// Checks all fields against the service model's range, length, and pattern constraints.
        public func validate(name: String) throws {
            try self.auditLogConfiguration?.validate(name: "\(name).auditLogConfiguration")
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, max: 90)
            try self.validate(self.automaticBackupRetentionDays, name: "automaticBackupRetentionDays", parent: name, min: 0)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, max: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5)
            try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$")
            try self.diskIopsConfiguration?.validate(name: "\(name).diskIopsConfiguration")
            try self.selfManagedActiveDirectoryConfiguration?.validate(name: "\(name).selfManagedActiveDirectoryConfiguration")
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, max: 100000)
            try self.validate(self.throughputCapacity, name: "throughputCapacity", parent: name, min: 8)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, max: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, min: 7)
            try self.validate(self.weeklyMaintenanceStartTime, name: "weeklyMaintenanceStartTime", parent: name, pattern: "^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case auditLogConfiguration = "AuditLogConfiguration"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }

    public struct UpdateOntapVolumeConfiguration: AWSEncodableShape {
        /// Whether tags on the volume should be copied to backups. Defaults to false. When true, all volume tags are copied to all automatic and user-initiated backups where the user doesn't specify tags; if one or more tags are specified, only those tags are copied. When one or more tags are supplied while creating a user-initiated backup, no tags are copied from the volume, regardless of this value.
        public let copyTagsToBackups: Bool?
        /// The location in the SVM's namespace where the volume is mounted. The JunctionPath must begin with a forward slash, such as /vol3.
        public let junctionPath: String?
        /// The volume's security style, which can be UNIX, NTFS, or MIXED.
        public let securityStyle: SecurityStyle?
        /// The configured volume size, in bytes.
        public let sizeInBytes: Int64?
        /// The volume size, in megabytes.
        public let sizeInMegabytes: Int?
        /// Updates to the SnapLock configuration of an FSx for ONTAP SnapLock volume.
        public let snaplockConfiguration: UpdateSnaplockConfiguration?
        /// The snapshot policy for the volume. Three built-in policies exist:    default: the default policy — up to six hourly snapshots taken five minutes past the hour, up to two daily snapshots taken Monday through Saturday at 10 minutes after midnight, and up to two weekly snapshots taken every Sunday at 15 minutes after midnight.    default-1weekly: identical to default except that only one snapshot from the weekly schedule is retained.    none: takes no snapshots; assign it to volumes to prevent automatic snapshots.   The name of a custom policy created with the ONTAP CLI or REST API may also be supplied. For more information, see Snapshot policies in the Amazon FSx for NetApp ONTAP User Guide.
        public let snapshotPolicy: String?
        /// Set to true to enable the deduplication, compression, and compaction storage efficiency features on the volume. Defaults to false.
        public let storageEfficiencyEnabled: Bool?
        /// Updates the volume's data tiering policy.
        public let tieringPolicy: TieringPolicy?

        @inlinable
        public init(
            copyTagsToBackups: Bool? = nil,
            junctionPath: String? = nil,
            securityStyle: SecurityStyle? = nil,
            sizeInBytes: Int64? = nil,
            sizeInMegabytes: Int? = nil,
            snaplockConfiguration: UpdateSnaplockConfiguration? = nil,
            snapshotPolicy: String? = nil,
            storageEfficiencyEnabled: Bool? = nil,
            tieringPolicy: TieringPolicy? = nil
        ) {
            self.copyTagsToBackups = copyTagsToBackups
            self.junctionPath = junctionPath
            self.securityStyle = securityStyle
            self.sizeInBytes = sizeInBytes
            self.sizeInMegabytes = sizeInMegabytes
            self.snaplockConfiguration = snaplockConfiguration
            self.snapshotPolicy = snapshotPolicy
            self.storageEfficiencyEnabled = storageEfficiencyEnabled
            self.tieringPolicy = tieringPolicy
        }

        /// Checks all fields against the service model's range, length, and pattern constraints.
        public func validate(name: String) throws {
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, max: 255)
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, min: 1)
            try self.validate(self.junctionPath, name: "junctionPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.validate(self.sizeInBytes, name: "sizeInBytes", parent: name, max: 22517998000000000)
            try self.validate(self.sizeInBytes, name: "sizeInBytes", parent: name, min: 0)
            try self.validate(self.sizeInMegabytes, name: "sizeInMegabytes", parent: name, max: 2147483647)
            try self.validate(self.sizeInMegabytes, name: "sizeInMegabytes", parent: name, min: 0)
            try self.snaplockConfiguration?.validate(name: "\(name).snaplockConfiguration")
            try self.validate(self.snapshotPolicy, name: "snapshotPolicy", parent: name, max: 255)
            try self.validate(self.snapshotPolicy, name: "snapshotPolicy", parent: name, min: 1)
            try self.tieringPolicy?.validate(name: "\(name).tieringPolicy")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case copyTagsToBackups = "CopyTagsToBackups"
            case junctionPath = "JunctionPath"
            case securityStyle = "SecurityStyle"
            case sizeInBytes = "SizeInBytes"
            case sizeInMegabytes = "SizeInMegabytes"
            case snaplockConfiguration = "SnaplockConfiguration"
            case snapshotPolicy = "SnapshotPolicy"
            case storageEfficiencyEnabled = "StorageEfficiencyEnabled"
            case tieringPolicy = "TieringPolicy"
        }
    }

    public struct UpdateOpenZFSVolumeConfiguration: AWSEncodableShape {
        /// The method used to compress data on the volume. Defaults to NONE.    NONE - Does not compress the data on the volume. NONE is the default.    ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.    LZ4 - Compresses the data in the volume using the LZ4 compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.
        public let dataCompressionType: OpenZFSDataCompressionType?
        /// Configuration for mounting a Network File System (NFS) file system.
        public let nfsExports: [OpenZFSNfsExport]?
        /// Indicates whether the volume is read-only.
        public let readOnly: Bool?
        /// The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, 512, or 1024 KiB; the default is 128 KiB. Most workloads should use the default. Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a larger one. For guidance on choosing a custom record size, see Tips for maximizing performance in the Amazon FSx for OpenZFS User Guide.
        public let recordSizeKiB: Int?
        /// The maximum amount of storage, in gibibytes (GiB), that the volume can use from its parent. The quota may exceed the storage on the parent volume. Pass -1 to unset a volume's storage capacity quota.
        public let storageCapacityQuotaGiB: Int?
        /// The amount of storage, in gibibytes (GiB), to reserve from the parent volume. More storage than the parent volume has reserved cannot be reserved. Pass -1 to unset a volume's storage capacity reservation.
        public let storageCapacityReservationGiB: Int?
        /// Specifies how much storage users or groups can use on the volume.
        public let userAndGroupQuotas: [OpenZFSUserOrGroupQuota]?

        @inlinable
        public init(
            dataCompressionType: OpenZFSDataCompressionType? = nil,
            nfsExports: [OpenZFSNfsExport]? = nil,
            readOnly: Bool? = nil,
            recordSizeKiB: Int? = nil,
            storageCapacityQuotaGiB: Int? = nil,
            storageCapacityReservationGiB: Int? = nil,
            userAndGroupQuotas: [OpenZFSUserOrGroupQuota]? = nil
        ) {
            self.dataCompressionType = dataCompressionType
            self.nfsExports = nfsExports
            self.readOnly = readOnly
            self.recordSizeKiB = recordSizeKiB
            self.storageCapacityQuotaGiB = storageCapacityQuotaGiB
            self.storageCapacityReservationGiB = storageCapacityReservationGiB
            self.userAndGroupQuotas = userAndGroupQuotas
        }

        /// Checks all fields against the service model's range and count constraints, including each element of the list fields.
        public func validate(name: String) throws {
            try self.nfsExports?.forEach {
                try $0.validate(name: "\(name).nfsExports[]")
            }
            try self.validate(self.nfsExports, name: "nfsExports", parent: name, max: 1)
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, max: 4096)
            try self.validate(self.recordSizeKiB, name: "recordSizeKiB", parent: name, min: 4)
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, max: 2147483647)
            try self.validate(self.storageCapacityQuotaGiB, name: "storageCapacityQuotaGiB", parent: name, min: -1)
            try self.validate(self.storageCapacityReservationGiB, name: "storageCapacityReservationGiB", parent: name, max: 2147483647)
            try self.validate(self.storageCapacityReservationGiB, name: "storageCapacityReservationGiB", parent: name, min: -1)
            try self.userAndGroupQuotas?.forEach {
                try $0.validate(name: "\(name).userAndGroupQuotas[]")
            }
            try self.validate(self.userAndGroupQuotas, name: "userAndGroupQuotas", parent: name, max: 500)
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case dataCompressionType = "DataCompressionType"
            case nfsExports = "NfsExports"
            case readOnly = "ReadOnly"
            case recordSizeKiB = "RecordSizeKiB"
            case storageCapacityQuotaGiB = "StorageCapacityQuotaGiB"
            case storageCapacityReservationGiB = "StorageCapacityReservationGiB"
            case userAndGroupQuotas = "UserAndGroupQuotas"
        }
    }

    public struct UpdateSharedVpcConfigurationRequest: AWSEncodableShape {
        /// An idempotency token; a value is generated automatically when none is supplied.
        public let clientRequestToken: String?
        /// Specifies whether participant accounts can create FSx for ONTAP Multi-AZ file systems in shared subnets. Set to true to enable or false to disable.
        public let enableFsxRouteTableUpdatesFromParticipantAccounts: String?

        @inlinable
        public init(
            clientRequestToken: String? = UpdateSharedVpcConfigurationRequest.idempotencyToken(),
            enableFsxRouteTableUpdatesFromParticipantAccounts: String? = nil
        ) {
            self.clientRequestToken = clientRequestToken
            self.enableFsxRouteTableUpdatesFromParticipantAccounts = enableFsxRouteTableUpdatesFromParticipantAccounts
        }

        /// Checks all fields against the service model's length and pattern constraints.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.enableFsxRouteTableUpdatesFromParticipantAccounts, name: "enableFsxRouteTableUpdatesFromParticipantAccounts", parent: name, max: 5)
            try self.validate(self.enableFsxRouteTableUpdatesFromParticipantAccounts, name: "enableFsxRouteTableUpdatesFromParticipantAccounts", parent: name, min: 4)
            try self.validate(self.enableFsxRouteTableUpdatesFromParticipantAccounts, name: "enableFsxRouteTableUpdatesFromParticipantAccounts", parent: name, pattern: "^(?i)(true|false)$")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case enableFsxRouteTableUpdatesFromParticipantAccounts = "EnableFsxRouteTableUpdatesFromParticipantAccounts"
        }
    }

    /// The response shape for the UpdateSharedVpcConfiguration operation.
    public struct UpdateSharedVpcConfigurationResponse: AWSDecodableShape {
        /// Indicates whether participant accounts can create FSx for ONTAP Multi-AZ file systems in shared subnets.
        public let enableFsxRouteTableUpdatesFromParticipantAccounts: String?

        @inlinable
        public init(enableFsxRouteTableUpdatesFromParticipantAccounts: String? = nil) {
            self.enableFsxRouteTableUpdatesFromParticipantAccounts = enableFsxRouteTableUpdatesFromParticipantAccounts
        }

        // Maps the Swift property name to the service's PascalCase wire key.
        private enum CodingKeys: String, CodingKey {
            case enableFsxRouteTableUpdatesFromParticipantAccounts = "EnableFsxRouteTableUpdatesFromParticipantAccounts"
        }
    }

    public struct UpdateSnaplockConfiguration: AWSEncodableShape {
        /// Enables or disables the audit log volume for an FSx for ONTAP SnapLock volume. Defaults to false. When AuditLogVolume is set to true, the SnapLock volume is created as an audit log volume, whose minimum retention period is six months. For more information, see SnapLock audit log volumes.
        public let auditLogVolume: Bool?
        /// Sets the autocommit period of files in an FSx for ONTAP SnapLock volume.
        public let autocommitPeriod: AutocommitPeriod?
        /// Enables, disables, or permanently disables privileged delete on an FSx for ONTAP SnapLock Enterprise volume. Enabling privileged delete lets SnapLock administrators delete write once, read many (WORM) files even if they have active retention periods. PERMANENTLY_DISABLED is a terminal state: once privileged delete is permanently disabled on a SnapLock volume, it cannot be re-enabled. Defaults to DISABLED. For more information, see Privileged delete.
        public let privilegedDelete: PrivilegedDelete?
        /// The retention period of an FSx for ONTAP SnapLock volume.
        public let retentionPeriod: SnaplockRetentionPeriod?
        /// Enables or disables volume-append mode on an FSx for ONTAP SnapLock volume. Volume-append mode allows creating WORM-appendable files and writing data to them incrementally. Defaults to false. For more information, see Volume-append mode.
        public let volumeAppendModeEnabled: Bool?

        @inlinable
        public init(
            auditLogVolume: Bool? = nil,
            autocommitPeriod: AutocommitPeriod? = nil,
            privilegedDelete: PrivilegedDelete? = nil,
            retentionPeriod: SnaplockRetentionPeriod? = nil,
            volumeAppendModeEnabled: Bool? = nil
        ) {
            self.auditLogVolume = auditLogVolume
            self.autocommitPeriod = autocommitPeriod
            self.privilegedDelete = privilegedDelete
            self.retentionPeriod = retentionPeriod
            self.volumeAppendModeEnabled = volumeAppendModeEnabled
        }

        /// Delegates validation to the nested autocommit-period and retention-period shapes.
        public func validate(name: String) throws {
            try self.autocommitPeriod?.validate(name: "\(name).autocommitPeriod")
            try self.retentionPeriod?.validate(name: "\(name).retentionPeriod")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case auditLogVolume = "AuditLogVolume"
            case autocommitPeriod = "AutocommitPeriod"
            case privilegedDelete = "PrivilegedDelete"
            case retentionPeriod = "RetentionPeriod"
            case volumeAppendModeEnabled = "VolumeAppendModeEnabled"
        }
    }

    public struct UpdateSnapshotRequest: AWSEncodableShape {
        /// An idempotency token; a value is generated automatically when none is supplied.
        public let clientRequestToken: String?
        /// The name of the snapshot that you want to update.
        public let name: String?
        /// The ID of the snapshot being updated, in the format fsvolsnap-0123456789abcdef0.
        public let snapshotId: String?

        @inlinable
        public init(
            clientRequestToken: String? = UpdateSnapshotRequest.idempotencyToken(),
            name: String? = nil,
            snapshotId: String? = nil
        ) {
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.snapshotId = snapshotId
        }

        /// Checks all fields against the service model's length and pattern constraints.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 203)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_:.-]{1,203}$")
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, max: 28)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, min: 11)
            try self.validate(self.snapshotId, name: "snapshotId", parent: name, pattern: "^((fs)?volsnap-[0-9a-f]{8,})$")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case snapshotId = "SnapshotId"
        }
    }

    /// The response object returned by a successful UpdateSnapshot operation.
    public struct UpdateSnapshotResponse: AWSDecodableShape {
        /// Returned after a successful UpdateSnapshot operation, describing the snapshot that you updated.
        public let snapshot: Snapshot?

        @inlinable
        public init(snapshot: Snapshot? = nil) {
            self.snapshot = snapshot
        }

        // Maps the Swift property name to the service's PascalCase wire key.
        private enum CodingKeys: String, CodingKey {
            case snapshot = "Snapshot"
        }
    }

    public struct UpdateStorageVirtualMachineRequest: AWSEncodableShape {
        /// Updates to an SVM's Microsoft Active Directory (AD) configuration.
        public let activeDirectoryConfiguration: UpdateSvmActiveDirectoryConfiguration?
        /// An idempotency token; a value is generated automatically when none is supplied.
        public let clientRequestToken: String?
        /// The ID of the SVM being updated, in the format svm-0123456789abcdef0.
        public let storageVirtualMachineId: String?
        /// A new SvmAdminPassword.
        public let svmAdminPassword: String?

        @inlinable
        public init(
            activeDirectoryConfiguration: UpdateSvmActiveDirectoryConfiguration? = nil,
            clientRequestToken: String? = UpdateStorageVirtualMachineRequest.idempotencyToken(),
            storageVirtualMachineId: String? = nil,
            svmAdminPassword: String? = nil
        ) {
            self.activeDirectoryConfiguration = activeDirectoryConfiguration
            self.clientRequestToken = clientRequestToken
            self.storageVirtualMachineId = storageVirtualMachineId
            self.svmAdminPassword = svmAdminPassword
        }

        /// Checks all fields against the service model's length and pattern constraints, delegating to the nested AD configuration.
        public func validate(name: String) throws {
            try self.activeDirectoryConfiguration?.validate(name: "\(name).activeDirectoryConfiguration")
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, max: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, min: 21)
            try self.validate(self.storageVirtualMachineId, name: "storageVirtualMachineId", parent: name, pattern: "^(svm-[0-9a-f]{17,})$")
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, max: 50)
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, min: 8)
            try self.validate(self.svmAdminPassword, name: "svmAdminPassword", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{8,50}$")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryConfiguration = "ActiveDirectoryConfiguration"
            case clientRequestToken = "ClientRequestToken"
            case storageVirtualMachineId = "StorageVirtualMachineId"
            case svmAdminPassword = "SvmAdminPassword"
        }
    }

    /// The response object returned by the UpdateStorageVirtualMachine operation.
    public struct UpdateStorageVirtualMachineResponse: AWSDecodableShape {
        // The storage virtual machine (SVM) description returned by the service.
        public let storageVirtualMachine: StorageVirtualMachine?

        @inlinable
        public init(storageVirtualMachine: StorageVirtualMachine? = nil) {
            self.storageVirtualMachine = storageVirtualMachine
        }

        // Maps the Swift property name to the service's PascalCase wire key.
        private enum CodingKeys: String, CodingKey {
            case storageVirtualMachine = "StorageVirtualMachine"
        }
    }

    public struct UpdateSvmActiveDirectoryConfiguration: AWSEncodableShape {
        /// An updated NetBIOS name of the AD computer object NetBiosName to which an SVM is joined.
        public let netBiosName: String?
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfigurationUpdates?

        @inlinable
        public init(
            netBiosName: String? = nil,
            selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryConfigurationUpdates? = nil
        ) {
            self.netBiosName = netBiosName
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
        }

        /// Checks fields against the service model constraints, delegating to the nested self-managed AD updates shape.
        /// NOTE(review): the netBiosName pattern permits up to 255 characters while the max-length check is 15;
        /// both come from the service model, and the stricter max(15) check is what effectively bounds the length.
        public func validate(name: String) throws {
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, max: 15)
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, min: 1)
            try self.validate(self.netBiosName, name: "netBiosName", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,255}$")
            try self.selfManagedActiveDirectoryConfiguration?.validate(name: "\(name).selfManagedActiveDirectoryConfiguration")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case netBiosName = "NetBiosName"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
        }
    }

    public struct UpdateVolumeRequest: AWSEncodableShape {
        /// An idempotency token; a value is generated automatically when none is supplied.
        public let clientRequestToken: String?
        /// The name of the OpenZFS volume. OpenZFS root volumes are automatically named FSX. Child volume names must be unique among their parent volume's children, and the volume name is part of the OpenZFS volume's mount string.
        public let name: String?
        /// The configuration of the ONTAP volume being updated.
        public let ontapConfiguration: UpdateOntapVolumeConfiguration?
        /// The configuration of the OpenZFS volume being updated.
        public let openZFSConfiguration: UpdateOpenZFSVolumeConfiguration?
        /// The ID of the volume being updated, in the format fsvol-0123456789abcdef0.
        public let volumeId: String?

        @inlinable
        public init(
            clientRequestToken: String? = UpdateVolumeRequest.idempotencyToken(),
            name: String? = nil,
            ontapConfiguration: UpdateOntapVolumeConfiguration? = nil,
            openZFSConfiguration: UpdateOpenZFSVolumeConfiguration? = nil,
            volumeId: String? = nil
        ) {
            self.clientRequestToken = clientRequestToken
            self.name = name
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.volumeId = volumeId
        }

        /// Checks all fields against the service model's length and pattern constraints, delegating to the nested volume configurations.
        public func validate(name: String) throws {
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 63)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 1)
            try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$")
            try self.validate(self.name, name: "name", parent: name, max: 203)
            try self.validate(self.name, name: "name", parent: name, min: 1)
            try self.validate(self.name, name: "name", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,203}$")
            try self.ontapConfiguration?.validate(name: "\(name).ontapConfiguration")
            try self.openZFSConfiguration?.validate(name: "\(name).openZFSConfiguration")
            try self.validate(self.volumeId, name: "volumeId", parent: name, max: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, min: 23)
            try self.validate(self.volumeId, name: "volumeId", parent: name, pattern: "^(fsvol-[0-9a-f]{17,})$")
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case clientRequestToken = "ClientRequestToken"
            case name = "Name"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case volumeId = "VolumeId"
        }
    }

    /// The response object returned by a successful UpdateVolume operation.
    public struct UpdateVolumeResponse: AWSDecodableShape {
        /// A description of the volume just updated. Returned after a successful UpdateVolume API operation.
        public let volume: Volume?

        @inlinable
        public init(volume: Volume? = nil) {
            self.volume = volume
        }

        // Maps the Swift property name to the service's PascalCase wire key.
        private enum CodingKeys: String, CodingKey {
            case volume = "Volume"
        }
    }

    public struct Volume: AWSDecodableShape {
        /// Administrative actions for the volume that are in process or waiting to be processed. Administrative actions describe changes to the volume initiated via the UpdateVolume action.
        public let administrativeActions: [AdministrativeAction]?
        // Timestamp of when the volume was created, as returned by the service.
        public let creationTime: Date?
        // ID of the file system that owns this volume.
        public let fileSystemId: String?
        /// The volume's lifecycle status.    AVAILABLE - The volume is fully available for use.    CREATED - The volume has been created.    CREATING - Amazon FSx is creating the new volume.    DELETING - Amazon FSx is deleting an existing volume.    FAILED - Amazon FSx was unable to create the volume.    MISCONFIGURED - The volume is in a failed but recoverable state.    PENDING - Amazon FSx hasn't started creating the volume.
        public let lifecycle: VolumeLifecycle?
        /// The reason the volume lifecycle status changed.
        public let lifecycleTransitionReason: LifecycleTransitionReason?
        /// The volume's name.
        public let name: String?
        public let ontapConfiguration: OntapVolumeConfiguration?
        /// The configuration of an Amazon FSx for OpenZFS volume.
        public let openZFSConfiguration: OpenZFSVolumeConfiguration?
        public let resourceARN: String?
        public let tags: [Tag]?
        /// The system-generated, unique ID of the volume.
        public let volumeId: String?
        /// The volume's type.
        public let volumeType: VolumeType?

        @inlinable
        public init(
            administrativeActions: [AdministrativeAction]? = nil,
            creationTime: Date? = nil,
            fileSystemId: String? = nil,
            lifecycle: VolumeLifecycle? = nil,
            lifecycleTransitionReason: LifecycleTransitionReason? = nil,
            name: String? = nil,
            ontapConfiguration: OntapVolumeConfiguration? = nil,
            openZFSConfiguration: OpenZFSVolumeConfiguration? = nil,
            resourceARN: String? = nil,
            tags: [Tag]? = nil,
            volumeId: String? = nil,
            volumeType: VolumeType? = nil
        ) {
            self.administrativeActions = administrativeActions
            self.creationTime = creationTime
            self.fileSystemId = fileSystemId
            self.lifecycle = lifecycle
            self.lifecycleTransitionReason = lifecycleTransitionReason
            self.name = name
            self.ontapConfiguration = ontapConfiguration
            self.openZFSConfiguration = openZFSConfiguration
            self.resourceARN = resourceARN
            self.tags = tags
            self.volumeId = volumeId
            self.volumeType = volumeType
        }

        // Maps Swift property names to the service's PascalCase wire keys.
        private enum CodingKeys: String, CodingKey {
            case administrativeActions = "AdministrativeActions"
            case creationTime = "CreationTime"
            case fileSystemId = "FileSystemId"
            case lifecycle = "Lifecycle"
            case lifecycleTransitionReason = "LifecycleTransitionReason"
            case name = "Name"
            case ontapConfiguration = "OntapConfiguration"
            case openZFSConfiguration = "OpenZFSConfiguration"
            case resourceARN = "ResourceARN"
            case tags = "Tags"
            case volumeId = "VolumeId"
            case volumeType = "VolumeType"
        }
    }

    /// A name/values pair used to filter the results of volume describe operations.
    public struct VolumeFilter: AWSEncodableShape {
        /// The name for this filter.
        public let name: VolumeFilterName?
        /// The values of the filter. These are all the values for any of the applied filters.
        public let values: [String]?

        /// Creates a filter; both the name and the values are optional.
        @inlinable
        public init(name: VolumeFilterName? = nil, values: [String]? = nil) {
            self.name = name
            self.values = values
        }

        /// Validates client-side constraints before the request is sent:
        /// each value must be 1-128 characters matching the allowed pattern,
        /// and at most 20 values may be supplied.
        public func validate(name: String) throws {
            if let filterValues = self.values {
                for filterValue in filterValues {
                    try validate(filterValue, name: "values[]", parent: name, max: 128)
                    try validate(filterValue, name: "values[]", parent: name, min: 1)
                    try validate(filterValue, name: "values[]", parent: name, pattern: "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$")
                }
            }
            try self.validate(self.values, name: "values", parent: name, max: 20)
        }

        // Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case name = "Name"
            case values = "Values"
        }
    }

    /// The audit-logging configuration of an FSx for Windows File Server file system, as returned by describe operations.
    public struct WindowsAuditLogConfiguration: AWSDecodableShape {
        /// The Amazon Resource Name (ARN) for the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN. The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix. The destination ARN (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.
        public let auditLogDestination: String?
        /// Sets which attempt type is logged by Amazon FSx for file and folder accesses.    SUCCESS_ONLY - only successful attempts to access files or folders are logged.    FAILURE_ONLY - only failed attempts to access files or folders are logged.    SUCCESS_AND_FAILURE - both successful attempts and failed attempts to access files or folders are logged.    DISABLED - access auditing of files and folders is turned off.
        public let fileAccessAuditLogLevel: WindowsAccessAuditLogLevel?
        /// Sets which attempt type is logged by Amazon FSx for file share accesses.    SUCCESS_ONLY - only successful attempts to access file shares are logged.    FAILURE_ONLY - only failed attempts to access file shares are logged.    SUCCESS_AND_FAILURE - both successful attempts and failed attempts to access file shares are logged.    DISABLED - access auditing of file shares is turned off.
        public let fileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel?

        /// Memberwise initializer; all fields are optional because the shape is decode-only.
        @inlinable
        public init(auditLogDestination: String? = nil, fileAccessAuditLogLevel: WindowsAccessAuditLogLevel? = nil, fileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel? = nil) {
            self.auditLogDestination = auditLogDestination
            self.fileAccessAuditLogLevel = fileAccessAuditLogLevel
            self.fileShareAccessAuditLogLevel = fileShareAccessAuditLogLevel
        }

        // Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case auditLogDestination = "AuditLogDestination"
            case fileAccessAuditLogLevel = "FileAccessAuditLogLevel"
            case fileShareAccessAuditLogLevel = "FileShareAccessAuditLogLevel"
        }
    }

    /// The audit-logging configuration supplied when creating or updating an FSx for Windows File Server file system.
    public struct WindowsAuditLogCreateConfiguration: AWSEncodableShape {
        /// The Amazon Resource Name (ARN) that specifies the destination of the audit logs. The destination can be any Amazon CloudWatch Logs log group ARN or Amazon Kinesis Data Firehose delivery stream ARN, with the following requirements:   The destination ARN that you provide (either CloudWatch Logs log group or Kinesis Data Firehose delivery stream) must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.   The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix. The name of the Amazon Kinesis Data Firehose delivery stream must begin with the aws-fsx prefix.   If you do not provide a destination in AuditLogDestination, Amazon FSx will create and use a log stream in the CloudWatch Logs /aws/fsx/windows log group.   If AuditLogDestination is provided and the resource does not exist, the request will fail with a BadRequest error.   If FileAccessAuditLogLevel and FileShareAccessAuditLogLevel are both set to DISABLED, you cannot specify a destination in AuditLogDestination.
        public let auditLogDestination: String?
        /// Sets which attempt type is logged by Amazon FSx for file and folder accesses.    SUCCESS_ONLY - only successful attempts to access files or folders are logged.    FAILURE_ONLY - only failed attempts to access files or folders are logged.    SUCCESS_AND_FAILURE - both successful attempts and failed attempts to access files or folders are logged.    DISABLED - access auditing of files and folders is turned off.
        public let fileAccessAuditLogLevel: WindowsAccessAuditLogLevel?
        /// Sets which attempt type is logged by Amazon FSx for file share accesses.    SUCCESS_ONLY - only successful attempts to access file shares are logged.    FAILURE_ONLY - only failed attempts to access file shares are logged.    SUCCESS_AND_FAILURE - both successful attempts and failed attempts to access file shares are logged.    DISABLED - access auditing of file shares is turned off.
        public let fileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel?

        /// Memberwise initializer; all fields are optional.
        @inlinable
        public init(auditLogDestination: String? = nil, fileAccessAuditLogLevel: WindowsAccessAuditLogLevel? = nil, fileShareAccessAuditLogLevel: WindowsAccessAuditLogLevel? = nil) {
            self.auditLogDestination = auditLogDestination
            self.fileAccessAuditLogLevel = fileAccessAuditLogLevel
            self.fileShareAccessAuditLogLevel = fileShareAccessAuditLogLevel
        }

        /// Validates client-side constraints before the request is sent:
        /// the destination ARN, when present, must be 8-1024 characters and match the ARN pattern.
        public func validate(name: String) throws {
            try self.validate(self.auditLogDestination, name: "auditLogDestination", parent: name, max: 1024)
            try self.validate(self.auditLogDestination, name: "auditLogDestination", parent: name, min: 8)
            try self.validate(self.auditLogDestination, name: "auditLogDestination", parent: name, pattern: "^arn:[^:]{1,63}:[^:]{0,63}:[^:]{0,63}:(?:|\\d{12}):[^/].{0,1023}$")
        }

        // Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case auditLogDestination = "AuditLogDestination"
            case fileAccessAuditLogLevel = "FileAccessAuditLogLevel"
            case fileShareAccessAuditLogLevel = "FileShareAccessAuditLogLevel"
        }
    }

    /// The configuration of an FSx for Windows File Server file system, as returned by describe operations.
    public struct WindowsFileSystemConfiguration: AWSDecodableShape {
        /// The ID for an existing Amazon Web Services Managed Microsoft Active Directory instance that the file system is joined to.
        public let activeDirectoryId: String?
        /// DNS aliases associated with the file system. NOTE(review): undocumented by the generator — inferred from the `Alias` element type; confirm against the FSx API reference.
        public let aliases: [Alias]?
        /// The configuration that Amazon FSx for Windows File Server uses to audit and log user accesses of files, folders, and file shares on the Amazon FSx for Windows File Server file system.
        public let auditLogConfiguration: WindowsAuditLogConfiguration?
        /// The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
        public let automaticBackupRetentionDays: Int?
        /// A boolean flag indicating whether tags on the file system should be copied to backups. This value defaults to false. If it's set to true, all tags on the file system are copied to all automatic backups and any user-initiated backups where the user doesn't specify any tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when  creating a user-initiated backup, no tags are copied from the file system,  regardless of this value.
        public let copyTagsToBackups: Bool?
        /// The preferred time to take daily automatic backups, in the UTC time zone.
        public let dailyAutomaticBackupStartTime: String?
        /// Specifies the file system deployment type, valid values are the following:    MULTI_AZ_1 - Specifies a high availability file system that is configured for Multi-AZ  redundancy to tolerate temporary Availability Zone (AZ) unavailability, and supports SSD and HDD storage.    SINGLE_AZ_1 - (Default) Specifies a file system that is configured for single AZ redundancy,  only supports SSD storage.    SINGLE_AZ_2 - Latest generation Single AZ file system.  Specifies a file system that is configured for single AZ redundancy and supports SSD and HDD storage.   For more information, see  Single-AZ and Multi-AZ File Systems.
        public let deploymentType: WindowsDeploymentType?
        /// The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for Windows file system. By default, Amazon FSx automatically provisions 3 IOPS per GiB of storage capacity. You can provision additional IOPS per GiB of storage, up to the maximum limit associated with your chosen throughput capacity.
        public let diskIopsConfiguration: DiskIopsConfiguration?
        /// The list of maintenance operations in progress for this file system.
        public let maintenanceOperationsInProgress: [FileSystemMaintenanceOperation]?
        /// For MULTI_AZ_1 deployment types, the IPv4 address of the primary, or preferred, file server. Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that  are not joined to a Microsoft Active Directory.  Applicable for all Windows file system deployment types.  This IPv4 address is temporarily unavailable  when the file system is undergoing maintenance. For Linux and Windows  SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information on mapping and mounting file shares, see  Accessing data using file shares.
        public let preferredFileServerIp: String?
        /// For MULTI_AZ_1 deployment types, the IPv6 address of the primary, or preferred, file server. Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for all Windows file system deployment types. This IPv6 address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead.
        public let preferredFileServerIpv6: String?
        /// For MULTI_AZ_1 deployment types, it specifies the ID of the subnet where the preferred file server is located.  Must be one of the two subnet IDs specified in SubnetIds property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server. For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this value is the same as that for SubnetIDs. For more information, see  Availability and durability: Single-AZ and Multi-AZ file systems.
        public let preferredSubnetId: String?
        /// For MULTI_AZ_1 deployment types, use this endpoint when performing administrative tasks on the file system using  Amazon FSx Remote PowerShell. For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this is the DNS name of the file system. This endpoint is temporarily unavailable when the file system is undergoing maintenance.
        public let remoteAdministrationEndpoint: String?
        /// The self-managed Microsoft Active Directory attributes of the file system. NOTE(review): undocumented by the generator — inferred from the type name; confirm against the FSx API reference.
        public let selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryAttributes?
        /// The throughput of the Amazon FSx file system, measured in megabytes per second.
        public let throughputCapacity: Int?
        /// The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
        public let weeklyMaintenanceStartTime: String?

        /// Memberwise initializer; all fields are optional because the shape is decode-only.
        @inlinable
        public init(activeDirectoryId: String? = nil, aliases: [Alias]? = nil, auditLogConfiguration: WindowsAuditLogConfiguration? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, deploymentType: WindowsDeploymentType? = nil, diskIopsConfiguration: DiskIopsConfiguration? = nil, maintenanceOperationsInProgress: [FileSystemMaintenanceOperation]? = nil, preferredFileServerIp: String? = nil, preferredFileServerIpv6: String? = nil, preferredSubnetId: String? = nil, remoteAdministrationEndpoint: String? = nil, selfManagedActiveDirectoryConfiguration: SelfManagedActiveDirectoryAttributes? = nil, throughputCapacity: Int? = nil, weeklyMaintenanceStartTime: String? = nil) {
            self.activeDirectoryId = activeDirectoryId
            self.aliases = aliases
            self.auditLogConfiguration = auditLogConfiguration
            self.automaticBackupRetentionDays = automaticBackupRetentionDays
            self.copyTagsToBackups = copyTagsToBackups
            self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime
            self.deploymentType = deploymentType
            self.diskIopsConfiguration = diskIopsConfiguration
            self.maintenanceOperationsInProgress = maintenanceOperationsInProgress
            self.preferredFileServerIp = preferredFileServerIp
            self.preferredFileServerIpv6 = preferredFileServerIpv6
            self.preferredSubnetId = preferredSubnetId
            self.remoteAdministrationEndpoint = remoteAdministrationEndpoint
            self.selfManagedActiveDirectoryConfiguration = selfManagedActiveDirectoryConfiguration
            self.throughputCapacity = throughputCapacity
            self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime
        }

        // Maps Swift property names to the PascalCase member names used on the wire.
        private enum CodingKeys: String, CodingKey {
            case activeDirectoryId = "ActiveDirectoryId"
            case aliases = "Aliases"
            case auditLogConfiguration = "AuditLogConfiguration"
            case automaticBackupRetentionDays = "AutomaticBackupRetentionDays"
            case copyTagsToBackups = "CopyTagsToBackups"
            case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime"
            case deploymentType = "DeploymentType"
            case diskIopsConfiguration = "DiskIopsConfiguration"
            case maintenanceOperationsInProgress = "MaintenanceOperationsInProgress"
            case preferredFileServerIp = "PreferredFileServerIp"
            case preferredFileServerIpv6 = "PreferredFileServerIpv6"
            case preferredSubnetId = "PreferredSubnetId"
            case remoteAdministrationEndpoint = "RemoteAdministrationEndpoint"
            case selfManagedActiveDirectoryConfiguration = "SelfManagedActiveDirectoryConfiguration"
            case throughputCapacity = "ThroughputCapacity"
            case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime"
        }
    }
}

// MARK: - Errors

/// Error enum for FSx
public struct FSxErrorType: AWSErrorType {
    /// The raw error code strings returned by the FSx service.
    enum Code: String {
        case accessPointAlreadyOwnedByYou = "AccessPointAlreadyOwnedByYou"
        case activeDirectoryError = "ActiveDirectoryError"
        case backupBeingCopied = "BackupBeingCopied"
        case backupInProgress = "BackupInProgress"
        case backupNotFound = "BackupNotFound"
        case backupRestoring = "BackupRestoring"
        case badRequest = "BadRequest"
        case dataRepositoryAssociationNotFound = "DataRepositoryAssociationNotFound"
        case dataRepositoryTaskEnded = "DataRepositoryTaskEnded"
        case dataRepositoryTaskExecuting = "DataRepositoryTaskExecuting"
        case dataRepositoryTaskNotFound = "DataRepositoryTaskNotFound"
        case fileCacheNotFound = "FileCacheNotFound"
        case fileSystemNotFound = "FileSystemNotFound"
        case incompatibleParameterError = "IncompatibleParameterError"
        case incompatibleRegionForMultiAZ = "IncompatibleRegionForMultiAZ"
        case internalServerError = "InternalServerError"
        case invalidAccessPoint = "InvalidAccessPoint"
        case invalidDataRepositoryType = "InvalidDataRepositoryType"
        case invalidDestinationKmsKey = "InvalidDestinationKmsKey"
        case invalidExportPath = "InvalidExportPath"
        case invalidImportPath = "InvalidImportPath"
        case invalidNetworkSettings = "InvalidNetworkSettings"
        case invalidPerUnitStorageThroughput = "InvalidPerUnitStorageThroughput"
        case invalidRegion = "InvalidRegion"
        case invalidRequest = "InvalidRequest"
        case invalidSourceKmsKey = "InvalidSourceKmsKey"
        case missingFileCacheConfiguration = "MissingFileCacheConfiguration"
        case missingFileSystemConfiguration = "MissingFileSystemConfiguration"
        case missingVolumeConfiguration = "MissingVolumeConfiguration"
        case notServiceResourceError = "NotServiceResourceError"
        case resourceDoesNotSupportTagging = "ResourceDoesNotSupportTagging"
        case resourceNotFound = "ResourceNotFound"
        case s3AccessPointAttachmentNotFound = "S3AccessPointAttachmentNotFound"
        case serviceLimitExceeded = "ServiceLimitExceeded"
        case snapshotNotFound = "SnapshotNotFound"
        case sourceBackupUnavailable = "SourceBackupUnavailable"
        case storageVirtualMachineNotFound = "StorageVirtualMachineNotFound"
        case tooManyAccessPoints = "TooManyAccessPoints"
        case unsupportedOperation = "UnsupportedOperation"
        case volumeNotFound = "VolumeNotFound"
    }

    // The decoded error code for this error value.
    private let error: Code
    // Additional context (message, response details) attached when decoded from a response; nil for the static factory values below.
    public let context: AWSErrorContext?

    /// initialize FSx
    // Fails (returns nil) when the error code string is not one of the modeled `Code` cases.
    public init?(errorCode: String, context: AWSErrorContext) {
        guard let error = Code(rawValue: errorCode) else { return nil }
        self.error = error
        self.context = context
    }

    // Used by the static factory properties below; carries no context.
    internal init(_ error: Code) {
        self.error = error
        self.context = nil
    }

    /// return error code string
    public var errorCode: String { self.error.rawValue }

    /// An access point with that name already exists in the Amazon Web Services Region in your Amazon Web Services account.
    public static var accessPointAlreadyOwnedByYou: Self { .init(.accessPointAlreadyOwnedByYou) }
    /// An Active Directory error.
    public static var activeDirectoryError: Self { .init(.activeDirectoryError) }
    /// You can't delete a backup while it's being copied.
    public static var backupBeingCopied: Self { .init(.backupBeingCopied) }
    /// Another backup is already under way. Wait for completion before initiating additional backups of this file system.
    public static var backupInProgress: Self { .init(.backupInProgress) }
    /// No Amazon FSx backups were found based upon the supplied parameters.
    public static var backupNotFound: Self { .init(.backupNotFound) }
    /// You can't delete a backup while it's being used to restore a file system.
    public static var backupRestoring: Self { .init(.backupRestoring) }
    /// A generic error indicating a failure with a client request.
    public static var badRequest: Self { .init(.badRequest) }
    /// No data repository associations were found based upon the supplied parameters.
    public static var dataRepositoryAssociationNotFound: Self { .init(.dataRepositoryAssociationNotFound) }
    /// The data repository task could not be canceled because the task has already ended.
    public static var dataRepositoryTaskEnded: Self { .init(.dataRepositoryTaskEnded) }
    /// An existing data repository task is currently executing on the file system.   Wait until the existing task has completed, then create the new task.
    public static var dataRepositoryTaskExecuting: Self { .init(.dataRepositoryTaskExecuting) }
    /// The data repository task or tasks you specified could not be found.
    public static var dataRepositoryTaskNotFound: Self { .init(.dataRepositoryTaskNotFound) }
    /// No caches were found based upon supplied parameters.
    public static var fileCacheNotFound: Self { .init(.fileCacheNotFound) }
    /// No Amazon FSx file systems were found based upon supplied parameters.
    public static var fileSystemNotFound: Self { .init(.fileSystemNotFound) }
    /// The error returned when a second request is received with the same client request token but different parameters settings. A client request token should always uniquely identify a single request.
    public static var incompatibleParameterError: Self { .init(.incompatibleParameterError) }
    /// Amazon FSx doesn't support Multi-AZ Windows File Server copy backup in the destination Region, so the copied backup can't be restored.
    public static var incompatibleRegionForMultiAZ: Self { .init(.incompatibleRegionForMultiAZ) }
    /// A generic error indicating a server-side failure.
    public static var internalServerError: Self { .init(.internalServerError) }
    /// The access point specified doesn't exist.
    public static var invalidAccessPoint: Self { .init(.invalidAccessPoint) }
    /// You have filtered the response to a data repository type that is not supported.
    public static var invalidDataRepositoryType: Self { .init(.invalidDataRepositoryType) }
    /// The Key Management Service (KMS) key of the destination backup is not valid.
    public static var invalidDestinationKmsKey: Self { .init(.invalidDestinationKmsKey) }
    /// The path provided for data repository export isn't valid.
    public static var invalidExportPath: Self { .init(.invalidExportPath) }
    /// The path provided for data repository import isn't valid.
    public static var invalidImportPath: Self { .init(.invalidImportPath) }
    /// One or more network settings specified in the request are invalid.
    public static var invalidNetworkSettings: Self { .init(.invalidNetworkSettings) }
    /// An invalid value for PerUnitStorageThroughput was provided. Please create your file system again, using a valid value.
    public static var invalidPerUnitStorageThroughput: Self { .init(.invalidPerUnitStorageThroughput) }
    /// The Region provided for SourceRegion is not valid or is in a different Amazon Web Services partition.
    public static var invalidRegion: Self { .init(.invalidRegion) }
    /// The action or operation requested is invalid. Verify that the action is typed correctly.
    public static var invalidRequest: Self { .init(.invalidRequest) }
    /// The Key Management Service (KMS) key of the source backup is not valid.
    public static var invalidSourceKmsKey: Self { .init(.invalidSourceKmsKey) }
    /// A cache configuration is required for this operation.
    public static var missingFileCacheConfiguration: Self { .init(.missingFileCacheConfiguration) }
    /// A file system configuration is required for this operation.
    public static var missingFileSystemConfiguration: Self { .init(.missingFileSystemConfiguration) }
    /// A volume configuration is required for this operation.
    public static var missingVolumeConfiguration: Self { .init(.missingVolumeConfiguration) }
    /// The resource specified for the tagging operation is not a resource type owned by Amazon FSx. Use the API of the relevant service to perform the operation.
    public static var notServiceResourceError: Self { .init(.notServiceResourceError) }
    /// The resource specified does not support tagging.
    public static var resourceDoesNotSupportTagging: Self { .init(.resourceDoesNotSupportTagging) }
    /// The resource specified by the Amazon Resource Name (ARN) can't be found.
    public static var resourceNotFound: Self { .init(.resourceNotFound) }
    /// The access point specified was not found.
    public static var s3AccessPointAttachmentNotFound: Self { .init(.s3AccessPointAttachmentNotFound) }
    /// An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.
    public static var serviceLimitExceeded: Self { .init(.serviceLimitExceeded) }
    /// No Amazon FSx snapshots were found based on the supplied parameters.
    public static var snapshotNotFound: Self { .init(.snapshotNotFound) }
    /// The request was rejected because the lifecycle status of the source backup isn't AVAILABLE.
    public static var sourceBackupUnavailable: Self { .init(.sourceBackupUnavailable) }
    /// No FSx for ONTAP SVMs were found based upon the supplied parameters.
    public static var storageVirtualMachineNotFound: Self { .init(.storageVirtualMachineNotFound) }
    /// You have reached the maximum number of S3 access points attachments allowed for your account in this Amazon Web Services Region, or for the file system. For more information, or to request an increase,  see Service quotas on FSx resources in the FSx for OpenZFS User Guide.
    public static var tooManyAccessPoints: Self { .init(.tooManyAccessPoints) }
    /// The requested operation is not supported for this resource or API.
    public static var unsupportedOperation: Self { .init(.unsupportedOperation) }
    /// No Amazon FSx volumes were found based upon the supplied parameters.
    public static var volumeNotFound: Self { .init(.volumeNotFound) }
}

extension FSxErrorType: AWSServiceErrorType {
    /// Maps error code strings to the modeled error shapes used to decode additional error members.
    /// NOTE(review): only a subset of `Code` cases appear here — presumably only errors with
    /// extra modeled fields beyond the message; confirm against the code generator output.
    public static let errorCodeMap: [String: AWSErrorShape.Type] = [
        "AccessPointAlreadyOwnedByYou": FSx.AccessPointAlreadyOwnedByYou.self,
        "ActiveDirectoryError": FSx.ActiveDirectoryError.self,
        "BackupBeingCopied": FSx.BackupBeingCopied.self,
        "BackupRestoring": FSx.BackupRestoring.self,
        "IncompatibleParameterError": FSx.IncompatibleParameterError.self,
        "InvalidAccessPoint": FSx.InvalidAccessPoint.self,
        "InvalidNetworkSettings": FSx.InvalidNetworkSettings.self,
        "InvalidRequest": FSx.InvalidRequest.self,
        "NotServiceResourceError": FSx.NotServiceResourceError.self,
        "ResourceDoesNotSupportTagging": FSx.ResourceDoesNotSupportTagging.self,
        "ResourceNotFound": FSx.ResourceNotFound.self,
        "ServiceLimitExceeded": FSx.ServiceLimitExceeded.self,
        "SourceBackupUnavailable": FSx.SourceBackupUnavailable.self,
        "TooManyAccessPoints": FSx.TooManyAccessPoints.self
    ]
}

extension FSxErrorType: Equatable {
    /// Two error values are equal when they carry the same error code; any attached context is ignored.
    /// Comparing the raw code strings is equivalent to comparing the `Code` cases, since raw values are unique.
    public static func == (lhs: FSxErrorType, rhs: FSxErrorType) -> Bool {
        return lhs.errorCode == rhs.errorCode
    }
}

extension FSxErrorType: CustomStringConvertible {
    /// A human-readable rendering in the form "<error code>: <message>"; the message part is empty when no context is attached.
    public var description: String {
        let message = self.message ?? ""
        return "\(self.error.rawValue): \(message)"
    }
}
