/**
 *
 */
import OpenApi;
import OpenApi.OpenApiUtil;

extends OpenApi;


init(config: OpenApiUtil.Config){
  super(config);
  @endpointRule = 'regional';
  @endpointMap = {
    'cn-beijing-gov-1' = 'imm-vpc.cn-beijing-gov-1.aliyuncs.com',
  };

  checkConfig(config);
  @endpoint = getEndpoint('imm', @regionId, @endpointRule, @network, @suffix, @endpointMap, @endpoint);
}

function getEndpoint(productId: string, regionId: string, endpointRule: string, network: string, suffix: string, endpointMap: map[string]string, endpoint: string) throws: string{
  if (!$isNull(endpoint)) {
    return endpoint;
  }
  
  if (!$isNull(endpointMap) && !$isNull(endpointMap[regionId])) {
    return endpointMap[regionId];
  }
  return OpenApiUtil.getEndpointRules(productId, regionId, endpointRule, network, suffix);
}

model Address {
  addressLine?: string(name='AddressLine'),
  city?: string(name='City'),
  country?: string(name='Country'),
  district?: string(name='District'),
  language?: string(name='Language'),
  province?: string(name='Province'),
  township?: string(name='Township'),
}

model AddressForStory {
  city?: string(name='City'),
  country?: string(name='Country'),
  district?: string(name='District'),
  province?: string(name='Province'),
  township?: string(name='Township'),
}

model AlgorithmDefinition {
  algorithmDefinitionId?: string(name='AlgorithmDefinitionId', example='8fc6e718-8d19-495f-a510-bcee3c598588'),
  createTime?: string(name='CreateTime', example='2023-05-31T10:19:40.572325888+08:00'),
  customLabels?: [ map[string]string ](name='CustomLabels'),
  description?: string(name='Description', example='test'),
  name?: string(name='Name', example='algoName'),
  ownerId?: string(name='OwnerId', example='user1'),
  projectName?: string(name='ProjectName', example='traningtest'),
  trainingSpecification?: TrainingSpecification(name='TrainingSpecification'),
  updateTime?: string(name='UpdateTime', example='2023-05-31T10:19:40.572325888+08:00'),
}

model Answer {
  content?: string(name='Content', example='你好'),
  references?: [
    ReferenceFile
  ](name='References'),
}

model App {
  appDescription?: string(name='AppDescription'),
  appId?: string(name='AppId'),
  appKey?: string(name='AppKey'),
  appName?: string(name='AppName'),
  appRegion?: long(name='AppRegion'),
  appType?: long(name='AppType'),
  englishName?: string(name='EnglishName'),
  ownerId?: string(name='OwnerId'),
  packageName?: string(name='PackageName'),
}

model AssumeRoleChain {
  chain?: [
    AssumeRoleChainNode
  ](name='Chain'),
  policy?: string(name='Policy'),
}

model AssumeRoleChainNode {
  ownerId?: string(name='OwnerId', description='This parameter is required.', example='1023210024677934'),
  role?: string(name='Role', description='This parameter is required.', example='test-role'),
  type?: string(name='Type', description='This parameter is required.', example='user'),
}

model AudioStream {
  bitrate?: long(name='Bitrate'),
  channelLayout?: string(name='ChannelLayout'),
  channels?: long(name='Channels'),
  codecLongName?: string(name='CodecLongName'),
  codecName?: string(name='CodecName'),
  codecTag?: string(name='CodecTag'),
  codecTagString?: string(name='CodecTagString'),
  codecTimeBase?: string(name='CodecTimeBase'),
  duration?: double(name='Duration'),
  frameCount?: long(name='FrameCount'),
  index?: long(name='Index'),
  language?: string(name='Language'),
  lyric?: string(name='Lyric'),
  sampleFormat?: string(name='SampleFormat'),
  sampleRate?: long(name='SampleRate'),
  startTime?: double(name='StartTime'),
  timeBase?: string(name='TimeBase'),
}

model Binding {
  createTime?: string(name='CreateTime'),
  datasetName?: string(name='DatasetName'),
  phase?: string(name='Phase'),
  projectName?: string(name='ProjectName'),
  reason?: string(name='Reason'),
  state?: string(name='State'),
  URI?: string(name='URI'),
  updateTime?: string(name='UpdateTime'),
}

model Body {
  boundary?: Boundary(name='Boundary'),
  confidence?: float(name='Confidence'),
}

model Boundary {
  height?: long(name='Height'),
  left?: long(name='Left'),
  polygon?: [
    PointInt64
  ](name='Polygon'),
  top?: long(name='Top'),
  width?: long(name='Width'),
}

model Car {
  boundary?: Boundary(name='Boundary'),
  carColor?: string(name='CarColor'),
  carColorConfidence?: double(name='CarColorConfidence'),
  carType?: string(name='CarType'),
  carTypeConfidence?: double(name='CarTypeConfidence'),
  confidence?: double(name='Confidence'),
  licensePlates?: [
    LicensePlate
  ](name='LicensePlates'),
}

model ClusterForReq {
  cover?: {
    figures?: [ 
      {
        figureId?: string(name='FigureId'),
      }
    ](name='Figures'),
  }(name='Cover'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  name?: string(name='Name'),
  objectId?: string(name='ObjectId'),
}

model Codes {
  boundary?: Boundary(name='Boundary'),
  confidence?: float(name='Confidence'),
  content?: string(name='Content'),
  type?: string(name='Type'),
}

model ContextualFile {
  contentType?: string(name='ContentType'),
  datasetName?: string(name='DatasetName'),
  elements?: [
    Element
  ](name='Elements'),
  mediaType?: string(name='MediaType'),
  OSSURI?: string(name='OSSURI'),
  objectId?: string(name='ObjectId'),
  ownerId?: string(name='OwnerId'),
  projectName?: string(name='ProjectName'),
  URI?: string(name='URI'),
}

model ContextualMessage {
  content?: string(name='Content', example='你好'),
  files?: [
    ContextualFile
  ](name='Files'),
  role?: string(name='Role', example='user'),
}

model CredentialConfig {
  chain?: [ 
    {
      assumeRoleFor?: string(name='AssumeRoleFor'),
      role?: string(name='Role'),
      roleType?: string(name='RoleType'),
    }
  ](name='Chain'),
  policy?: string(name='Policy'),
  serviceRole?: string(name='ServiceRole'),
}

model CroppingSuggestion {
  aspectRatio?: string(name='AspectRatio'),
  boundary?: Boundary(name='Boundary'),
  confidence?: float(name='Confidence'),
}

model CustomParams {
  name?: string(name='Name', example='Normalize'),
  properties?: [
    Property
  ](name='Properties'),
}

model DataIngestion {
  actions?: [ 
    {
      fastFailPolicy?: FastFailPolicy(name='FastFailPolicy'),
      name?: string(name='Name'),
      parameters?: [ string ](name='Parameters'),
    }
  ](name='Actions'),
  createTime?: string(name='CreateTime'),
  error?: string(name='Error'),
  id?: string(name='Id'),
  input?: Input(name='Input'),
  marker?: string(name='Marker'),
  notification?: {
    endpoint?: string(name='Endpoint'),
    MNS?: MNS(name='MNS'),
    rocketMQ?: RocketMQ(name='RocketMQ'),
    topic?: string(name='Topic'),
  }(name='Notification'),
  phase?: string(name='Phase', example='IncrementalScanning'),
  serviceRole?: string(name='ServiceRole', example='AliyunIMMBatchTriggerRole'),
  state?: string(name='State'),
  statistic?: {
    skipFiles?: long(name='SkipFiles'),
    submitFailure?: long(name='SubmitFailure'),
    submitSuccess?: long(name='SubmitSuccess'),
  }(name='Statistic'),
  tags?: map[string]any(name='Tags'),
  updateTime?: string(name='UpdateTime'),
}

model Dataset {
  bindCount?: long(name='BindCount'),
  createTime?: string(name='CreateTime'),
  datasetMaxBindCount?: long(name='DatasetMaxBindCount'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize'),
  datasetName?: string(name='DatasetName'),
  description?: string(name='Description'),
  fileCount?: long(name='FileCount'),
  projectName?: string(name='ProjectName'),
  templateId?: string(name='TemplateId'),
  totalFileSize?: long(name='TotalFileSize'),
  updateTime?: string(name='UpdateTime'),
}

model DatasetTaskStatus {
  lastSucceededTime?: string(name='LastSucceededTime', example='2024-06-29T14:50:13.011643661+08:00'),
  startTime?: string(name='StartTime', example='2024-06-29T14:50:13.011643661+08:00'),
  status?: string(name='Status', example='Succeeded'),
}

model Element {
  elementContents?: [
    ElementContent
  ](name='ElementContents'),
  elementRelations?: [
    ElementRelation
  ](name='ElementRelations'),
  elementType?: string(name='ElementType'),
  objectId?: string(name='ObjectId'),
  semanticSimilarity?: float(name='SemanticSimilarity'),
}

model ElementContent {
  content?: string(name='Content'),
  timeRange?: [ long ](name='TimeRange'),
  type?: string(name='Type'),
  URL?: string(name='URL'),
}

model ElementRelation {
  objectId?: string(name='ObjectId'),
  type?: string(name='Type'),
}

model FastFailPolicy {
  action?: string(name='Action', example='abort'),
}

model Figure {
  age?: long(name='Age'),
  ageSD?: float(name='AgeSD'),
  attractive?: float(name='Attractive'),
  beard?: string(name='Beard'),
  beardConfidence?: float(name='BeardConfidence'),
  boundary?: Boundary(name='Boundary'),
  emotion?: string(name='Emotion'),
  emotionConfidence?: float(name='EmotionConfidence'),
  faceQuality?: float(name='FaceQuality'),
  figureClusterConfidence?: float(name='FigureClusterConfidence'),
  figureClusterId?: string(name='FigureClusterId'),
  figureConfidence?: float(name='FigureConfidence'),
  figureId?: string(name='FigureId'),
  figureType?: string(name='FigureType'),
  gender?: string(name='Gender'),
  genderConfidence?: float(name='GenderConfidence'),
  glasses?: string(name='Glasses'),
  glassesConfidence?: float(name='GlassesConfidence'),
  hat?: string(name='Hat'),
  hatConfidence?: float(name='HatConfidence'),
  headPose?: HeadPose(name='HeadPose'),
  mask?: string(name='Mask'),
  maskConfidence?: float(name='MaskConfidence'),
  mouth?: string(name='Mouth'),
  mouthConfidence?: float(name='MouthConfidence'),
  sharpness?: float(name='Sharpness'),
}

model FigureCluster {
  averageAge?: float(name='AverageAge'),
  cover?: File(name='Cover'),
  createTime?: string(name='CreateTime'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  datasetName?: string(name='DatasetName'),
  faceCount?: long(name='FaceCount'),
  gender?: string(name='Gender'),
  imageCount?: long(name='ImageCount'),
  maxAge?: float(name='MaxAge'),
  metaLockVersion?: long(name='MetaLockVersion'),
  minAge?: float(name='MinAge'),
  name?: string(name='Name'),
  objectId?: string(name='ObjectId'),
  objectType?: string(name='ObjectType'),
  ownerId?: string(name='OwnerId'),
  projectName?: string(name='ProjectName'),
  updateTime?: string(name='UpdateTime'),
  videoCount?: long(name='VideoCount'),
}

model FigureClusterForReq {
  cover?: {
    figures?: [ 
      {
        figureId?: string(name='FigureId'),
      }
    ](name='Figures'),
  }(name='Cover'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  metaLockVersion?: long(name='MetaLockVersion'),
  name?: string(name='Name'),
  objectId?: string(name='ObjectId'),
}

model File {
  accessControlAllowOrigin?: string(name='AccessControlAllowOrigin'),
  accessControlRequestMethod?: string(name='AccessControlRequestMethod'),
  addresses?: [
    Address
  ](name='Addresses'),
  album?: string(name='Album'),
  albumArtist?: string(name='AlbumArtist'),
  artist?: string(name='Artist'),
  audioCovers?: [
    Image
  ](name='AudioCovers'),
  audioStreams?: [
    AudioStream
  ](name='AudioStreams'),
  bitrate?: long(name='Bitrate'),
  cacheControl?: string(name='CacheControl'),
  composer?: string(name='Composer'),
  contentDisposition?: string(name='ContentDisposition'),
  contentEncoding?: string(name='ContentEncoding'),
  contentLanguage?: string(name='ContentLanguage'),
  contentMd5?: string(name='ContentMd5'),
  contentType?: string(name='ContentType'),
  createTime?: string(name='CreateTime'),
  croppingSuggestions?: [
    CroppingSuggestion
  ](name='CroppingSuggestions'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  datasetName?: string(name='DatasetName'),
  duration?: double(name='Duration'),
  ETag?: string(name='ETag'),
  EXIF?: string(name='EXIF'),
  elements?: [
    Element
  ](name='Elements'),
  figureCount?: long(name='FigureCount'),
  figures?: [
    Figure
  ](name='Figures'),
  fileAccessTime?: string(name='FileAccessTime'),
  fileCreateTime?: string(name='FileCreateTime'),
  fileHash?: string(name='FileHash'),
  fileModifiedTime?: string(name='FileModifiedTime'),
  filename?: string(name='Filename'),
  formatLongName?: string(name='FormatLongName'),
  formatName?: string(name='FormatName'),
  imageHeight?: long(name='ImageHeight'),
  imageScore?: ImageScore(name='ImageScore'),
  imageWidth?: long(name='ImageWidth'),
  labels?: [
    Label
  ](name='Labels'),
  language?: string(name='Language'),
  latLong?: string(name='LatLong'),
  mediaType?: string(name='MediaType'),
  OCRContents?: [
    OCRContents
  ](name='OCRContents'),
  OCRTexts?: string(name='OCRTexts'),
  OSSCRC64?: string(name='OSSCRC64'),
  OSSDeleteMarker?: string(name='OSSDeleteMarker'),
  OSSExpiration?: string(name='OSSExpiration'),
  OSSObjectType?: string(name='OSSObjectType'),
  OSSStorageClass?: string(name='OSSStorageClass'),
  OSSTagging?: map[string]any(name='OSSTagging'),
  OSSTaggingCount?: long(name='OSSTaggingCount'),
  OSSURI?: string(name='OSSURI'),
  OSSUserMeta?: map[string]any(name='OSSUserMeta'),
  OSSVersionId?: string(name='OSSVersionId'),
  objectACL?: string(name='ObjectACL'),
  objectId?: string(name='ObjectId'),
  objectStatus?: string(name='ObjectStatus'),
  objectType?: string(name='ObjectType'),
  orientation?: long(name='Orientation'),
  ownerId?: string(name='OwnerId'),
  pageCount?: long(name='PageCount'),
  performer?: string(name='Performer'),
  produceTime?: string(name='ProduceTime'),
  programCount?: long(name='ProgramCount'),
  projectName?: string(name='ProjectName'),
  reason?: string(name='Reason'),
  sceneElements?: [
    SceneElement
  ](name='SceneElements'),
  semanticTypes?: [ string ](name='SemanticTypes'),
  serverSideDataEncryption?: string(name='ServerSideDataEncryption'),
  serverSideEncryption?: string(name='ServerSideEncryption'),
  serverSideEncryptionCustomerAlgorithm?: string(name='ServerSideEncryptionCustomerAlgorithm'),
  serverSideEncryptionKeyId?: string(name='ServerSideEncryptionKeyId'),
  size?: long(name='Size'),
  startTime?: double(name='StartTime'),
  streamCount?: long(name='StreamCount'),
  subtitles?: [
    SubtitleStream
  ](name='Subtitles'),
  timezone?: string(name='Timezone'),
  title?: string(name='Title'),
  travelClusterId?: string(name='TravelClusterId'),
  URI?: string(name='URI'),
  updateTime?: string(name='UpdateTime'),
  videoHeight?: long(name='VideoHeight'),
  videoStreams?: [
    VideoStream
  ](name='VideoStreams'),
  videoWidth?: long(name='VideoWidth'),
}

model FileSmartCluster {
  similarity?: float(name='Similarity'),
  smartClusterId?: string(name='SmartClusterId'),
}

model FunctionCall {
  arguments?: string(name='Arguments'),
  name?: string(name='Name', description='This parameter is required.'),
}

model HeadPose {
  pitch?: float(name='Pitch'),
  roll?: float(name='Roll'),
  yaw?: float(name='Yaw'),
}

model Hyperparameters {
  backupInterval?: long(name='BackupInterval', example='1'),
  batchSize?: long(name='BatchSize', example='32'),
  dataLoaderWorkers?: long(name='DataLoaderWorkers', example='4'),
  evaluator?: CustomParams(name='Evaluator', description='This parameter is required.'),
  inputSize?: [ long ](name='InputSize', description='This parameter is required.'),
  maxEpoch?: long(name='MaxEpoch', example='10'),
  optimization?: Optimization(name='Optimization'),
  schedule?: Schedule(name='Schedule'),
}

model Image {
  croppingSuggestions?: [
    CroppingSuggestion
  ](name='CroppingSuggestions'),
  EXIF?: string(name='EXIF'),
  imageHeight?: long(name='ImageHeight'),
  imageScore?: ImageScore(name='ImageScore'),
  imageWidth?: long(name='ImageWidth'),
  OCRContents?: [
    OCRContents
  ](name='OCRContents'),
}

model ImageScore {
  overallQualityScore?: float(name='OverallQualityScore'),
}

model Input {
  OSS?: InputOSS(name='OSS'),
}

model InputFile {
  addresses?: [
    Address
  ](name='Addresses'),
  album?: string(name='Album'),
  albumArtist?: string(name='AlbumArtist'),
  artist?: string(name='Artist'),
  composer?: string(name='Composer'),
  contentType?: string(name='ContentType'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  figures?: [ 
    {
      figureClusterId?: string(name='FigureClusterId'),
      figureId?: string(name='FigureId'),
      figureType?: string(name='FigureType'),
    }
  ](name='Figures'),
  fileHash?: string(name='FileHash'),
  labels?: [
    Label
  ](name='Labels'),
  latLong?: string(name='LatLong'),
  mediaType?: string(name='MediaType'),
  OSSURI?: string(name='OSSURI'),
  performer?: string(name='Performer'),
  produceTime?: string(name='ProduceTime'),
  title?: string(name='Title'),
  URI?: string(name='URI'),
}

model InputOSS {
  bucket?: string(name='Bucket', description='This parameter is required.'),
  matchExpressions?: [ string ](name='MatchExpressions'),
  prefix?: string(name='Prefix'),
}

model KdtreeOption {
  compressionLevel?: int32(name='CompressionLevel'),
  libraryName?: string(name='LibraryName', example='draco'),
  quantizationBits?: int32(name='QuantizationBits'),
}

model KeyValuePair {
  key?: string(name='Key'),
  value?: string(name='Value'),
}

model Label {
  centricScore?: float(name='CentricScore'),
  labelConfidence?: float(name='LabelConfidence'),
  labelLevel?: long(name='LabelLevel'),
  labelName?: string(name='LabelName'),
  language?: string(name='Language'),
  parentLabelName?: string(name='ParentLabelName'),
}

model LicensePlate {
  boundary?: Boundary(name='Boundary'),
  confidence?: double(name='Confidence'),
  content?: string(name='Content', example='川A0123'),
}

model LocationDateCluster {
  addresses?: [
    Address
  ](name='Addresses'),
  createTime?: string(name='CreateTime'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  locationDateClusterEndTime?: string(name='LocationDateClusterEndTime'),
  locationDateClusterLevel?: string(name='LocationDateClusterLevel'),
  locationDateClusterStartTime?: string(name='LocationDateClusterStartTime'),
  objectId?: string(name='ObjectId'),
  title?: string(name='Title'),
  updateTime?: string(name='UpdateTime'),
}

model MNS {
  topicName?: string(name='TopicName'),
}

model Message {
  assistantType?: string(name='AssistantType'),
  content?: string(name='Content'),
  createTime?: string(name='CreateTime'),
  datasetName?: string(name='DatasetName'),
  language?: string(name='Language'),
  regenerate?: boolean(name='Regenerate'),
  reply?: string(name='Reply'),
  score?: double(name='Score'),
  sourceURI?: string(name='SourceURI'),
  suggestion?: string(name='Suggestion'),
  tone?: string(name='Tone'),
  topic?: string(name='Topic'),
}

model MetaData {
  identifier?: string(name='Identifier', example='detection'),
  provider?: string(name='Provider', example='imm'),
  version?: string(name='Version', example='v1'),
}

model ModelSpecification {
  metaData?: MetaData(name='MetaData', description='This parameter is required.'),
  spec?: Spec(name='Spec', description='This parameter is required.'),
}

model Notification {
  extendedMessageURI?: string(name='ExtendedMessageURI'),
  MNS?: MNS(name='MNS'),
  rocketMQ?: RocketMQ(name='RocketMQ'),
}

model OCRContents {
  boundary?: Boundary(name='Boundary'),
  confidence?: float(name='Confidence'),
  contents?: string(name='Contents'),
  language?: string(name='Language'),
}

model OctreeOption {
  doVoxelGridDownDownSampling?: boolean(name='DoVoxelGridDownDownSampling', example='false'),
  libraryName?: string(name='LibraryName', example='pcl'),
  octreeResolution?: double(name='OctreeResolution', example='0.01'),
  pointResolution?: double(name='PointResolution', example='0.01'),
}

model Optimization {
  learningRate?: float(name='LearningRate', example='0.01'),
  optimizer?: string(name='Optimizer', example='SGD'),
}

model PointInt64 {
  x?: long(name='X'),
  y?: long(name='Y'),
}

model Project {
  createTime?: string(name='CreateTime'),
  datasetCount?: long(name='DatasetCount'),
  datasetMaxBindCount?: long(name='DatasetMaxBindCount'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize'),
  description?: string(name='Description'),
  engineConcurrency?: long(name='EngineConcurrency'),
  fileCount?: long(name='FileCount'),
  projectMaxDatasetCount?: long(name='ProjectMaxDatasetCount'),
  projectName?: string(name='ProjectName'),
  projectQueriesPerSecond?: long(name='ProjectQueriesPerSecond'),
  serviceRole?: string(name='ServiceRole'),
  tags?: [ 
    {
      tagKey?: string(name='TagKey'),
      tagValue?: string(name='TagValue'),
    }
  ](name='Tags'),
  templateId?: string(name='TemplateId'),
  totalFileSize?: long(name='TotalFileSize'),
  updateTime?: string(name='UpdateTime'),
}

model Property {
  itemsType?: string(name='ItemsType', example='float'),
  name?: string(name='Name', example='channels'),
  value?: string(name='Value', example='[40, 80, 160, 320]'),
  valueType?: string(name='ValueType', example='array'),
}

model ReferenceFile {
  datasetName?: string(name='DatasetName', example='test-dataset'),
  objectId?: string(name='ObjectId', example='75d5de2c50754e3dadd5c35dbca5f9949369e37eb342a73821f690c94c36c7f7'),
  projectName?: string(name='ProjectName', example='test-project'),
  URI?: string(name='URI', example='oss://test-bucket/test-object.jpg'),
}

model RegionType {
  localName?: string(name='LocalName'),
  regionId?: string(name='RegionId'),
}

model Resource {
  CPU?: long(name='CPU', example='2'),
  ECSInstance?: string(name='ECSInstance', example='ecs.gn5i-c2g1.large'),
  GPUModel?: string(name='GPUModel', example='string	NVIDIA_P4'),
  GPUNum?: long(name='GPUNum', example='1'),
  name?: string(name='Name', example='string	ecs.gn5i-c2g1.large-2vCPU-8GB-1*NVIDIA_P4'),
  RAM?: long(name='RAM', example='8'),
}

model RocketMQ {
  instanceId?: string(name='InstanceId'),
  topicName?: string(name='TopicName'),
}

model Row {
  customLabels?: [
    KeyValuePair
  ](name='CustomLabels'),
  URI?: string(name='URI'),
}

model Runtime {
  hyperparameters?: Hyperparameters(name='Hyperparameters', description='This parameter is required.'),
  resource?: Resource(name='Resource', description='This parameter is required.'),
}

model SceneElement {
  frameTimes?: [ long ](name='FrameTimes'),
  timeRange?: [ long ](name='TimeRange'),
}

model Schedule {
  gamma?: float(name='Gamma', example='0.97'),
  LRScheduler?: string(name='LRScheduler', example='StepLR'),
  stepSize?: long(name='StepSize', example='1'),
}

model SimilarImage {
  imageScore?: double(name='ImageScore'),
  URI?: string(name='URI'),
}

model SimilarImageCluster {
  createTime?: string(name='CreateTime'),
  customLabels?: map[string]any(name='CustomLabels'),
  files?: [
    SimilarImage
  ](name='Files'),
  objectId?: string(name='ObjectId'),
  updateTime?: string(name='UpdateTime'),
}

model SimpleQuery {
  field?: string(name='Field'),
  operation?: string(name='Operation', example='eq / gt / gte / lt / lte / match / prefix / and / or / not'),
  subQueries?: [
    SimpleQuery
  ](name='SubQueries'),
  value?: string(name='Value'),
}

model SmartCluster {
  createTime?: string(name='CreateTime'),
  datasetName?: string(name='DatasetName'),
  description?: string(name='Description'),
  name?: string(name='Name'),
  objectId?: string(name='ObjectId'),
  objectStatus?: string(name='ObjectStatus'),
  objectType?: string(name='ObjectType'),
  ownerId?: string(name='OwnerId'),
  projectName?: string(name='ProjectName'),
  rule?: SmartClusterRule(name='Rule'),
  updateTime?: string(name='UpdateTime'),
}

model SmartClusterRule {
  keywords?: [ string ](name='Keywords'),
  sensitivity?: float(name='Sensitivity', example='0.5'),
}

model Spec {
  backbone?: CustomParams(name='Backbone'),
  classNum?: long(name='ClassNum', example='10'),
  head?: CustomParams(name='Head'),
  inputChannel?: long(name='InputChannel', example='3'),
  loss?: CustomParams(name='Loss'),
  name?: string(name='Name', description='This parameter is required.', example='ClsResNet'),
  neck?: CustomParams(name='Neck'),
  numLandmarks?: long(name='NumLandmarks', example='5'),
  pretrainedPath?: string(name='PretrainedPath', example='oss://bucket/abc/xxx.json'),
}

model Story {
  addresses?: [
    Address
  ](name='Addresses'),
  cover?: File(name='Cover'),
  createTime?: string(name='CreateTime'),
  customId?: string(name='CustomId'),
  customLabels?: map[string]any(name='CustomLabels'),
  datasetName?: string(name='DatasetName'),
  figureClusterIds?: [ string ](name='FigureClusterIds'),
  files?: [
    File
  ](name='Files'),
  objectId?: string(name='ObjectId'),
  objectType?: string(name='ObjectType'),
  ownerId?: string(name='OwnerId'),
  projectName?: string(name='ProjectName'),
  storyEndTime?: string(name='StoryEndTime'),
  storyName?: string(name='StoryName'),
  storyStartTime?: string(name='StoryStartTime'),
  storySubType?: string(name='StorySubType'),
  storyType?: string(name='StoryType'),
  updateTime?: string(name='UpdateTime'),
}

model StreamOptions {
  incrementalOutput?: boolean(name='IncrementalOutput'),
}

model SubtitleStream {
  bitrate?: long(name='Bitrate'),
  codecLongName?: string(name='CodecLongName'),
  codecName?: string(name='CodecName'),
  codecTag?: string(name='CodecTag'),
  codecTagString?: string(name='CodecTagString'),
  content?: string(name='Content'),
  duration?: double(name='Duration'),
  height?: long(name='Height'),
  index?: long(name='Index'),
  language?: string(name='Language'),
  startTime?: double(name='StartTime'),
  width?: long(name='Width'),
}

model TargetAudio {
  disableAudio?: boolean(name='DisableAudio'),
  filterAudio?: {
    mixing?: boolean(name='Mixing'),
  }(name='FilterAudio'),
  stream?: [ long ](name='Stream'),
  transcodeAudio?: {
    bitrate?: int32(name='Bitrate'),
    bitrateOption?: string(name='BitrateOption'),
    bitsPerSample?: int32(name='BitsPerSample'),
    channel?: int32(name='Channel'),
    codec?: string(name='Codec'),
    quality?: int32(name='Quality'),
    sampleRate?: int32(name='SampleRate'),
    sampleRateOption?: string(name='SampleRateOption'),
  }(name='TranscodeAudio'),
}

model TargetImage {
  animations?: [ 
    {
      format?: string(name='Format', description='This parameter is required.'),
      frameRate?: double(name='FrameRate'),
      height?: double(name='Height'),
      interval?: double(name='Interval'),
      number?: int32(name='Number'),
      scaleType?: string(name='ScaleType'),
      startTime?: double(name='StartTime'),
      URI?: string(name='URI', description='This parameter is required.'),
      width?: double(name='Width'),
    }
  ](name='Animations'),
  snapshots?: [ 
    {
      format?: string(name='Format', description='This parameter is required.'),
      height?: double(name='Height'),
      interval?: double(name='Interval'),
      mode?: string(name='Mode'),
      number?: int32(name='Number'),
      scaleType?: string(name='ScaleType'),
      startTime?: double(name='StartTime'),
      threshold?: int32(name='Threshold'),
      URI?: string(name='URI', description='This parameter is required.'),
      width?: double(name='Width'),
    }
  ](name='Snapshots'),
  sprites?: [ 
    {
      format?: string(name='Format', description='This parameter is required.'),
      interval?: double(name='Interval'),
      margin?: int32(name='Margin'),
      mode?: string(name='Mode'),
      number?: int32(name='Number'),
      pad?: int32(name='Pad'),
      scaleHeight?: float(name='ScaleHeight'),
      scaleType?: string(name='ScaleType'),
      scaleWidth?: float(name='ScaleWidth'),
      startTime?: double(name='StartTime'),
      threshold?: int32(name='Threshold'),
      tileHeight?: int32(name='TileHeight'),
      tileWidth?: int32(name='TileWidth'),
      URI?: string(name='URI', description='This parameter is required.'),
    }
  ](name='Sprites'),
}

model TargetSubtitle {
  disableSubtitle?: boolean(name='DisableSubtitle'),
  extractSubtitle?: {
    format?: string(name='Format'),
    URI?: string(name='URI'),
  }(name='ExtractSubtitle'),
  stream?: [ int32 ](name='Stream'),
}

model TargetVideo {
  disableVideo?: boolean(name='DisableVideo'),
  filterVideo?: {
    delogos?: [ 
      {
        duration?: double(name='Duration'),
        dx?: float(name='Dx'),
        dy?: float(name='Dy'),
        height?: float(name='Height'),
        referPos?: string(name='ReferPos'),
        startTime?: double(name='StartTime'),
        width?: float(name='Width'),
      }
    ](name='Delogos'),
    watermarks?: [ 
      {
        borderColor?: string(name='BorderColor'),
        borderWidth?: int32(name='BorderWidth'),
        content?: string(name='Content'),
        duration?: double(name='Duration'),
        dx?: float(name='Dx'),
        dy?: float(name='Dy'),
        fontApha?: float(name='FontApha'),
        fontColor?: string(name='FontColor'),
        fontName?: string(name='FontName'),
        fontSize?: int32(name='FontSize'),
        height?: float(name='Height'),
        referPos?: string(name='ReferPos'),
        startTime?: double(name='StartTime'),
        type?: string(name='Type'),
        URI?: string(name='URI'),
        width?: float(name='Width'),
      }
    ](name='Watermarks'),
  }(name='FilterVideo'),
  stream?: [ int32 ](name='Stream'),
  transcodeVideo?: {
    adaptiveResolutionDirection?: boolean(name='AdaptiveResolutionDirection'),
    BFrames?: int32(name='BFrames'),
    bitrate?: int32(name='Bitrate'),
    bitrateOption?: string(name='BitrateOption'),
    bufferSize?: int32(name='BufferSize'),
    CRF?: float(name='CRF'),
    codec?: string(name='Codec'),
    frameRate?: float(name='FrameRate'),
    frameRateOption?: string(name='FrameRateOption'),
    GOPSize?: int32(name='GOPSize'),
    maxBitrate?: int32(name='MaxBitrate'),
    pixelFormat?: string(name='PixelFormat'),
    refs?: int32(name='Refs'),
    resolution?: string(name='Resolution'),
    resolutionOption?: string(name='ResolutionOption'),
    rotation?: int32(name='Rotation'),
    scaleType?: string(name='ScaleType'),
  }(name='TranscodeVideo'),
}

model TaskInfo {
  code?: string(name='Code'),
  endTime?: string(name='EndTime'),
  message?: string(name='Message'),
  progress?: int32(name='Progress'),
  startTime?: string(name='StartTime'),
  status?: string(name='Status'),
  tags?: map[string]any(name='Tags'),
  taskId?: string(name='TaskId'),
  taskRequestDefinition?: string(name='TaskRequestDefinition'),
  taskType?: string(name='TaskType'),
  userData?: string(name='UserData'),
}

model TimeRange {
  end?: string(name='End'),
  start?: string(name='Start'),
}

model ToolCall {
  function?: FunctionCall(name='Function'),
  type?: string(name='Type'),
}

model TrainingSpecification {
  datasetName?: string(name='DatasetName'),
  endpoint?: string(name='Endpoint', description='This parameter is required.'),
  modelSpecification?: ModelSpecification(name='ModelSpecification', description='This parameter is required.'),
  runtime?: Runtime(name='Runtime', description='This parameter is required.'),
  sourceURI?: string(name='SourceURI', description='This parameter is required.', example='oss://imm-alg-dataset-bj/cifar10/test_index.json'),
  targetURI?: string(name='TargetURI', description='This parameter is required.', example='oss://imm-alg-dataset-bj/model_training_test/'),
  transforms?: [
    CustomParams
  ](name='Transforms'),
  validationSourceURI?: string(name='ValidationSourceURI', example='oss://imm-alg-dataset-bj/cifar10/test_index.json'),
  validationSplit?: float(name='ValidationSplit', example='0.95'),
}

model TrimPolicy {
  disableDeleteEmptyCell?: boolean(name='DisableDeleteEmptyCell'),
  disableDeleteRepeatedStyle?: boolean(name='DisableDeleteRepeatedStyle'),
  disableDeleteUnusedPicture?: boolean(name='DisableDeleteUnusedPicture'),
  disableDeleteUnusedShape?: boolean(name='DisableDeleteUnusedShape'),
}

model VideoStream {
  averageFrameRate?: string(name='AverageFrameRate'),
  bitDepth?: long(name='BitDepth'),
  bitrate?: long(name='Bitrate'),
  codecLongName?: string(name='CodecLongName'),
  codecName?: string(name='CodecName'),
  codecTag?: string(name='CodecTag'),
  codecTagString?: string(name='CodecTagString'),
  codecTimeBase?: string(name='CodecTimeBase'),
  colorPrimaries?: string(name='ColorPrimaries'),
  colorRange?: string(name='ColorRange'),
  colorSpace?: string(name='ColorSpace'),
  colorTransfer?: string(name='ColorTransfer'),
  displayAspectRatio?: string(name='DisplayAspectRatio'),
  duration?: double(name='Duration'),
  frameCount?: long(name='FrameCount'),
  frameRate?: string(name='FrameRate'),
  hasBFrames?: long(name='HasBFrames'),
  height?: long(name='Height'),
  index?: long(name='Index'),
  language?: string(name='Language'),
  level?: long(name='Level'),
  pixelFormat?: string(name='PixelFormat'),
  profile?: string(name='Profile'),
  rotate?: string(name='Rotate'),
  sampleAspectRatio?: string(name='SampleAspectRatio'),
  startTime?: double(name='StartTime'),
  timeBase?: string(name='TimeBase'),
  width?: long(name='Width'),
}

model WebofficePermission {
  copy?: boolean(name='Copy'),
  export?: boolean(name='Export'),
  history?: boolean(name='History'),
  print?: boolean(name='Print'),
  readonly?: boolean(name='Readonly'),
  rename?: boolean(name='Rename'),
}

model WebofficeUser {
  avatar?: string(name='Avatar'),
  id?: string(name='Id'),
  name?: string(name='Name'),
}

model WebofficeWatermark {
  fillStyle?: string(name='FillStyle', example='rgba(192, 192, 192, 0.6)'),
  font?: string(name='Font', example='bold 20px Serif'),
  horizontal?: long(name='Horizontal'),
  rotate?: float(name='Rotate'),
  type?: long(name='Type'),
  value?: string(name='Value'),
  vertical?: long(name='Vertical'),
}

model AddImageMosaicRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  imageFormat?: string(name='ImageFormat', description='The encoding of the output image. By default, the output image uses the same encoding as the input image. Valid values: jpg, png, and webp.', example='jpg'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  quality?: int32(name='Quality', description='The quality of the output image. This parameter applies only to JPG and WebP images. Valid values: 0 to 100. Default value: 80.', example='80'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the input image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

Supported formats of input images include JPG, PNG, TIFF, JP2, and BMP.

This parameter is required.', example='oss://examplebucket/sampleobject.jpg'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://examplebucket/outputImage.jpg'),
  targets?: [ 
    {
      blurRadius?: int32(name='BlurRadius', description='The radius of the Gaussian blur. Valid values: 1 to 50. Default value: 3. Unit: pixels.

>  This parameter takes effect only for a Gaussian blur.', example='3'),
      boundary?: {
        height?: float(name='Height', description='The height of the bounding box. The value can be an integer greater than or equal to 0 or a decimal within the range of [0,1):

*   An integer value greater than or equal to 0 indicates the height of the bounding box in pixels.
*   A decimal value within the range of [0,1) indicates the height of the bounding box as a ratio of its height to the image height.

This parameter is required.', example='200'),
        referPos?: string(name='ReferPos', description='The reference position of the bounding box on the image. Valid values:

*   topright: the upper-right corner.
*   topleft: the upper-left corner. This is the default value.
*   bottomright: the lower-right corner.
*   bottomleft: the lower-left corner.', example='topleft'),
        width?: float(name='Width', description='The width of the bounding box. The value can be an integer greater than or equal to 0 or a decimal within the range of [0,1):

*   An integer value greater than or equal to 0 indicates the width of the bounding box in pixels.
*   A decimal value within the range of [0,1) indicates the width of the bounding box as a ratio of its width to the image width.

This parameter is required.', example='200'),
        x?: float(name='X', description='The horizontal offset relative to the reference position. The value can be an integer greater than or equal to 0 or a decimal within the range of [0,1):

*   An integer value greater than or equal to 0 indicates the horizontal offset in pixels.
*   A decimal value within the range of [0,1) indicates the horizontal offset as a ratio of the offset to the image width.

This parameter is required.', example='0'),
        y?: float(name='Y', description='The vertical offset relative to the reference position. The value can be an integer greater than or equal to 0 or a decimal within the range of [0,1):

*   An integer value greater than or equal to 0 indicates the vertical offset in pixels.
*   A decimal value within the range of [0,1) indicates the vertical offset as a ratio of the offset to the image height.

This parameter is required.', example='0'),
      }(name='Boundary', description='The position of the bounding box.

This parameter is required.'),
      color?: string(name='Color', description='The color of the color shape. You can specify a color by using a color code such as`#RRGGBB` or preset color names such as `red` and `white`. The default value is #FFFFFF, which is white.

>  This parameter takes effect only for solid color shapes.', example='#FFFFFF'),
      mosaicRadius?: int32(name='MosaicRadius', description='The radius of the mosaic. Default value: 5. Unit: pixels.

>  This parameter does not take effect for Gaussian blurs and solid color shapes.', example='5'),
      sigma?: int32(name='Sigma', description='The standard deviation of the Gaussian blur. The value must be greater than 0. Default value: 5.

>  This parameter takes effect only for a Gaussian blur.', example='5'),
      type?: string(name='Type', description='The type of the mosaic effect. Valid values:

*   square: squares.
*   diamond: diamonds.
*   hexagon: hexagons.
*   blur: Gaussian blurs.
*   pure: solid color shapes.

This parameter is required.', example='square'),
    }
  ](name='Targets', description='The bounding boxes and processing parameters.

This parameter is required.'),
}

model AddImageMosaicShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  imageFormat?: string(name='ImageFormat', description='The encoding of the output image. By default, the output image uses the same encoding as the input image. Valid values: jpg, png, and webp.', example='jpg'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  quality?: int32(name='Quality', description='The quality of the output image. This parameter applies only to JPG and WebP images. Valid values: 0 to 100. Default value: 80.', example='80'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the input image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

Supported formats of input images include JPG, PNG, TIFF, JP2, and BMP.

This parameter is required.', example='oss://examplebucket/sampleobject.jpg'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://examplebucket/outputImage.jpg'),
  targetsShrink?: string(name='Targets', description='The bounding boxes and processing parameters.

This parameter is required.'),
}

model AddImageMosaicResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FF*****'),
}

model AddImageMosaicResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: AddImageMosaicResponseBody(name='body'),
}

/**
 * @summary Adds mosaics, Gaussian blurs, or solid color shapes to blur one or more areas of an image for privacy protection and saves the output image to the specified path in Object Storage Service (OSS).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   The operation accepts JPG and PNG images with a maximum side length of 30,000 pixels and a total of up to 250 million pixels.
 *
 * @param tmpReq AddImageMosaicRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return AddImageMosaicResponse
 */
async function addImageMosaicWithOptions(tmpReq: AddImageMosaicRequest, runtime: $RuntimeOptions): AddImageMosaicResponse {
  tmpReq.validate();
  var request = new AddImageMosaicShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.targets)) {
    request.targetsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.targets, 'Targets', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.imageFormat)) {
    query['ImageFormat'] = request.imageFormat;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.quality)) {
    query['Quality'] = request.quality;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.targetsShrink)) {
    query['Targets'] = request.targetsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'AddImageMosaic',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Adds mosaics, Gaussian blurs, or solid color shapes to blur one or more areas of an image for privacy protection and saves the output image to the specified path in Object Storage Service (OSS).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   The operation accepts JPG and PNG images with a maximum side length of 30,000 pixels and a total of up to 250 million pixels.
 *
 * @param request AddImageMosaicRequest
 * @return AddImageMosaicResponse
 */
async function addImageMosaic(request: AddImageMosaicRequest): AddImageMosaicResponse {
  var runtime = new $RuntimeOptions{};
  return addImageMosaicWithOptions(request, runtime);
}

model AddStoryFilesRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='test-dataset'),
  files?: [ 
    {
      URI?: string(name='URI', description='The URI of the object.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
    }
  ](name='Files', description='The objects that you want to add.

This parameter is required.'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='test-project'),
}

model AddStoryFilesShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='test-dataset'),
  filesShrink?: string(name='Files', description='The objects that you want to add.

This parameter is required.'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='test-project'),
}

model AddStoryFilesResponseBody = {
  files?: [ 
    {
      errorCode?: string(name='ErrorCode', description='The error code.', example='ResourceNotFound'),
      errorMessage?: string(name='ErrorMessage', description='The error message that is returned.', example='The specified resource %s is not found.'),
      URI?: string(name='URI', description='The URI of the object.

The OSS URI follows the `oss://{bucketname}/{objectname}` format, where `bucketname` is the name of the bucket in the same region as the current project and `objectname` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
    }
  ](name='Files', description='The objects that were added.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model AddStoryFilesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: AddStoryFilesResponseBody(name='body'),
}

/**
 * @summary Adds objects to a story.
 *
 * @param tmpReq AddStoryFilesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return AddStoryFilesResponse
 */
async function addStoryFilesWithOptions(tmpReq: AddStoryFilesRequest, runtime: $RuntimeOptions): AddStoryFilesResponse {
  tmpReq.validate();
  var request = new AddStoryFilesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.datasetName)) {
    body['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.filesShrink)) {
    body['Files'] = request.filesShrink;
  }
  if (!$isNull(request.objectId)) {
    body['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'AddStoryFiles',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Adds objects to a story.
 *
 * @param request AddStoryFilesRequest
 * @return AddStoryFilesResponse
 */
async function addStoryFiles(request: AddStoryFilesRequest): AddStoryFilesResponse {
  var runtime = new $RuntimeOptions{};
  return addStoryFilesWithOptions(request, runtime);
}

model AttachOSSBucketRequest {
  description?: string(name='Description', description='The description of the binding. The description must be 1 to 128 characters in length. By default, no description is applied.', example='test-attachment'),
  OSSBucket?: string(name='OSSBucket', description='The name of the OSS bucket in the same region as the project.

This parameter is required.', example='examplebucket'),
  projectName?: string(name='ProjectName', description='The name of the project. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
}

model AttachOSSBucketResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
}

model AttachOSSBucketResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: AttachOSSBucketResponseBody(name='body'),
}

/**
 * @summary Binds an Object Storage Service (OSS) bucket to the specified project. The binding enables you to use IMM features by using the x-oss-process parameter.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   To use data processing capabilities of IMM based on the x-oss-process parameter, you must bind an OSS bucket to an IMM project. For more information, see [x-oss-process](https://help.aliyun.com/document_detail/2391270.html).
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *
 * @param request AttachOSSBucketRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return AttachOSSBucketResponse
 */
async function attachOSSBucketWithOptions(request: AttachOSSBucketRequest, runtime: $RuntimeOptions): AttachOSSBucketResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.description)) {
    query['Description'] = request.description;
  }
  if (!$isNull(request.OSSBucket)) {
    query['OSSBucket'] = request.OSSBucket;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'AttachOSSBucket',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Binds an Object Storage Service (OSS) bucket to the specified project. The binding enables you to use IMM features by using the x-oss-process parameter.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   To use data processing capabilities of IMM based on the x-oss-process parameter, you must bind an OSS bucket to an IMM project. For more information, see [x-oss-process](https://help.aliyun.com/document_detail/2391270.html).
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *
 * @param request AttachOSSBucketRequest
 * @return AttachOSSBucketResponse
 */
async function attachOSSBucket(request: AttachOSSBucketRequest): AttachOSSBucketResponse {
  var runtime = new $RuntimeOptions{};
  return attachOSSBucketWithOptions(request, runtime);
}

model BatchDeleteFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  URIs?: [ string ](name='URIs', description='The URIs of the OSS buckets in which the files whose metadata you want to delete are stored. You can specify up to 100 URIs.

This parameter is required.'),
}

model BatchDeleteFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  URIsShrink?: string(name='URIs', description='The URIs of the OSS buckets in which the files whose metadata you want to delete are stored. You can specify up to 100 URIs.

This parameter is required.'),
}

model BatchDeleteFileMetaResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='3A82F6C9-5AC0-38F9-914F-F02623B3****'),
}

model BatchDeleteFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: BatchDeleteFileMetaResponseBody(name='body'),
}

/**
 * @summary Deletes the metadata of multiple files from a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   A successful deletion message is returned regardless of whether the metadata of the file exists in the dataset.
 * > 
 * *   If you delete the metadata of a file from a dataset, the file stored in Object Storage Service (OSS) or Photo and Drive Service is **not** deleted. If you want to delete the file, use the operations provided by OSS or Photo and Drive Service.
 * *   Metadata deletion affects existing face groups and stories but does not affect existing spatiotemporal groups.
 *
 * @param tmpReq BatchDeleteFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return BatchDeleteFileMetaResponse
 */
async function batchDeleteFileMetaWithOptions(tmpReq: BatchDeleteFileMetaRequest, runtime: $RuntimeOptions): BatchDeleteFileMetaResponse {
  tmpReq.validate();
  var request = new BatchDeleteFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.URIs)) {
    request.URIsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.URIs, 'URIs', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URIsShrink)) {
    query['URIs'] = request.URIsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'BatchDeleteFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes the metadata of multiple files from a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   A successful deletion message is returned regardless of whether the metadata of the file exists in the dataset.
 * > 
 * *   If you delete the metadata of a file from a dataset, the file stored in Object Storage Service (OSS) or Photo and Drive Service is **not** deleted. If you want to delete the file, use the operations provided by OSS or Photo and Drive Service.
 * *   Metadata deletion affects existing face groups and stories but does not affect existing spatiotemporal groups.
 *
 * @param request BatchDeleteFileMetaRequest
 * @return BatchDeleteFileMetaResponse
 */
async function batchDeleteFileMeta(request: BatchDeleteFileMetaRequest): BatchDeleteFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return batchDeleteFileMetaWithOptions(request, runtime);
}

model BatchGetFigureClusterRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='test-dataset'),
  objectIds?: [ string ](name='ObjectIds', description='The cluster IDs.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='test-project'),
}

model BatchGetFigureClusterShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='test-dataset'),
  objectIdsShrink?: string(name='ObjectIds', description='The cluster IDs.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='test-project'),
}

model BatchGetFigureClusterResponseBody = {
  figureClusters?: [
    FigureCluster
  ](name='FigureClusters', description='The clusters.'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFF****'),
}

model BatchGetFigureClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: BatchGetFigureClusterResponseBody(name='body'),
}

/**
 * @summary Queries face clusters.
 *
 * @param tmpReq BatchGetFigureClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return BatchGetFigureClusterResponse
 */
async function batchGetFigureClusterWithOptions(tmpReq: BatchGetFigureClusterRequest, runtime: $RuntimeOptions): BatchGetFigureClusterResponse {
  tmpReq.validate();
  var request = new BatchGetFigureClusterShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.objectIds)) {
    request.objectIdsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.objectIds, 'ObjectIds', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectIdsShrink)) {
    query['ObjectIds'] = request.objectIdsShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'BatchGetFigureCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries face clusters.
 *
 * @param request BatchGetFigureClusterRequest
 * @return BatchGetFigureClusterResponse
 */
async function batchGetFigureCluster(request: BatchGetFigureClusterRequest): BatchGetFigureClusterResponse {
  var runtime = new $RuntimeOptions{};
  return batchGetFigureClusterWithOptions(request, runtime);
}

model BatchGetFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  URIs?: [ string ](name='URIs', description='The array of object URIs. You can specify up to 100 object URIs in an array.

This parameter is required.'),
  withFields?: [ string ](name='WithFields', description='The fields to return. If you specify this parameter, only specified metadata fields are returned. You can use this parameter to control the size of the response.

If you do not specify this parameter or leave this parameter empty, the operation returns all metadata fields.'),
}

model BatchGetFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  URIsShrink?: string(name='URIs', description='The array of object URIs. You can specify up to 100 object URIs in an array.

This parameter is required.'),
  withFieldsShrink?: string(name='WithFields', description='The fields to return. If you specify this parameter, only specified metadata fields are returned. You can use this parameter to control the size of the response.

If you do not specify this parameter or leave this parameter empty, the operation returns all metadata fields.'),
}

model BatchGetFileMetaResponseBody = {
  files?: [
    File
  ](name='Files', description='The metadata returned.'),
  requestId?: string(name='RequestId', description='The request ID.', example='7F84C6D9-5AC0-49F9-914D-F02678E3****'),
}

model BatchGetFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: BatchGetFileMetaResponseBody(name='body'),
}

/**
 * @summary Queries metadata of multiple objects or files in the specified dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, feel free to join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 *
 * @param tmpReq BatchGetFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return BatchGetFileMetaResponse
 */
async function batchGetFileMetaWithOptions(tmpReq: BatchGetFileMetaRequest, runtime: $RuntimeOptions): BatchGetFileMetaResponse {
  tmpReq.validate();
  var request = new BatchGetFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.URIs)) {
    request.URIsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.URIs, 'URIs', 'json');
  }
  if (!$isNull(tmpReq.withFields)) {
    request.withFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.withFields, 'WithFields', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URIsShrink)) {
    query['URIs'] = request.URIsShrink;
  }
  if (!$isNull(request.withFieldsShrink)) {
    query['WithFields'] = request.withFieldsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'BatchGetFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries metadata of multiple objects or files in the specified dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, feel free to join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 *
 * @param request BatchGetFileMetaRequest
 * @return BatchGetFileMetaResponse
 */
async function batchGetFileMeta(request: BatchGetFileMetaRequest): BatchGetFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return batchGetFileMetaWithOptions(request, runtime);
}

model BatchIndexFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  files?: [
    InputFile
  ](name='Files', description='The objects in Object Storage Service (OSS). Specify OSS objects by using a JSON array. You can specify up to 100 objects in an array.

This parameter is required.'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  userData?: string(name='UserData'),
}

model BatchIndexFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  filesShrink?: string(name='Files', description='The objects in Object Storage Service (OSS). Specify OSS objects by using a JSON array. You can specify up to 100 objects in an array.

This parameter is required.'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  userData?: string(name='UserData'),
}

model BatchIndexFileMetaResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='387-1DAPFFZplUZhuCuhnB6I9H****'),
  requestId?: string(name='RequestId', description='The request ID.', example='8F93E6D9-5AC0-49F9-914D-E02678A3****'),
}

model BatchIndexFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: BatchIndexFileMetaResponseBody(name='body'),
}

/**
 * @summary Indexes metadata of multiple objects into the specified dataset. The process involves data processing operations such as label detection, face detection, and location detection. Metadata indexing helps meet diverse data retrieval requirements.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Data processing operations supported for metadata processing vary with workflow templates. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).
 * *   Metadata indexing poses limits on the total number and size of objects. For more information about these limits, see [Limits](https://help.aliyun.com/document_detail/475569.html). For more information about how to create
 * *   Metadata indexing is available in specific regions. For information about regions that support metadata indexing, see the "Data management and indexing" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic.
 *
 * @param tmpReq BatchIndexFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return BatchIndexFileMetaResponse
 */
async function batchIndexFileMetaWithOptions(tmpReq: BatchIndexFileMetaRequest, runtime: $RuntimeOptions): BatchIndexFileMetaResponse {
  tmpReq.validate();
  var request = new BatchIndexFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.filesShrink)) {
    query['Files'] = request.filesShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'BatchIndexFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Indexes metadata of multiple objects into the specified dataset. The process involves data processing operations such as label detection, face detection, and location detection. Metadata indexing helps meet diverse data retrieval requirements.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Data processing operations supported for metadata processing vary with workflow templates. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).
 * *   Metadata indexing poses limits on the total number and size of objects. For more information about these limits, see [Limits](https://help.aliyun.com/document_detail/475569.html). For more information about how to create
 * *   Metadata indexing is available in specific regions. For information about regions that support metadata indexing, see the "Data management and indexing" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic.
 *
 * @param request BatchIndexFileMetaRequest
 * @return BatchIndexFileMetaResponse
 */
async function batchIndexFileMeta(request: BatchIndexFileMetaRequest): BatchIndexFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return batchIndexFileMetaWithOptions(request, runtime);
}

model BatchUpdateFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  files?: [
    InputFile
  ](name='Files', description='The files whose metadata you want to update.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model BatchUpdateFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  filesShrink?: string(name='Files', description='The files whose metadata you want to update.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model BatchUpdateFileMetaResponseBody = {
  files?: [ 
    {
      message?: string(name='Message', description='The error message returned when the value of the Success parameter is false.', example='*error.OpError : InvalidArgument | Index KV count exceeded, should be no more than 100.'),
      success?: boolean(name='Success', description='Indicates whether the request was successful. Valid values:

Enumerated values:

*   true
*   false', example='false'),
      URI?: string(name='URI', description='The URI of the file.', example='oss://examplebucket/example.jpg'),
    }
  ](name='Files', description='The files whose metadata was updated.'),
  requestId?: string(name='RequestId', description='The request ID.', example='F5BF215E-3237-0852-B9C6-F233D44A****'),
}

model BatchUpdateFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: BatchUpdateFileMetaResponseBody(name='body'),
}

/**
 * @summary Updates some metadata items of files indexed into a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   You cannot call this operation to update all metadata. You can update only metadata fields such as CustomLabels, CustomId, and Figures. For more information, see the "Request parameters" section of this topic.
 *
 * @param tmpReq BatchUpdateFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return BatchUpdateFileMetaResponse
 */
async function batchUpdateFileMetaWithOptions(tmpReq: BatchUpdateFileMetaRequest, runtime: $RuntimeOptions): BatchUpdateFileMetaResponse {
  tmpReq.validate();
  var request = new BatchUpdateFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.filesShrink)) {
    query['Files'] = request.filesShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'BatchUpdateFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates some metadata items of files indexed into a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   You cannot call this operation to update all metadata. You can update only metadata fields such as CustomLabels, CustomId, and Figures. For more information, see the "Request parameters" section of this topic.
 *
 * @param request BatchUpdateFileMetaRequest
 * @return BatchUpdateFileMetaResponse
 */
async function batchUpdateFileMeta(request: BatchUpdateFileMetaRequest): BatchUpdateFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return batchUpdateFileMetaWithOptions(request, runtime);
}

model CompareImageFacesRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  source?: {
    URI1?: string(name='URI1', description='The OSS URL of the image file.

Specify the URL in the `oss://<bucket>/<object>` format. `<bucket>` specifies the name of the OSS bucket that is in the same region as the current project. `<object>` specifies path of the object with the extension included.', example='oss://test-bucket/test-object1'),
    URI2?: string(name='URI2', description='The OSS URL of the image file.

Specify the URL in the `oss://<bucket>/<object>` format. `<bucket>` specifies the name of the OSS bucket that is in the same region as the current project, and `<object>` specifies the path of the object with the extension included.', example='oss://test-bucket/test-object2'),
  }(name='Source', description='The URLs of the two images for compression.'),
}

model CompareImageFacesShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceShrink?: string(name='Source', description='The URLs of the two images for compression.'),
}

model CompareImageFacesResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='F73AC982-2B9E-4ECD-AED5-F8331C5******'),
  similarity?: float(name='Similarity', description='The face similarity. A larger value indicates a higher face similarity. Valid values: 0 to 1.', example='0.8848152756690983'),
}

model CompareImageFacesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CompareImageFacesResponseBody(name='body'),
}

/**
 * @summary Compares the similarity of the largest faces in two images. The largest face refers to the largest face frame in an image after face detection.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   For the input image, only the face with the largest face frame in the image is used for face comparison. The face frame detection result is consistent with the responses of the [DetectImageFaces](https://help.aliyun.com/document_detail/478213.html) operation.
 *
 * @param tmpReq CompareImageFacesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CompareImageFacesResponse
 */
async function compareImageFacesWithOptions(tmpReq: CompareImageFacesRequest, runtime: $RuntimeOptions): CompareImageFacesResponse {
  tmpReq.validate();
  var request = new CompareImageFacesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.source)) {
    request.sourceShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.source, 'Source', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceShrink)) {
    query['Source'] = request.sourceShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CompareImageFaces',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Compares the similarity of the largest faces in two images. The largest face refers to the largest face frame in an image after face detection.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   For the input image, only the face with the largest face frame in the image is used for face comparison. The face frame detection result is consistent with the responses of the [DetectImageFaces](https://help.aliyun.com/document_detail/478213.html) operation.
 *
 * @param request CompareImageFacesRequest
 * @return CompareImageFacesResponse
 */
async function compareImageFaces(request: CompareImageFacesRequest): CompareImageFacesResponse {
  var runtime = new $RuntimeOptions{};
  return compareImageFacesWithOptions(request, runtime);
}

model ContextualAnswerRequest {
  files?: [
    ContextualFile
  ](name='Files'),
  messages?: [
    ContextualMessage
  ](name='Messages', description='This parameter is required.'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
}

model ContextualAnswerShrinkRequest {
  filesShrink?: string(name='Files'),
  messagesShrink?: string(name='Messages', description='This parameter is required.'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
}

model ContextualAnswerResponseBody = {
  answer?: Answer(name='Answer'),
  code?: string(name='Code'),
  message?: string(name='Message'),
  requestId?: string(name='RequestId', example='22F081FB-90D7-525A-BFE4-D28DC906A28F'),
}

model ContextualAnswerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ContextualAnswerResponseBody(name='body'),
}

/**
 * @summary AI 助手二期，问答API
 *
 * @param tmpReq ContextualAnswerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ContextualAnswerResponse
 */
async function contextualAnswerWithOptions(tmpReq: ContextualAnswerRequest, runtime: $RuntimeOptions): ContextualAnswerResponse {
  tmpReq.validate();
  var request = new ContextualAnswerShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  if (!$isNull(tmpReq.messages)) {
    request.messagesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.messages, 'Messages', 'json');
  }
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var body : map[string]any = {};
  if (!$isNull(request.filesShrink)) {
    body['Files'] = request.filesShrink;
  }
  if (!$isNull(request.messagesShrink)) {
    body['Messages'] = request.messagesShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'ContextualAnswer',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary AI 助手二期，问答API
 *
 * @param request ContextualAnswerRequest
 * @return ContextualAnswerResponse
 */
async function contextualAnswer(request: ContextualAnswerRequest): ContextualAnswerResponse {
  var runtime = new $RuntimeOptions{};
  return contextualAnswerWithOptions(request, runtime);
}

model ContextualRetrievalRequest {
  datasetName?: string(name='DatasetName', description='This parameter is required.', example='test-dataset'),
  messages?: [
    ContextualMessage
  ](name='Messages', description='This parameter is required.'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
  recallOnly?: boolean(name='RecallOnly', example='false'),
  smartClusterIds?: [ string ](name='SmartClusterIds'),
}

model ContextualRetrievalShrinkRequest {
  datasetName?: string(name='DatasetName', description='This parameter is required.', example='test-dataset'),
  messagesShrink?: string(name='Messages', description='This parameter is required.'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
  recallOnly?: boolean(name='RecallOnly', example='false'),
  smartClusterIdsShrink?: string(name='SmartClusterIds'),
}

model ContextualRetrievalResponseBody = {
  requestId?: string(name='RequestId', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
  results?: [
    File
  ](name='Results'),
}

model ContextualRetrievalResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ContextualRetrievalResponseBody(name='body'),
}

/**
 * @summary AI助手二期，检索API
 *
 * @param tmpReq ContextualRetrievalRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ContextualRetrievalResponse
 */
async function contextualRetrievalWithOptions(tmpReq: ContextualRetrievalRequest, runtime: $RuntimeOptions): ContextualRetrievalResponse {
  tmpReq.validate();
  var request = new ContextualRetrievalShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.messages)) {
    request.messagesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.messages, 'Messages', 'json');
  }
  if (!$isNull(tmpReq.smartClusterIds)) {
    request.smartClusterIdsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.smartClusterIds, 'SmartClusterIds', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.recallOnly)) {
    query['RecallOnly'] = request.recallOnly;
  }
  var body : map[string]any = {};
  if (!$isNull(request.messagesShrink)) {
    body['Messages'] = request.messagesShrink;
  }
  if (!$isNull(request.smartClusterIdsShrink)) {
    body['SmartClusterIds'] = request.smartClusterIdsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'ContextualRetrieval',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary AI助手二期，检索API
 *
 * @param request ContextualRetrievalRequest
 * @return ContextualRetrievalResponse
 */
async function contextualRetrieval(request: ContextualRetrievalRequest): ContextualRetrievalResponse {
  var runtime = new $RuntimeOptions{};
  return contextualRetrievalWithOptions(request, runtime);
}

model CreateArchiveFileInspectionTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  password?: string(name='Password', description='The password that protects the package. If the package is password-protected, you must provide the password to view the contents of the package.', example='123456'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://imm-apitest-fxf2/name.zip'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateArchiveFileInspectionTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  password?: string(name='Password', description='The password that protects the package. If the package is password-protected, you must provide the password to view the contents of the package.', example='123456'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://imm-apitest-fxf2/name.zip'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateArchiveFileInspectionTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16Es*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='ArchiveFileInspection-8475218e-d86e-4c66-b3cf-50e74d6c****'),
}

model CreateArchiveFileInspectionTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateArchiveFileInspectionTaskResponseBody(name='body'),
}

/**
 * @summary Creates an archive file inspection task to preview the files in a package without decompressing the package.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk chat group (ID: 31690030817) and share your questions with us.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports a package that contains up to 80,000 files.
 * *   The operation supports ZIP or RAR packages up to 200 GB in size, or 7z packages up to 50 GB in size.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateArchiveFileInspectionTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateArchiveFileInspectionTaskResponse
 */
async function createArchiveFileInspectionTaskWithOptions(tmpReq: CreateArchiveFileInspectionTaskRequest, runtime: $RuntimeOptions): CreateArchiveFileInspectionTaskResponse {
  tmpReq.validate();
  var request = new CreateArchiveFileInspectionTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.password)) {
    query['Password'] = request.password;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateArchiveFileInspectionTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates an archive file inspection task to preview the files in a package without decompressing the package.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk chat group (ID: 31690030817) and share your questions with us.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports a package that contains up to 80,000 files.
 * *   The operation supports ZIP or RAR packages up to 200 GB in size, or 7z packages up to 50 GB in size.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateArchiveFileInspectionTaskRequest
 * @return CreateArchiveFileInspectionTaskResponse
 */
async function createArchiveFileInspectionTask(request: CreateArchiveFileInspectionTaskRequest): CreateArchiveFileInspectionTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createArchiveFileInspectionTaskWithOptions(request, runtime);
}

model CreateBatchRequest {
  actions?: [ 
    {
      fastFailPolicy?: FastFailPolicy(name='FastFailPolicy', description='The policy configurations for handling failures.'),
      name?: string(name='Name', description='The name of the template.

This parameter is required.', example='doc/convert'),
      parameters?: [ string ](name='Parameters', description='The template parameters.'),
    }
  ](name='Actions', description='The templates.

This parameter is required.'),
  input?: Input(name='Input', description='The data source configurations.

This parameter is required.'),
  notification?: {
    MNS?: MNS(name='MNS', description='The SMQ notification settings.'),
  }(name='Notification', description='The notification settings. The operation supports multiple messaging middleware options. For more information about notification messages, see Asynchronous message examples. You can use one of the following methods to receive notification messages:

In the region in which the IMM project is located, use EventBridge to receive task notifications. For more information, see IMM events. In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task notifications.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The service role. IMM assumes the service role so that it can access resources in other cloud services, such as OSS. Default value: AliyunIMMBatchTriggerRole.

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).

This parameter is required.', example='AliyunIMMDefaultRole'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key": "val"}'),
}

model CreateBatchShrinkRequest {
  actionsShrink?: string(name='Actions', description='The templates.

This parameter is required.'),
  inputShrink?: string(name='Input', description='The data source configurations.

This parameter is required.'),
  notificationShrink?: string(name='Notification', description='The notification settings. The operation supports multiple messaging middleware options. For more information about notification messages, see Asynchronous message examples. You can use one of the following methods to receive notification messages:

In the region in which the IMM project is located, use EventBridge to receive task notifications. For more information, see IMM events. In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task notifications.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The service role. IMM assumes the service role so that it can access resources in other cloud services, such as OSS. Default value: AliyunIMMBatchTriggerRole.

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).

This parameter is required.', example='AliyunIMMDefaultRole'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key": "val"}'),
}

model CreateBatchResponseBody = {
  id?: string(name='Id', description='The ID of the batch processing task.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
}

model CreateBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateBatchResponseBody(name='body'),
}

/**
 * @summary Creates a batch processing task to perform a data processing operation, such as transcoding or format conversion, on multiple existing files at a time.
 *
 * @description If you want to create a batch processing task to process data in [OSS](https://help.aliyun.com/document_detail/99372.html), make sure that you have bound the dataset to the OSS bucket where the data is stored. For more information about how to bind a dataset to a bucket, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param tmpReq CreateBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateBatchResponse
 */
async function createBatchWithOptions(tmpReq: CreateBatchRequest, runtime: $RuntimeOptions): CreateBatchResponse {
  tmpReq.validate();
  var request = new CreateBatchShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.actions)) {
    request.actionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.actions, 'Actions', 'json');
  }
  if (!$isNull(tmpReq.input)) {
    request.inputShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.input, 'Input', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.actionsShrink)) {
    body['Actions'] = request.actionsShrink;
  }
  if (!$isNull(request.inputShrink)) {
    body['Input'] = request.inputShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    body['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.serviceRole)) {
    body['ServiceRole'] = request.serviceRole;
  }
  if (!$isNull(request.tagsShrink)) {
    body['Tags'] = request.tagsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a batch processing task to perform a data processing operation, such as transcoding or format conversion, on multiple existing files at a time.
 *
 * @description If you want to create a batch processing task to process data in [OSS](https://help.aliyun.com/document_detail/99372.html), make sure that you have bound the dataset to the OSS bucket where the data is stored. For more information about how to bind a dataset to a bucket, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request CreateBatchRequest
 * @return CreateBatchResponse
 */
async function createBatch(request: CreateBatchRequest): CreateBatchResponse {
  var runtime = new $RuntimeOptions{};
  return createBatchWithOptions(request, runtime);
}

model CreateBindingRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  URI?: string(name='URI', description='The URI of the OSS bucket to which you bind the dataset.

Specify the value in the oss://${Bucket} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project.

This parameter is required.', example='oss://examplebucket'),
}

model CreateBindingResponseBody = {
  binding?: Binding(name='Binding', description='The binding relationship.'),
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
}

model CreateBindingResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateBindingResponseBody(name='body'),
}

/**
 * @summary Creates a binding relationship between a dataset and an Object Storage Service (OSS) bucket. This allows for the automatic synchronization of incremental and full data and indexing.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * Before you create a binding relationship, make sure that the project and the dataset that you want to use exist.
 * *   For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For information about how to create a dataset, see [CreateDataset](https://help.aliyun.com/document_detail/478160.html).
 * >  The CreateBinding operation works by using the [workflow template](https://help.aliyun.com/document_detail/466304.html) that is specified when you created the project or dataset.
 * After you create a binding relationship between a dataset and an OSS bucket, IMM scans the existing objects in the bucket and extracts metadata based on the scanning result. Then, IMM creates an index from the extracted metadata. If new objects are added to the OSS bucket, IMM constantly tracks and scans the objects and updates the index. For objects whose index is created in this way, you can call the [SimpleQuery](https://help.aliyun.com/document_detail/478175.html) operation to query, manage, and collect statistics from the objects.
 *
 * @param request CreateBindingRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateBindingResponse
 */
async function createBindingWithOptions(request: CreateBindingRequest, runtime: $RuntimeOptions): CreateBindingResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URI)) {
    query['URI'] = request.URI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateBinding',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a binding relationship between a dataset and an Object Storage Service (OSS) bucket. This allows for the automatic synchronization of incremental and full data and indexing.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * Before you create a binding relationship, make sure that the project and the dataset that you want to use exist.
 * *   For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For information about how to create a dataset, see [CreateDataset](https://help.aliyun.com/document_detail/478160.html).
 * >  The CreateBinding operation works by using the [workflow template](https://help.aliyun.com/document_detail/466304.html) that is specified when you created the project or dataset.
 * After you create a binding relationship between a dataset and an OSS bucket, IMM scans the existing objects in the bucket and extracts metadata based on the scanning result. Then, IMM creates an index from the extracted metadata. If new objects are added to the OSS bucket, IMM constantly tracks and scans the objects and updates the index. For objects whose index is created in this way, you can call the [SimpleQuery](https://help.aliyun.com/document_detail/478175.html) operation to query, manage, and collect statistics from the objects.
 *
 * @param request CreateBindingRequest
 * @return CreateBindingResponse
 */
async function createBinding(request: CreateBindingRequest): CreateBindingResponse {
  var runtime = new $RuntimeOptions{};
  return createBindingWithOptions(request, runtime);
}

model CreateCompressPointCloudTaskRequest {
  compressMethod?: string(name='CompressMethod', description='The compression algorithm. Valid values:

*   octree
*   kdtree

This parameter is required.', example='octree'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. This parameter is optional. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  kdtreeOption?: KdtreeOption(name='KdtreeOption', description='The k-d tree compression options.'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  octreeOption?: OctreeOption(name='OctreeOption', description='The octree compression options.'),
  pointCloudFields?: [ string ](name='PointCloudFields', description='The PCD property fields and the compression order in which the data is decompressed after the compression is complete.

*   If octree of Point Cloud Library (PCL) is used for compression, ["xyz"] is supported.
*   If Draco k-dimensional (k-d) tree is used for compression, ["xyz"] and ["xyz", "intensity"] are supported.

This parameter is required.'),
  pointCloudFileFormat?: string(name='PointCloudFileFormat', description='The file format. Set the value to the default value: pcd.', example='pcd'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The OSS URL of the PCD file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://test/src/test.pcd'),
  tags?: map[string]any(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"LabelKey": "Value"}'),
  targetURI?: string(name='TargetURI', description='The OSS URL of the output file after compression.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://test/tgt'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateCompressPointCloudTaskShrinkRequest {
  compressMethod?: string(name='CompressMethod', description='The compression algorithm. Valid values:

*   octree
*   kdtree

This parameter is required.', example='octree'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. This parameter is optional. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  kdtreeOptionShrink?: string(name='KdtreeOption', description='The k-d tree compression options.'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  octreeOptionShrink?: string(name='OctreeOption', description='The octree compression options.'),
  pointCloudFieldsShrink?: string(name='PointCloudFields', description='The PCD property fields and the compression order in which the data is decompressed after the compression is complete.

*   If octree of Point Cloud Library (PCL) is used for compression, ["xyz"] is supported.
*   If Draco k-dimensional (k-d) tree is used for compression, ["xyz"] and ["xyz", "intensity"] are supported.

This parameter is required.'),
  pointCloudFileFormat?: string(name='PointCloudFileFormat', description='The file format. Set the value to the default value: pcd.', example='pcd'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The OSS URL of the PCD file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://test/src/test.pcd'),
  tagsShrink?: string(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"LabelKey": "Value"}'),
  targetURI?: string(name='TargetURI', description='The OSS URL of the output file after compression.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://test/tgt'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateCompressPointCloudTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0B7-1LR4Wcue1aBhk2xT85MfL*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFF****'),
  taskId?: string(name='TaskId', description='The task ID.', example='PointCloudCompress-badda57d-a3ab-4e6d-938f-49b77ce****'),
}

model CreateCompressPointCloudTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateCompressPointCloudTaskResponseBody(name='body'),
}

/**
 * @summary Compresses point cloud data (PCD) in Object Storage Service (OSS) to reduce the amount of data transferred over networks.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   This operation supports only Point Cloud Data (PCD) files.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications. >
 *
 * @param tmpReq CreateCompressPointCloudTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateCompressPointCloudTaskResponse
 */
async function createCompressPointCloudTaskWithOptions(tmpReq: CreateCompressPointCloudTaskRequest, runtime: $RuntimeOptions): CreateCompressPointCloudTaskResponse {
  tmpReq.validate();
  var request = new CreateCompressPointCloudTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.kdtreeOption)) {
    request.kdtreeOptionShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.kdtreeOption, 'KdtreeOption', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.octreeOption)) {
    request.octreeOptionShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.octreeOption, 'OctreeOption', 'json');
  }
  if (!$isNull(tmpReq.pointCloudFields)) {
    request.pointCloudFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.pointCloudFields, 'PointCloudFields', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.compressMethod)) {
    query['CompressMethod'] = request.compressMethod;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.kdtreeOptionShrink)) {
    query['KdtreeOption'] = request.kdtreeOptionShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.octreeOptionShrink)) {
    query['OctreeOption'] = request.octreeOptionShrink;
  }
  if (!$isNull(request.pointCloudFieldsShrink)) {
    query['PointCloudFields'] = request.pointCloudFieldsShrink;
  }
  if (!$isNull(request.pointCloudFileFormat)) {
    query['PointCloudFileFormat'] = request.pointCloudFileFormat;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateCompressPointCloudTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Compresses point cloud data (PCD) in Object Storage Service (OSS) to reduce the amount of data transferred over networks.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   This operation supports only Point Cloud Data (PCD) files.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications. >
 *
 * @param request CreateCompressPointCloudTaskRequest
 * @return CreateCompressPointCloudTaskResponse
 */
async function createCompressPointCloudTask(request: CreateCompressPointCloudTaskRequest): CreateCompressPointCloudTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createCompressPointCloudTaskWithOptions(request, runtime);
}

model CreateCustomizedStoryRequest {
  cover?: {
    URI?: string(name='URI', description='The URI of the cover image.

This parameter is required.', example='oss://bucket1/cover'),
  }(name='Cover', description='The cover image of the story. You can specify an image as the cover image of the custom story.

This parameter is required.'),
  customLabels?: map[string]any(name='CustomLabels', description='The custom labels. You can specify labels to help you identify and retrieve the story.', example='{"Bucket": "examplebucket"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='dataset001'),
  files?: [ 
    {
      URI?: string(name='URI', description='The URIs of the files.

This parameter is required.', example='[{"URI":"oss://bucket1/file1"}]'),
    }
  ](name='Files', description='The files of the story. You can specify up to 100 files in a custom story.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  storyName?: string(name='StoryName', description='The name of the story.

This parameter is required.', example='name1'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For information about valid subtypes, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='Solo'),
  storyType?: string(name='StoryType', description='The type of the story. For information about valid types, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='PeopleMemory'),
}

model CreateCustomizedStoryShrinkRequest {
  coverShrink?: string(name='Cover', description='The cover image of the story. You can specify an image as the cover image of the custom story.

This parameter is required.'),
  customLabelsShrink?: string(name='CustomLabels', description='The custom labels. You can specify labels to help you identify and retrieve the story.', example='{"Bucket": "examplebucket"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='dataset001'),
  filesShrink?: string(name='Files', description='The files of the story. You can specify up to 100 files in a custom story.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  storyName?: string(name='StoryName', description='The name of the story.

This parameter is required.', example='name1'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For information about valid subtypes, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='Solo'),
  storyType?: string(name='StoryType', description='The type of the story. For information about valid types, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='PeopleMemory'),
}

model CreateCustomizedStoryResponseBody = {
  objectId?: string(name='ObjectId', description='The ID of the story.', example='563062c0b085733f34ab****'),
  requestId?: string(name='RequestId', description='The request ID.', example='BC91D091-D49F-0ACD-95D5-F0621045****'),
}

model CreateCustomizedStoryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateCustomizedStoryResponseBody(name='body'),
}

/**
 * @summary Creates a story based on the specified images and videos.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 *
 * @param tmpReq CreateCustomizedStoryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateCustomizedStoryResponse
 */
async function createCustomizedStoryWithOptions(tmpReq: CreateCustomizedStoryRequest, runtime: $RuntimeOptions): CreateCustomizedStoryResponse {
  tmpReq.validate();
  var request = new CreateCustomizedStoryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.cover)) {
    request.coverShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.cover, 'Cover', 'json');
  }
  if (!$isNull(tmpReq.customLabels)) {
    request.customLabelsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.customLabels, 'CustomLabels', 'json');
  }
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.coverShrink)) {
    body['Cover'] = request.coverShrink;
  }
  if (!$isNull(request.customLabelsShrink)) {
    body['CustomLabels'] = request.customLabelsShrink;
  }
  if (!$isNull(request.datasetName)) {
    body['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.filesShrink)) {
    body['Files'] = request.filesShrink;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.storyName)) {
    body['StoryName'] = request.storyName;
  }
  if (!$isNull(request.storySubType)) {
    body['StorySubType'] = request.storySubType;
  }
  if (!$isNull(request.storyType)) {
    body['StoryType'] = request.storyType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateCustomizedStory',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a story based on the specified images and videos.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 *
 * @param request CreateCustomizedStoryRequest
 * @return CreateCustomizedStoryResponse
 */
async function createCustomizedStory(request: CreateCustomizedStoryRequest): CreateCustomizedStoryResponse {
  var runtime = new $RuntimeOptions{};
  return createCustomizedStoryWithOptions(request, runtime);
}

model CreateDatasetRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', description='The maximum number of bindings for the dataset. Valid values: 1 to 10. Default value: 10.', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', description='The maximum number of metadata entities in the dataset. Default value: 10000000000.', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', description='The maximum number of files in the dataset. Valid values: 1 to 100000000. Default value: 100000000.', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', description='The maximum number of metadata relationships in the dataset. Default value: 100000000000.', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', description='The maximum total file size for the dataset. If the total file size of the dataset exceeds this limit, indexes can no longer be added. Default value: 90000000000000000. Unit: bytes.', example='90000000000000000'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. The dataset name must be unique in the same project. The name must meet the following requirements:

*   The name must be 1 to 128 characters in length.
*   The name can contain only letters, digits, hyphens (-), and underscores (_).
*   The name must start with a letter or underscore (_).

This parameter is required.', example='dataset001'),
  description?: string(name='Description', description='The description of the dataset. The description must be 1 to 256 characters in length. You can leave this parameter empty.', example='immtest'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  templateId?: string(name='TemplateId', description='The ID of the workflow template. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).', example='Official:AllFunction'),
}

model CreateDatasetResponseBody = {
  dataset?: Dataset(name='Dataset', description='The information about the dataset.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6D74B3A9-5AC0-49F9-914D-E01589D3****'),
}

model CreateDatasetResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateDatasetResponseBody(name='body'),
}

/**
 * @summary Creates a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   A dataset name must be unique within the same project.
 * *   A project has an upper limit on the number of datasets that can be created in the project. You can call the [GetProjcet](https://help.aliyun.com/document_detail/478155.html) operation to query the dataset limit of the project.
 * *   After creating a dataset, you can call the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) operation to index metadata. Metadata indexing enhances [data retrieval efficiency and statistics collection](https://help.aliyun.com/document_detail/478175.html), and enables intelligent data management.
 *
 * @param request CreateDatasetRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateDatasetResponse
 */
async function createDatasetWithOptions(request: CreateDatasetRequest, runtime: $RuntimeOptions): CreateDatasetResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetMaxBindCount)) {
    query['DatasetMaxBindCount'] = request.datasetMaxBindCount;
  }
  if (!$isNull(request.datasetMaxEntityCount)) {
    query['DatasetMaxEntityCount'] = request.datasetMaxEntityCount;
  }
  if (!$isNull(request.datasetMaxFileCount)) {
    query['DatasetMaxFileCount'] = request.datasetMaxFileCount;
  }
  if (!$isNull(request.datasetMaxRelationCount)) {
    query['DatasetMaxRelationCount'] = request.datasetMaxRelationCount;
  }
  if (!$isNull(request.datasetMaxTotalFileSize)) {
    query['DatasetMaxTotalFileSize'] = request.datasetMaxTotalFileSize;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.description)) {
    query['Description'] = request.description;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.templateId)) {
    query['TemplateId'] = request.templateId;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateDataset',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   A dataset name must be unique within the same project.
 * *   A project has an upper limit on the number of datasets that can be created in the project. You can call the [GetProjcet](https://help.aliyun.com/document_detail/478155.html) operation to query the dataset limit of the project.
 * *   After creating a dataset, you can call the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) operation to index metadata. Metadata indexing enhances [data retrieval efficiency and statistics collection](https://help.aliyun.com/document_detail/478175.html), and enables intelligent data management.
 *
 * @param request CreateDatasetRequest
 * @return CreateDatasetResponse
 */
async function createDataset(request: CreateDatasetRequest): CreateDatasetResponse {
  var runtime = new $RuntimeOptions{};
  return createDatasetWithOptions(request, runtime);
}

model CreateDecodeBlindWatermarkTaskRequest {
  imageQuality?: int32(name='ImageQuality', description='The quality of the output image. This parameter is also available in the earlier DecodeBlindWatermark operation.

Higher image quality indicates a larger image size and higher watermark resolution quality.', example='90'),
  model?: string(name='Model', description='The watermark algorithm model. This parameter is also available in the earlier DecodeBlindWatermark operation. Valid values: FFT, FFT_FULL, DWT, and DWT_IBG. Default value: FFT.

If this parameter is left empty, the CreateDecodeBlindWatermarkTask operation is called. Otherwise, the earlier DecodeBlindWatermark operation is called.', example='FFT'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  originalImageURI?: string(name='OriginalImageURI', description='The OSS URI of the image before the blind watermark is added. This parameter is also available in the earlier DecodeBlindWatermark operation.

Do not specify this parameter when you set the Model parameter to DWT or DWT_IBG.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.', example='oss://imm-test/testcases/watermarktestbefore.jpg'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

>  The project specified in the request must match the one in the EncodeBlindWatermark request to encode the blind watermark.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.

This parameter is required.', example='oss://target/sampleobject.jpg'),
  strengthLevel?: string(name='StrengthLevel', description='The watermark strength level. The higher the strength level, the more resistant the watermarked image is to attacks, but the more the image is distorted. Valid values: low, medium, and high. Default value: low.', example='low'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output image. This parameter is also available in the earlier DecodeBlindWatermark operation.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.', example='oss://target/targetobject.jpg'),
  watermarkType?: string(name='WatermarkType', description='The type of the watermark. Valid value: text.

No image watermarks are supported.', example='text'),
}

model CreateDecodeBlindWatermarkTaskShrinkRequest {
  imageQuality?: int32(name='ImageQuality', description='The quality of the output image. This parameter is also available in the earlier DecodeBlindWatermark operation.

Higher image quality indicates a larger image size and higher watermark resolution quality.', example='90'),
  model?: string(name='Model', description='The watermark algorithm model. This parameter is also available in the earlier DecodeBlindWatermark operation. Valid values: FFT, FFT_FULL, DWT, and DWT_IBG. Default value: FFT.

If this parameter is left empty, the CreateDecodeBlindWatermarkTask operation is called. Otherwise, the earlier DecodeBlindWatermark operation is called.', example='FFT'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  originalImageURI?: string(name='OriginalImageURI', description='The OSS URI of the image before the blind watermark is added. This parameter is also available in the earlier DecodeBlindWatermark operation.

Do not specify this parameter when you set the Model parameter to DWT or DWT_IBG.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.', example='oss://imm-test/testcases/watermarktestbefore.jpg'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

>  The project specified in the request must match the one in the EncodeBlindWatermark request to encode the blind watermark.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.

This parameter is required.', example='oss://target/sampleobject.jpg'),
  strengthLevel?: string(name='StrengthLevel', description='The watermark strength level. The higher the strength level, the more resistant the watermarked image is to attacks, but the more the image is distorted. Valid values: low, medium, and high. Default value: low.', example='low'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output image. This parameter is also available in the earlier DecodeBlindWatermark operation.

Specify the OSS URI in the `oss://<bucket>/<object>` format, where `<bucket>` is the name of the bucket in the same region as the current project and `<object>` is the path of the object with the extension included.', example='oss://target/targetobject.jpg'),
  watermarkType?: string(name='WatermarkType', description='The type of the watermark. Valid value: text.

No image watermarks are supported.', example='text'),
}

model CreateDecodeBlindWatermarkTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='27C-1jyAP5qQI7RoI8lFFwvMrWtl0ft'),
  requestId?: string(name='RequestId', description='The request ID.', example='4A7A2D0E-D8B8-4DA0-8127-EB32C6600ADE'),
  taskId?: string(name='TaskId', description='The task ID.', example='DecodeBlindWatermark-78ac8f3b-59e0-45a6-9b67-32168c3f22b9'),
}

model CreateDecodeBlindWatermarkTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateDecodeBlindWatermarkTaskResponseBody(name='body'),
}

/**
 * @summary Decodes the blind watermark in an image.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the billing of Intelligent Media Management (IMM).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   The region and project specified in the request to decode a blind watermark must match those in the [EncodeBlindWatermark](https://help.aliyun.com/document_detail/2743655.html) request to encode the blind watermark.
 * *   A blind watermark can still be extracted even if attacks, such as compression, scaling, cropping, rotation, and color transformation, are performed on the image.
 * *   This operation is compatible with its earlier version DecodeBlindWatermark.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateDecodeBlindWatermarkTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateDecodeBlindWatermarkTaskResponse
 */
async function createDecodeBlindWatermarkTaskWithOptions(tmpReq: CreateDecodeBlindWatermarkTaskRequest, runtime: $RuntimeOptions): CreateDecodeBlindWatermarkTaskResponse {
  tmpReq.validate();
  var request = new CreateDecodeBlindWatermarkTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  var query = {};
  if (!$isNull(request.imageQuality)) {
    query['ImageQuality'] = request.imageQuality;
  }
  if (!$isNull(request.model)) {
    query['Model'] = request.model;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.originalImageURI)) {
    query['OriginalImageURI'] = request.originalImageURI;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.strengthLevel)) {
    query['StrengthLevel'] = request.strengthLevel;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.watermarkType)) {
    query['WatermarkType'] = request.watermarkType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateDecodeBlindWatermarkTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Decodes the blind watermark in an image.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the billing of Intelligent Media Management (IMM).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   The region and project specified in the request to decode a blind watermark must match those in the [EncodeBlindWatermark](https://help.aliyun.com/document_detail/2743655.html) request to encode the blind watermark.
 * *   A blind watermark can still be extracted even if attacks, such as compression, scaling, cropping, rotation, and color transformation, are performed on the image.
 * *   This operation is compatible with its earlier version DecodeBlindWatermark.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateDecodeBlindWatermarkTaskRequest
 * @return CreateDecodeBlindWatermarkTaskResponse
 */
async function createDecodeBlindWatermarkTask(request: CreateDecodeBlindWatermarkTaskRequest): CreateDecodeBlindWatermarkTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createDecodeBlindWatermarkTaskWithOptions(request, runtime);
}

model CreateFacesSearchingTaskRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  maxResult?: long(name='MaxResult', description='The number of the most similar faces that you want to return. Valid values: 1 to 100. Default value: 5.', example='100'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sources?: [ 
    {
      URI?: string(name='URI', description='The OSS URI of the image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
    }
  ](name='Sources', description='The images.'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateFacesSearchingTaskShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  maxResult?: long(name='MaxResult', description='The number of the most similar faces that you want to return. Valid values: 1 to 100. Default value: 5.', example='100'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sourcesShrink?: string(name='Sources', description='The images.'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateFacesSearchingTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16****'),
  requestId?: string(name='RequestId', description='The request ID.', example='B1E79399-05F7-06D8-95FE-EBE17BA*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='CreateFacesSearchingTask-00bec802-073a-4b61-ba*****'),
}

model CreateFacesSearchingTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateFacesSearchingTaskResponseBody(name='body'),
}

/**
 * @summary Searches the dataset for the specified number of images most similar to the specified image or face and returns face IDs and boundaries in descending order of similarity.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation searches for faces that are similar to the face within the largest bounding box in each input image.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateFacesSearchingTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateFacesSearchingTaskResponse
 */
async function createFacesSearchingTaskWithOptions(tmpReq: CreateFacesSearchingTaskRequest, runtime: $RuntimeOptions): CreateFacesSearchingTaskResponse {
  tmpReq.validate();
  var request = new CreateFacesSearchingTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResult)) {
    query['MaxResult'] = request.maxResult;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourcesShrink)) {
    query['Sources'] = request.sourcesShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateFacesSearchingTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Searches the dataset for the specified number of images most similar to the specified image or face and returns face IDs and boundaries in descending order of similarity.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation searches for faces that are similar to the face within the largest bounding box in each input image.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateFacesSearchingTaskRequest
 * @return CreateFacesSearchingTaskResponse
 */
async function createFacesSearchingTask(request: CreateFacesSearchingTaskRequest): CreateFacesSearchingTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createFacesSearchingTaskWithOptions(request, runtime);
}

model CreateFigureClusteringTaskRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='dataset001'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFigureClusteringTaskShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='dataset001'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFigureClusteringTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16****'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  taskId?: string(name='TaskId', description='The task ID.', example='formatconvert-00bec802-073a-4b61-ba3b-39bc****'),
}

model CreateFigureClusteringTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateFigureClusteringTaskResponseBody(name='body'),
}

/**
 * @summary Creates a face clustering task to cluster faces of different persons in images by person based on the intelligent algorithms.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the CreateBinding operation or manually by calling the IndexFileMeta or BatchIndexFileMeta operation.
 * *   Each call to the operation incrementally processes metadata in the dataset. You can regularly call this operation to process incremental files.
 *     After the clustering task is completed, you can call the GetFigureCluster or BatchGetFigureCluster  operation to query information about a specific cluster. You can also call the QueryFigureClusters operation to query all face clusters of the specified dataset.
 * *   Removing image information from the dataset causes changes to face clusters. When images that contain all faces in a cluster are removed, the cluster is deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateFigureClusteringTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateFigureClusteringTaskResponse
 */
async function createFigureClusteringTaskWithOptions(tmpReq: CreateFigureClusteringTaskRequest, runtime: $RuntimeOptions): CreateFigureClusteringTaskResponse {
  tmpReq.validate();
  var request = new CreateFigureClusteringTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateFigureClusteringTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a face clustering task to cluster faces of different persons in images by person based on the intelligent algorithms.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the CreateBinding operation or manually by calling the IndexFileMeta or BatchIndexFileMeta operation.
 * *   Each call to the operation incrementally processes metadata in the dataset. You can regularly call this operation to process incremental files.
 *     After the clustering task is completed, you can call the GetFigureCluster or BatchGetFigureCluster  operation to query information about a specific cluster. You can also call the QueryFigureClusters operation to query all face clusters of the specified dataset.
 * *   Removing image information from the dataset causes changes to face clusters. When images that contain all faces in a cluster are removed, the cluster is deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateFigureClusteringTaskRequest
 * @return CreateFigureClusteringTaskResponse
 */
async function createFigureClusteringTask(request: CreateFigureClusteringTaskRequest): CreateFigureClusteringTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createFigureClusteringTaskWithOptions(request, runtime);
}

model CreateFigureClustersMergingTaskRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='dataset001'),
  from?: string(name='From', description='The ID of the source group. You must specify either From or Froms, but not both.', example='Cluster-2ab85905-23ba-4632-b2d8-1c21cfe****'),
  froms?: [ string ](name='Froms', description='The IDs of source clustering groups. You must specify either From or Froms, but not both. You can specify up to 100 task IDs.'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  tags?: map[string]any(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"key":"val"}'),
  to?: string(name='To', description='The ID of the destination clustering group.

This parameter is required.', example='Cluster-4a3a71c1-c092-4788-8826-2f65d17****'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFigureClustersMergingTaskShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='dataset001'),
  from?: string(name='From', description='The ID of the source group. You must specify either From or Froms, but not both.', example='Cluster-2ab85905-23ba-4632-b2d8-1c21cfe****'),
  fromsShrink?: string(name='Froms', description='The IDs of source clustering groups. You must specify either From or Froms, but not both. You can specify up to 100 task IDs.'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  tagsShrink?: string(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"key":"val"}'),
  to?: string(name='To', description='The ID of the destination clustering group.

This parameter is required.', example='Cluster-4a3a71c1-c092-4788-8826-2f65d17****'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFigureClustersMergingTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16E****'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FF****'),
  taskId?: string(name='TaskId', description='The task ID.', example='92376fbb-171f-4259-913f-705f7ee0****'),
}

model CreateFigureClustersMergingTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateFigureClustersMergingTaskResponseBody(name='body'),
}

/**
 * @summary Merges two or more face clustering groups into one face clustering group.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 * *   If you merge unrelated groups, the feature values of the target groups are affected. As a result, the incremental data may be inaccurately grouped when you create a face clustering task.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateFigureClustersMergingTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateFigureClustersMergingTaskResponse
 */
async function createFigureClustersMergingTaskWithOptions(tmpReq: CreateFigureClustersMergingTaskRequest, runtime: $RuntimeOptions): CreateFigureClustersMergingTaskResponse {
  tmpReq.validate();
  var request = new CreateFigureClustersMergingTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.froms)) {
    request.fromsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.froms, 'Froms', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.from)) {
    query['From'] = request.from;
  }
  if (!$isNull(request.fromsShrink)) {
    query['Froms'] = request.fromsShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.to)) {
    query['To'] = request.to;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateFigureClustersMergingTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Merges two or more face clustering groups into one face clustering group.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 * *   If you merge unrelated groups, the feature values of the target groups are affected. As a result, the incremental data may be inaccurately grouped when you create a face clustering task.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateFigureClustersMergingTaskRequest
 * @return CreateFigureClustersMergingTaskResponse
 */
async function createFigureClustersMergingTask(request: CreateFigureClustersMergingTaskRequest): CreateFigureClustersMergingTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createFigureClustersMergingTaskWithOptions(request, runtime);
}

model CreateFileCompressionTaskRequest {
  compressedFormat?: string(name='CompressedFormat', description='The format of the package. Default value: zip.

>  Only the ZIP format is supported.', example='zip'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceManifestURI?: string(name='SourceManifestURI', description='The OSS URI of the inventory object that contains the objects to compress. The inventory object stores the objects to compress by using the same data structure of the Sources parameter in the JSON format. This parameter is suitable for specifying a large number of objects to compress.

>  You must specify this parameter or the `Sources` parameter. The `URI` parameter is required and the `Alias` parameter is optional. You can specify up to 80,000 compression rule by using SourceManifestURI in one single call to the operation. The following line provides an example of the content within an inventory object.

    [{"URI":"oss://<bucket>/<object>", "Alias":"/new-dir/new-name"}]', example='oss://test-bucket/test-object.json'),
  sources?: [ 
    {
      alias?: string(name='Alias', description='Specifies the path of the object in the package, or renames the object in the package.

*   Leave this parameter empty to retain the original directory structure of the object in the package. For example, if the object is stored at `oss://test-bucket/test-dir/test-object.doc` and you do not specify this parameter, the path of the object in the package is `/test-dir/test-object.doc`.
*   Rename the object. For example, if the object is stored at `oss://test-bucket/test-object.jpg` and you set the **Alias** parameter to `test-rename-object.jpg`, the name of the object in the package is `test-rename-object.jpg`.
*   Specify a different path for the object in the package. For example, if the directory to be packed is `oss://test-bucket/test-dir/` and you set the **Alias** parameter to `/new-dir/`, all objects in the directory are placed in the `/new-dir/` path in the package.
*   Set the parameter to `/` to remove the original directory structure.

>  Duplicate object names may cause a failure in extracting the objects from the package, depending on the packing tool that you use. We recommend that you avoid using duplicate object names when you rename objects in the packing task.', example='/new-dir/'),
      mode?: string(name='Mode', description='The object matching rule. Valid values: `fullname` and `prefix`. Default value: `prefix`

*   `prefix`: matches objects by object name prefix.
*   `fullname`: exactly matches one single object by its full object name.', example='fullname'),
      URI?: string(name='URI', description='The OSS URI of the object or directory.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is a directory or object:

When you pack a directory, `${Object}` is the path of the directory.

*   When you pack an object, `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
    }
  ](name='Sources', description='The objects to be packed and packing rules.

>  You must specify this parameter or the SourceManifestURI parameter. The Sources parameter can hold up to 100 packing rules. If you want to include more than 100 packing rules, use the SourceManifestURI parameter.'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the package. The object name part in the URI is used as the name of the package. Example: `name.zip`.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-target-object.zip'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateFileCompressionTaskShrinkRequest {
  compressedFormat?: string(name='CompressedFormat', description='The format of the package. Default value: zip.

>  Only the ZIP format is supported.', example='zip'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceManifestURI?: string(name='SourceManifestURI', description='The OSS URI of the inventory object that contains the objects to compress. The inventory object stores the objects to compress by using the same data structure of the Sources parameter in the JSON format. This parameter is suitable for specifying a large number of objects to compress.

>  You must specify this parameter or the `Sources` parameter. The `URI` parameter is required and the `Alias` parameter is optional. You can specify up to 80,000 compression rule by using SourceManifestURI in one single call to the operation. The following line provides an example of the content within an inventory object.

    [{"URI":"oss://<bucket>/<object>", "Alias":"/new-dir/new-name"}]', example='oss://test-bucket/test-object.json'),
  sourcesShrink?: string(name='Sources', description='The objects to be packed and packing rules.

>  You must specify this parameter or the SourceManifestURI parameter. The Sources parameter can hold up to 100 packing rules. If you want to include more than 100 packing rules, use the SourceManifestURI parameter.'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the package. The object name part in the URI is used as the name of the package. Example: `name.zip`.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-target-object.zip'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateFileCompressionTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16Es*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='FileCompression-3579a380-6f7a-4a9d-b9d2-65996*****'),
}

model CreateFileCompressionTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateFileCompressionTaskResponseBody(name='body'),
}

/**
 * @summary Creates a file packing task.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk group (ID: 88490020073) and share your questions with us.
 * >  The operation supports file packing only. Compression support will be added later.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   A call to the operation can pack up to 80,000 objects into a package.
 * *   The total size of all objects to be packed into a package cannot exceed 200 GB.
 * *   The operation can pack only Standard objects in Object Storage Service (OSS). To pack an object in another storage class, you must first [convert the storage class of the object](https://help.aliyun.com/document_detail/90090.html).
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateFileCompressionTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateFileCompressionTaskResponse
 */
async function createFileCompressionTaskWithOptions(tmpReq: CreateFileCompressionTaskRequest, runtime: $RuntimeOptions): CreateFileCompressionTaskResponse {
  tmpReq.validate();
  var request = new CreateFileCompressionTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  var query = {};
  if (!$isNull(request.compressedFormat)) {
    query['CompressedFormat'] = request.compressedFormat;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceManifestURI)) {
    query['SourceManifestURI'] = request.sourceManifestURI;
  }
  if (!$isNull(request.sourcesShrink)) {
    query['Sources'] = request.sourcesShrink;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateFileCompressionTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a file packing task.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk group (ID: 88490020073) and share your questions with us.
 * >  The operation supports file packing only. Compression support will be added later.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   A call to the operation can pack up to 80,000 objects into a package.
 * *   The total size of all objects to be packed into a package cannot exceed 200 GB.
 * *   The operation can pack only Standard objects in Object Storage Service (OSS). To pack an object in another storage class, you must first [convert the storage class of the object](https://help.aliyun.com/document_detail/90090.html).
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateFileCompressionTaskRequest
 * @return CreateFileCompressionTaskResponse
 */
async function createFileCompressionTask(request: CreateFileCompressionTaskRequest): CreateFileCompressionTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createFileCompressionTaskWithOptions(request, runtime);
}

model CreateFileUncompressionTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  password?: string(name='Password', description='The password that protects the package.', example='123456'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  selectedFiles?: [ string ](name='SelectedFiles', description='The files to extract. If you do not specify this parameter, the entire package is decompressed.'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://imm-apitest-fxf2/name.zip'),
  targetURI?: string(name='TargetURI', description='The OSS URI to which you want to extract files from the package or decompress the entire package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-dir/'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFileUncompressionTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  password?: string(name='Password', description='The password that protects the package.', example='123456'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  selectedFilesShrink?: string(name='SelectedFiles', description='The files to extract. If you do not specify this parameter, the entire package is decompressed.'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://imm-apitest-fxf2/name.zip'),
  targetURI?: string(name='TargetURI', description='The OSS URI to which you want to extract files from the package or decompress the entire package.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-dir/'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateFileUncompressionTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16Es*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='FileUncompression-16ab5dd6-af02-480e-9ed7-a8d51b1*****'),
}

model CreateFileUncompressionTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateFileUncompressionTaskResponseBody(name='body'),
}

/**
 * @summary Extracts the specified files from a ZIP, RAR, or 7z package to the specified directory or decompresses the entire package.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk group (ID: 88490020073) and share your questions with us.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports a package that contains up to 80,000 files.
 * *   The operation supports ZIP or RAR packages up to 200 GB in size, or 7z packages up to 50 GB in size.
 * *   The operation extracts files in streams to the specified directory. If the file extraction task is interrupted by a corrupt file, files that have been extracted are not deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateFileUncompressionTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateFileUncompressionTaskResponse
 */
async function createFileUncompressionTaskWithOptions(tmpReq: CreateFileUncompressionTaskRequest, runtime: $RuntimeOptions): CreateFileUncompressionTaskResponse {
  tmpReq.validate();
  var request = new CreateFileUncompressionTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.selectedFiles)) {
    request.selectedFilesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.selectedFiles, 'SelectedFiles', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.password)) {
    query['Password'] = request.password;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.selectedFilesShrink)) {
    query['SelectedFiles'] = request.selectedFilesShrink;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateFileUncompressionTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Extracts the specified files from a ZIP, RAR, or 7z package to the specified directory or decompresses the entire package.
 *
 * @description >  The operation is in public preview. For any inquires, join our DingTalk group (ID: 88490020073) and share your questions with us.
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports a package that contains up to 80,000 files.
 * *   The operation supports ZIP or RAR packages up to 200 GB in size, or 7z packages up to 50 GB in size.
 * *   The operation extracts files in streams to the specified directory. If the file extraction task is interrupted by a corrupt file, files that have been extracted are not deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateFileUncompressionTaskRequest
 * @return CreateFileUncompressionTaskResponse
 */
async function createFileUncompressionTask(request: CreateFileUncompressionTaskRequest): CreateFileUncompressionTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createFileUncompressionTaskWithOptions(request, runtime);
}

model CreateImageModerationTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='The authorization chain. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  interval?: long(name='Interval', description='The time interval between two consecutive frames in a GIF or long image. Default value: 1.', example='2'),
  maxFrames?: long(name='MaxFrames', description='The maximum number of frames that can be captured in a GIF or long image. Default value: 1.', example='10'),
  notification?: Notification(name='Notification', description='The notification settings. For more information, click Notification. For information about the asynchronous notification format, see [Asynchronous notification format](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  scenes?: [ string ](name='Scenes', description='The scenarios in which you want to apply the image moderation task.'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the `oss://<Bucket>/<Object>` format. `<Bucket>` specifies the name of the OSS bucket that resides in the same region as the current project. `<Object>` specifies the complete path to the image file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The user data, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the user data is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateImageModerationTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='The authorization chain. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  interval?: long(name='Interval', description='The time interval between two consecutive frames in a GIF or long image. Default value: 1.', example='2'),
  maxFrames?: long(name='MaxFrames', description='The maximum number of frames that can be captured in a GIF or long image. Default value: 1.', example='10'),
  notificationShrink?: string(name='Notification', description='The notification settings. For more information, click Notification. For information about the asynchronous notification format, see [Asynchronous notification format](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  scenesShrink?: string(name='Scenes', description='The scenarios in which you want to apply the image moderation task.'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the `oss://<Bucket>/<Object>` format. `<Bucket>` specifies the name of the OSS bucket that resides in the same region as the current project. `<Object>` specifies the complete path to the image file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The user data, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the user data is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateImageModerationTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='2E6-1I0FGn0zFnl5AflRfhzClma*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  taskId?: string(name='TaskId', description='The task ID.', example='ImageModeration-179ef4f8-d583-4f0c-a293-7c0889c*****'),
}

model CreateImageModerationTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateImageModerationTaskResponseBody(name='body'),
}

/**
 * @summary Creates an image moderation task to ensure image content compliance. You can call this operation to identify inappropriate content, such as pornography, violence, terrorism, politically sensitive content, undesirable scenes, unauthorized logos, and non-compliant ads.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The image for which you want to create a content moderation task must meet the following requirements:
 *     *   The image URL supports the HTTP and HTTPS protocols.
 *     *   The image is in one of the following formats: PNG, JPG, JPEG, BMP, GIF, and WebP
 *     *   The image size is limited to 20 MB for synchronous and asynchronous calls, with a maximum height or width of 30,000 pixels. The total number of pixels cannot exceed 250 million. GIF images are limited to 4,194,304 pixels, with a maximum height or width of 30,000 pixels.
 *     *   The image download time is limited to 3 seconds. If the download takes longer, a timeout error occurs.
 *     *   To ensure effective moderation, we recommend that you submit an image with dimensions of at least 256 × 256 pixels.
 *     *   The response time of the CreateImageModerationTask operation varies based on the duration of the image download. Make sure that the image is stored in a stable and reliable service. We recommend that you store images on Alibaba Cloud Object Storage Service (OSS) or cache them on Alibaba Cloud CDN.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478241.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can also obtain information about the task based on notifications.
 * >  The detection result is sent as an asynchronous notification. The Suggestion field of the notification can have one of the following values:
 * *   pass: No non-compliant content is found.
 * *   block: Non-compliant content is detected. The Categories field value indicates the non-compliance categories. For more information, see Content moderation results.
 * *   review: A manual review is needed. After the manual review is finished, another asynchronous notification is sent to inform you about the review result. >
 *
 * @param tmpReq CreateImageModerationTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateImageModerationTaskResponse
 */
async function createImageModerationTaskWithOptions(tmpReq: CreateImageModerationTaskRequest, runtime: $RuntimeOptions): CreateImageModerationTaskResponse {
  tmpReq.validate();
  var request = new CreateImageModerationTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.scenes)) {
    request.scenesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.scenes, 'Scenes', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.interval)) {
    query['Interval'] = request.interval;
  }
  if (!$isNull(request.maxFrames)) {
    query['MaxFrames'] = request.maxFrames;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.scenesShrink)) {
    query['Scenes'] = request.scenesShrink;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateImageModerationTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates an image moderation task to ensure image content compliance. You can call this operation to identify inappropriate content, such as pornography, violence, terrorism, politically sensitive content, undesirable scenes, unauthorized logos, and non-compliant ads.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The image for which you want to create a content moderation task must meet the following requirements:
 *     *   The image URL supports the HTTP and HTTPS protocols.
 *     *   The image is in one of the following formats: PNG, JPG, JPEG, BMP, GIF, and WebP
 *     *   The image size is limited to 20 MB for synchronous and asynchronous calls, with a maximum height or width of 30,000 pixels. The total number of pixels cannot exceed 250 million. GIF images are limited to 4,194,304 pixels, with a maximum height or width of 30,000 pixels.
 *     *   The image download time is limited to 3 seconds. If the download takes longer, a timeout error occurs.
 *     *   To ensure effective moderation, we recommend that you submit an image with dimensions of at least 256 × 256 pixels.
 *     *   The response time of the CreateImageModerationTask operation varies based on the duration of the image download. Make sure that the image is stored in a stable and reliable service. We recommend that you store images on Alibaba Cloud Object Storage Service (OSS) or cache them on Alibaba Cloud CDN.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478241.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can also obtain information about the task based on notifications.
 * >  The detection result is sent as an asynchronous notification. The Suggestion field of the notification can have one of the following values:
 * *   pass: No non-compliant content is found.
 * *   block: Non-compliant content is detected. The Categories field value indicates the non-compliance categories. For more information, see Content moderation results.
 * *   review: A manual review is needed. After the manual review is finished, another asynchronous notification is sent to inform you about the review result. >
 *
 * @param request CreateImageModerationTaskRequest
 * @return CreateImageModerationTaskResponse
 */
async function createImageModerationTask(request: CreateImageModerationTaskRequest): CreateImageModerationTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createImageModerationTaskWithOptions(request, runtime);
}

model CreateImageSplicingTaskRequest {
  align?: long(name='Align', description='The width or height with which the input images must align. Valid values: 1 to 4096. Unit: px.

*   If you set **Direction** to `vertical`, this parameter specifies the width with which the input images must align.
*   If you set **Direction** to `horizontal`, this parameter specifies the height with which the input images must align.

>  If you do not specify this parameter, the width or height of the first input image is used.', example='192'),
  backgroundColor?: string(name='BackgroundColor', description='The padding color of the spaces specified by `Padding` and `Margin`. Colors encoded in the `#FFFFFF` format and colors that are related to preset keywords such as `red` and `alpha` are supported.', example='red'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  direction?: string(name='Direction', description='The splicing method. Valid values:

*   vertical (default): All input images are vertically aligned and have the same width.
*   horizontal: All input images are horizontally aligned and have the same height.', example='vertical'),
  imageFormat?: string(name='ImageFormat', description='The compression format of the output image. Valid values:

*   jpg (default)
*   png
*   webp', example='jpg'),
  margin?: long(name='Margin', description='The empty space or border around the edges of the output image. Default value: 0. Unit: px.', example='2'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  padding?: long(name='Padding', description='The space between component images in the output image. Default value: 0. Unit: px.', example='2'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  quality?: long(name='Quality', description='The compression quality of the output image. This parameter takes effect only for JPG and WebP images. Valid values: 0 to 100. Default value: 80.', example='80'),
  scaleType?: string(name='ScaleType', description='The scaling mode of the input images that are vertically or horizontally aligned. Valid values:

*   fit (default): Input images are scaled proportionally, and black edges are not retained.
*   stretch: Input images are stretched to fill the space.
*   horizon: Input images are horizontally stretched.
*   vertical: Input images are vertically stretched.', example='stretch'),
  sources?: [ 
    {
      rotate?: long(name='Rotate', description='The rotation angle. Valid values:

*   0 (default)
*   90
*   180
*   270', example='90'),
      URI?: string(name='URI', description='The Object Storage Service (OSS) bucket in which you store the input images.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the input images that have an extension.

The following image formats are supported: jpg and png.

This parameter is required.', example='oss://examplebucket/sampleobject.jpg'),
    }
  ](name='Sources', description='The input images. The images are sliced in the order of the input image URIs.

This parameter is required.'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  targetURI?: string(name='TargetURI', description='The OSS bucket in which you want to store the output image.

Specify the value in the oss://${bucketname}/${objectname} format. ${bucketname} specifies the name of the OSS bucket that resides in the same region as the current project. ${objectname} specifies the path to the output image.

This parameter is required.', example='oss://examplebucket/outputImage.jpg'),
  userData?: string(name='UserData', description='The user data, which is returned as asynchronous notifications to help manage notifications within your system. The maximum length of the user data is 2,048 bytes.', example='test-data'),
}

model CreateImageSplicingTaskShrinkRequest {
  align?: long(name='Align', description='The width or height with which the input images must align. Valid values: 1 to 4096. Unit: px.

*   If you set **Direction** to `vertical`, this parameter specifies the width with which the input images must align.
*   If you set **Direction** to `horizontal`, this parameter specifies the height with which the input images must align.

>  If you do not specify this parameter, the width or height of the first input image is used.', example='192'),
  backgroundColor?: string(name='BackgroundColor', description='The padding color of the spaces specified by `Padding` and `Margin`. Colors encoded in the `#FFFFFF` format and colors that are related to preset keywords such as `red` and `alpha` are supported.', example='red'),
  credentialConfigShrink?: string(name='CredentialConfig', description='The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  direction?: string(name='Direction', description='The splicing method. Valid values:

*   vertical (default): All input images are vertically aligned and have the same width.
*   horizontal: All input images are horizontally aligned and have the same height.', example='vertical'),
  imageFormat?: string(name='ImageFormat', description='The compression format of the output image. Valid values:

*   jpg (default)
*   png
*   webp', example='jpg'),
  margin?: long(name='Margin', description='The empty space or border around the edges of the output image. Default value: 0. Unit: px.', example='2'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  padding?: long(name='Padding', description='The space between component images in the output image. Default value: 0. Unit: px.', example='2'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  quality?: long(name='Quality', description='The compression quality of the output image. This parameter takes effect only for JPG and WebP images. Valid values: 0 to 100. Default value: 80.', example='80'),
  scaleType?: string(name='ScaleType', description='The scaling mode of the input images that are vertically or horizontally aligned. Valid values:

*   fit (default): Input images are scaled proportionally, and black edges are not retained.
*   stretch: Input images are stretched to fill the space.
*   horizon: Input images are horizontally stretched.
*   vertical: Input images are vertically stretched.', example='stretch'),
  sourcesShrink?: string(name='Sources', description='The input images. The images are sliced in the order of the input image URIs.

This parameter is required.'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  targetURI?: string(name='TargetURI', description='The OSS bucket in which you want to store the output image.

Specify the value in the oss://${bucketname}/${objectname} format. ${bucketname} specifies the name of the OSS bucket that resides in the same region as the current project. ${objectname} specifies the path to the output image.

This parameter is required.', example='oss://examplebucket/outputImage.jpg'),
  userData?: string(name='UserData', description='The user data, which is returned as asynchronous notifications to help manage notifications within your system. The maximum length of the user data is 2,048 bytes.', example='test-data'),
}

model CreateImageSplicingTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='10C-1R6p7Km0H5Ieg38LKXTIvw*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='94D6F994-E298-037E-8E8B-0090F27*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='ImageSplicing-537cc157-7645-444a-a631-c8db4d02*****'),
}

model CreateImageSplicingTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateImageSplicingTaskResponseBody(name='body'),
}

/**
 * @summary Creates an image splicing task. You can call this operation to splice multiple images into one based on a given rule and save the final image into an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   You can call this operation to merge up to 10 images. Each side of an image cannot exceed 32,876 pixels, and the total number of pixels of the image cannot exceed 1 billion.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateImageSplicingTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateImageSplicingTaskResponse
 */
async function createImageSplicingTaskWithOptions(tmpReq: CreateImageSplicingTaskRequest, runtime: $RuntimeOptions): CreateImageSplicingTaskResponse {
  tmpReq.validate();
  var request = new CreateImageSplicingTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.align)) {
    query['Align'] = request.align;
  }
  if (!$isNull(request.backgroundColor)) {
    query['BackgroundColor'] = request.backgroundColor;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.direction)) {
    query['Direction'] = request.direction;
  }
  if (!$isNull(request.imageFormat)) {
    query['ImageFormat'] = request.imageFormat;
  }
  if (!$isNull(request.margin)) {
    query['Margin'] = request.margin;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.padding)) {
    query['Padding'] = request.padding;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.quality)) {
    query['Quality'] = request.quality;
  }
  if (!$isNull(request.scaleType)) {
    query['ScaleType'] = request.scaleType;
  }
  if (!$isNull(request.sourcesShrink)) {
    query['Sources'] = request.sourcesShrink;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateImageSplicingTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates an image splicing task. You can call this operation to splice multiple images into one based on a given rule and save the final image into an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   You can call this operation to merge up to 10 images. Each side of an image cannot exceed 32,876 pixels, and the total number of pixels of the image cannot exceed 1 billion.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateImageSplicingTaskRequest
 * @return CreateImageSplicingTaskResponse
 */
async function createImageSplicingTask(request: CreateImageSplicingTaskRequest): CreateImageSplicingTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createImageSplicingTaskWithOptions(request, runtime);
}

model CreateImageToPDFTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sources?: [ 
    {
      rotate?: long(name='Rotate', description='The rotation angle. Valid values:

*   0 (default)
*   90
*   180
*   270', example='90'),
      URI?: string(name='URI', description='The OSS URI of the input image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

The operation supports the following image formats: JPG, JP2, PNG, TIFF, WebP, BMP, and SVG.

This parameter is required.', example='oss://examplebucket/sampleobject.jpg'),
    }
  ](name='Sources', description='The list of images. The sequence of image URIs in the list determines the order in which they are converted.

This parameter is required.'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output file.

Specify the OSS URI in the oss://${bucketname}/${objectname} format, where ${bucketname} is the name of the bucket in the same region as the current project and ${objectname} is the path of the object with the extension included.

This parameter is required.', example='oss://examplebucket/outputDocument.pdf'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateImageToPDFTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sourcesShrink?: string(name='Sources', description='The list of images. The sequence of image URIs in the list determines the order in which they are converted.

This parameter is required.'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  targetURI?: string(name='TargetURI', description='The OSS URI of the output file.

Specify the OSS URI in the oss://${bucketname}/${objectname} format, where ${bucketname} is the name of the bucket in the same region as the current project and ${objectname} is the path of the object with the extension included.

This parameter is required.', example='oss://examplebucket/outputDocument.pdf'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateImageToPDFTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16Es*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='ImageToPDF-cbe6ae3e-f8dc-4566-9da7-535d5d*****'),
}

model CreateImageToPDFTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateImageToPDFTaskResponseBody(name='body'),
}

/**
 * @summary Converts multiple images into one single PDF file and stores the PDF file to the specified path in Object Storage Service (OSS).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   You can specify up to 100 images in a call to the operation.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateImageToPDFTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateImageToPDFTaskResponse
 */
async function createImageToPDFTaskWithOptions(tmpReq: CreateImageToPDFTaskRequest, runtime: $RuntimeOptions): CreateImageToPDFTaskResponse {
  tmpReq.validate();
  var request = new CreateImageToPDFTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourcesShrink)) {
    query['Sources'] = request.sourcesShrink;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateImageToPDFTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Converts multiple images into one single PDF file and stores the PDF file to the specified path in Object Storage Service (OSS).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 * *   You can specify up to 100 images in a call to the operation.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateImageToPDFTaskRequest
 * @return CreateImageToPDFTaskResponse
 */
async function createImageToPDFTask(request: CreateImageToPDFTaskRequest): CreateImageToPDFTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createImageToPDFTaskWithOptions(request, runtime);
}

model CreateLocationDateClusteringTaskRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  dateOptions?: {
    gapDays?: long(name='GapDays', description='The maximum number of days allowed in a gap for a single spatiotemporal cluster. Valid values: 0 to 99999.

For example, if travel photos were produced on March 4, 5, and 7, 2024, but not on Marh 6, 2024, and you set the parameter to 1, IMM considers the travel spanning the date range from March 4, 2024 to March 7, 2024 and includes photos within the data range in the same cluster.````

We recommend that you set the parameter to a value within the range from 0 to 3.

This parameter is required.', example='1'),
    maxDays?: long(name='MaxDays', description='The maximum number of days that a single spatiotemporal cluster can span. Valid values: 1 to 99999. IMM does not create a cluster that spans more than the maximum number of days.

For example, if you want to create travel photo clusters, you may want to exclude photos that were taken within 15 consecutive days in the same city, because it is likely that these photos were not taken during a travel. In this case, you can set the parameter to 15 to exclude this time range and location from the clustering task.

This parameter is required.', example='15'),
    minDays?: long(name='MinDays', description='The minimum number of days that a single spatiotemporal cluster can span. Valid values: 1 to 99999. IMM does not create a cluster that spans less than the minimum number of days.

For example, if you do not want a one-day tour cluster, you can set the parameter to 2.

This parameter is required.', example='1'),
  }(name='DateOptions', description='The date configurations for clustering.

>  Adjusting these configurations affects existing spatiotemporal clusters for the dataset.

This parameter is required.'),
  locationOptions?: {
    locationDateClusterLevels?: [ string ](name='LocationDateClusterLevels', description='The administrative division levels. You can specify multiple administrative division levels.

For example, you uploaded photos that were taken from March 3, 2024 to March 5, 2024 in Hangzhou and photos that were taken from March 6, 2024 to March 8, 2024 in Jiaxing. When you call the operation and set the parameter to `["city", "province"]`, the following spatiotemporal clusters are created from these photos:

*   March 3, 2024 to March 5, 2024, Hangzhou
*   March 6, 2024 to March 8, 2024, Jiaxing
*   March 3, 2024 to March 8, 2024, Zhejiang

This parameter is required.'),
  }(name='LocationOptions', description='The geolocation configurations for clustering.

>  Adjusting these configurations affects existing spatiotemporal clusters for the dataset.

This parameter is required.'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateLocationDateClusteringTaskShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  dateOptionsShrink?: string(name='DateOptions', description='The date configurations for clustering.

>  Adjusting these configurations affects existing spatiotemporal clusters for the dataset.

This parameter is required.'),
  locationOptionsShrink?: string(name='LocationOptions', description='The geolocation configurations for clustering.

>  Adjusting these configurations affects existing spatiotemporal clusters for the dataset.

This parameter is required.'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateLocationDateClusteringTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='25B-1W2ChgujA3Q8MbBY6mSp2mh****'),
  requestId?: string(name='RequestId', description='The request ID.', example='B121940C-9794-4EE3-8D6E-F8EC525F****'),
  taskId?: string(name='TaskId', description='The task ID.', example='LocationDateClustering-c10dce07-1de7-4da7-abee-1a3aba7****'),
}

model CreateLocationDateClusteringTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateLocationDateClusteringTaskResponseBody(name='body'),
}

/**
 * @summary Creates a spatiotemporal clustering task to cluster photos and videos based on geolocation and time information. Spatiotemporal clustering allows you to group photos and videos taken during a travel or at different places by their spatial and temporal similarity. Based on spatiotemporal clustering, you can develop media capabilities such as media file categorization, photo collections, and image and video-based stories.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Each call to the operation incrementally processes metadata in the dataset.****`` You can regularly call this operation to process incremental files.
 * *   After a spatiotemporal clustering task is complete, you can call the [QueryLocationDateClusters](https://help.aliyun.com/document_detail/478189.html) operation to query the spatiotemporal clustering result.
 * *   Removing metadata from a dataset does not affect existing spatiotemporal clusters for the dataset. To delete a spatiotemporal cluster, call the [DeleteLocationDateCluster](https://help.aliyun.com/document_detail/478191.html) operation.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateLocationDateClusteringTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateLocationDateClusteringTaskResponse
 */
async function createLocationDateClusteringTaskWithOptions(tmpReq: CreateLocationDateClusteringTaskRequest, runtime: $RuntimeOptions): CreateLocationDateClusteringTaskResponse {
  tmpReq.validate();
  var request = new CreateLocationDateClusteringTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.dateOptions)) {
    request.dateOptionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.dateOptions, 'DateOptions', 'json');
  }
  if (!$isNull(tmpReq.locationOptions)) {
    request.locationOptionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.locationOptions, 'LocationOptions', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.dateOptionsShrink)) {
    query['DateOptions'] = request.dateOptionsShrink;
  }
  if (!$isNull(request.locationOptionsShrink)) {
    query['LocationOptions'] = request.locationOptionsShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateLocationDateClusteringTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a spatiotemporal clustering task to cluster photos and videos based on geolocation and time information. Spatiotemporal clustering allows you to group photos and videos taken during a travel or at different places by their spatial and temporal similarity. Based on spatiotemporal clustering, you can develop media capabilities such as media file categorization, photo collections, and image and video-based stories.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Each call to the operation incrementally processes metadata in the dataset.****`` You can regularly call this operation to process incremental files.
 * *   After a spatiotemporal clustering task is complete, you can call the [QueryLocationDateClusters](https://help.aliyun.com/document_detail/478189.html) operation to query the spatiotemporal clustering result.
 * *   Removing metadata from a dataset does not affect existing spatiotemporal clusters for the dataset. To delete a spatiotemporal cluster, call the [DeleteLocationDateCluster](https://help.aliyun.com/document_detail/478191.html) operation.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateLocationDateClusteringTaskRequest
 * @return CreateLocationDateClusteringTaskResponse
 */
async function createLocationDateClusteringTask(request: CreateLocationDateClusteringTaskRequest): CreateLocationDateClusteringTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createLocationDateClusteringTaskWithOptions(request, runtime);
}

model CreateMediaConvertTaskRequest {
  alignmentIndex?: int32(name='AlignmentIndex', description='The sequence number of the main media file in the concatenation list of media files. The main media file provides the default transcoding settings, such as the resolution and the frame rate, for videos and audios. Default value: `0`. A value of `0` specifies that the main media file is aligned with the first media file in the concatenation list.'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For more information, see "Notification". For information about the asynchronous notification format, see [Asynchronous notification format](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  sources?: [ 
    {
      alignMode?: string(name='AlignMode'),
      attached?: boolean(name='Attached'),
      disableAudio?: boolean(name='DisableAudio'),
      disableVideo?: boolean(name='DisableVideo'),
      duration?: double(name='Duration', description='The transcoding duration of the media. Unit: seconds. Default value: 0. A value of 0 specifies that the transcoding duration lasts until the end of the video.', example='0'),
      startTime?: double(name='StartTime', description='The start time of the media transcoding task. Unit: seconds. Valid values:

*   0 (default): starts transcoding when the media starts playing.
*   n: starts transcoding n seconds after the media starts playing. n must be greater than 0.', example='0'),
      subtitles?: [ 
        {
          language?: string(name='Language', description='The subtitle language. If you specify this parameter, comply with the ISO 639-2 standard. This parameter is left empty by default.', example='eng'),
          timeOffset?: double(name='TimeOffset', description='The time offset of the subtitle. Unit: seconds. Default value: 0.', example='10.5'),
          URI?: string(name='URI', description='The URI of the Object Storage Service (OSS) bucket. Specify the value in the `oss://${Bucket}/${Object}` format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region with the current project. `${Object}` specifies the complete path to the file whose name contains an extension. The following subtitle formats are supported: srt, vtt, mov_text, ass, dvd_sub, and pgs.', example='oss://test-bucket/subtitles'),
        }
      ](name='Subtitles', description='The subtitles. By default, this parameter is left empty.'),
      URI?: string(name='URI', description='The URI of the Object Storage Service (OSS) bucket. Specify the value in the `oss://${Bucket}/${Object}` format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region with the current project. `${Object}` specifies the complete path to the file whose name contains an extension.', example='oss://test-bucket/test-object'),
    }
  ](name='Sources', description='The source media files. If multiple files exist at the same time, the Concat feature is enabled. The video files are concatenated in the order of their URI inputs.

This parameter is required.'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test":"val1"}'),
  targets?: [ 
    {
      audio?: TargetAudio(name='Audio', description='The audio processing settings.

>  If you leave Audio empty and the first audio stream exists, the first audio stream is directly copied to the output file.'),
      container?: string(name='Container', description='The type of the media container.

*   Valid values for audio and video containers: mp4, mkv, mov, asf, avi, mxf, ts, and flv.

*   Valid values only for audio containers: mp3, aac, flac, oga, ac3, and opus.

    **

    **Note** Specify Container and URI at the same time. If you want to extract subtitles, capture frames, capture image sprites, or rotate media images, set Container and URI to null. In this case, Segment, Video, Audio, and Speed do not take effect.', example='mp4'),
      image?: TargetImage(name='Image', description='The frame capturing, sprite capturing, and media rotation settings.'),
      segment?: {
        duration?: double(name='Duration', description='The duration of the segment. Unit: seconds.', example='30'),
        format?: string(name='Format', description='The media segmentation mode. Valid values:

*   hls
*   dash', example='hls'),
        startNumber?: int32(name='StartNumber', description='The start sequence number. You can specify this parameter only if you set Format to hls. Default value: 0.', example='5'),
      }(name='Segment', description='The media segmentation settings. By default, no segmentation is performed.'),
      speed?: float(name='Speed', description='The playback speed of the media. Valid values: 0.5 to 2. Default value: 1.0.

>  This parameter specifies the ratio of the non-regular playback speed of the transcoded media file to the default playback speed of the source media file.', example='1.0'),
      stripMetadata?: boolean(name='StripMetadata', description='Specifies whether to remove the metadata, such as `title` and `album`, from the media file. Default value: false.'),
      subtitle?: TargetSubtitle(name='Subtitle', description='The subtitle processing settings.

>  If you leave Subtitle empty and the first subtitle stream exists, the first subtitle stream is directly copied to the output file.'),
      URI?: string(name='URI', description='The URI of the OSS bucket in which you want to store the media transcoding output file.

Specify the value in the `oss://${Bucket}/${Object}` format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region with the current project. `${Object}` specifies the complete path to the file whose name contains an extension.

*   If the value of **URI** contains an extension, the endpoint of the OSS bucket matches the URI. If multiple media transcoding output files exist, the endpoints of the correspodning OSS buckets may be overwritten.****

*   If the value of **URI** does not contain an extension, the endpoint of the OSS bucket consists of the following parameters: **URI**, **Container**, and **Segment**. In the following examples, the value of **URI** is `oss://examplebucket/outputVideo`.

    *   If the value of **Container** is `mp4` and the value of **Segment** is null, the endpoint of the OSS bucket is `oss://examplebucket/outputVideo.mp4`.
    *   If the value of **Container** is `ts` and the value of **Format** in **Segment** is `hls`, the endpoint of the OSS bucket is `oss://examplebucket/outputVideo.m3u8`. In addition, multiple ts files prefixed with `oss://examplebucket/outputVideo` are generated.', example='oss://test-bucket/targets'),
      video?: TargetVideo(name='Video', description='The video processing settings.

>  If you leave Video empty and the first video stream exists, the first video stream is directly copied to the output file.'),
    }
  ](name='Targets', description='The media processing tasks. You can specify multiple values for this parameter.

This parameter is required.'),
  userData?: string(name='UserData', description='The custom information, which is returned as asynchronous notifications to facilitate notification management in your system. The maximum information length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateMediaConvertTaskShrinkRequest {
  alignmentIndex?: int32(name='AlignmentIndex', description='The sequence number of the main media file in the concatenation list of media files. The main media file provides the default transcoding settings, such as the resolution and the frame rate, for videos and audios. Default value: `0`. A value of `0` specifies that the main media file is aligned with the first media file in the concatenation list.'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For more information, see "Notification". For information about the asynchronous notification format, see [Asynchronous notification format](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  sourcesShrink?: string(name='Sources', description='The source media files. If multiple files exist at the same time, the Concat feature is enabled. The video files are concatenated in the order of their URI inputs.

This parameter is required.'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test":"val1"}'),
  targetsShrink?: string(name='Targets', description='The media processing tasks. You can specify multiple values for this parameter.

This parameter is required.'),
  userData?: string(name='UserData', description='The custom information, which is returned as asynchronous notifications to facilitate notification management in your system. The maximum information length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateMediaConvertTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='0ED-1Bz8z71k5TtsUejT4UJ16Es****'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFFE0B6'),
  taskId?: string(name='TaskId', description='The task ID.', example='MediaConvert-adb1ee28-c4c9-42a7-9f54-3b8eadcb****'),
}

model CreateMediaConvertTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateMediaConvertTaskResponseBody(name='body'),
}

/**
 * @summary Creates an asynchronous media transcoding task to provide audio and video file processing abilities, such as media transcoding, media splicing, video frame capturing, and video to GIF conversion.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   By default, only one type of video, audio, and subtitle streams is processed when you call this operation to process media transcoding. However, you can specify the number of video, audio, or subtitle streams that you want to process.
 * *   When you use this operation to execute a media merging task, up to 11 media files are supported. In this case, the parameters that involve media transcoding and frame capturing apply to the merged media data.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *     **
 *     ****
 *
 * @param tmpReq CreateMediaConvertTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateMediaConvertTaskResponse
 */
async function createMediaConvertTaskWithOptions(tmpReq: CreateMediaConvertTaskRequest, runtime: $RuntimeOptions): CreateMediaConvertTaskResponse {
  tmpReq.validate();
  var request = new CreateMediaConvertTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  if (!$isNull(tmpReq.targets)) {
    request.targetsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.targets, 'Targets', 'json');
  }
  var query = {};
  if (!$isNull(request.alignmentIndex)) {
    query['AlignmentIndex'] = request.alignmentIndex;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourcesShrink)) {
    query['Sources'] = request.sourcesShrink;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetsShrink)) {
    query['Targets'] = request.targetsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateMediaConvertTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates an asynchronous media transcoding task to provide audio and video file processing abilities, such as media transcoding, media splicing, video frame capturing, and video to GIF conversion.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   By default, only one type of video, audio, and subtitle streams is processed when you call this operation to process media transcoding. However, you can specify the number of video, audio, or subtitle streams that you want to process.
 * *   When you use this operation to execute a media merging task, up to 11 media files are supported. In this case, the parameters that involve media transcoding and frame capturing apply to the merged media data.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *     **
 *     ****
 *
 * @param request CreateMediaConvertTaskRequest
 * @return CreateMediaConvertTaskResponse
 */
async function createMediaConvertTask(request: CreateMediaConvertTaskRequest): CreateMediaConvertTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createMediaConvertTaskWithOptions(request, runtime);
}

model CreateOfficeConversionTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  endPage?: long(name='EndPage', description='The ending page for document conversion. The default value is -1, which converts the file until the last page of the file.

> 

*   If the source is a spreadsheet file, specify the index number of the corresponding sheet instead.

*   If you convert a large number of pages within the document, we recommend that you split the pages into several document conversion tasks to prevent conversion timeouts.

*   This parameter takes effect only when you convert the file into an image. It does not take effect when you convert the file into a PDF or TXT file.', example='-1'),
  firstPage?: boolean(name='FirstPage', description='Specifies whether to return only the first resulting image when you convert a spreadsheet document to images. The number of rows and the number of columns in the first image are determined by the automatic splitting process. Valid values:

*   false (default): does not return only the first resulting image. All the resulting images are returned.
*   true: returns only the first resulting image. A thumbnail is generated.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='false'),
  fitToHeight?: boolean(name='FitToHeight', description='Specifies whether to convert all rows of a spreadsheet document to one single image or a single-page PDF document when you convert the table document to an image or a PDF document. Valid values:

*   false (default): converts all rows of the document to multiple images or a multi-page PDF document. This is the default value.
*   true: converts all rows of the document to one single image or a single-page PDF document.', example='false'),
  fitToWidth?: boolean(name='FitToWidth', description='Specifies whether to convert all columns of a spreadsheet document to one single image or a single-page PDF document when you convert the spreadsheet file to an image or a PDF document. Valid values:

*   false (default): converts all columns of the document to multiple images or a multi-page PDF document.
*   true: converts all columns of the document to one single image or a single-page PDF document.', example='false'),
  holdLineFeed?: boolean(name='HoldLineFeed', description='Specifies whether to retain line feeds in the output file when a document is converted to a text file. Valid values:

*   false (default): does not retain the line feeds.
*   true: retains the line feeds.', example='false'),
  imageDPI?: long(name='ImageDPI', description='The dots per inch (DPI) of output images. Valid values: 96 to 600. Default value: 96.', example='96'),
  longPicture?: boolean(name='LongPicture', description='Specifies whether to convert the document to a long image. Valid values:

*   false (default): does not convert the document to a long image.
*   true: converts the document to a long image.

>  You can convert up to 20 pages of a document into a long image. If you convert more than 20 pages to a long image, an error may occur.', example='false'),
  longText?: boolean(name='LongText', description='Specifies whether to convert the document to a long text file. Valid values:

*   false (default): does not convert the document to a long text file. Each page of the document is converted to a text file.
*   true: converts the entire document to a long text file.', example='false'),
  maxSheetColumn?: long(name='MaxSheetColumn', description='The maximum number of spreadsheet columns to be converted to an image. By default, all columns within the spreadsheet file are converted.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='10'),
  maxSheetRow?: long(name='MaxSheetRow', description='The maximum number of spreadsheet rows to be converted to an image. By default, all rows within the spreadsheet file are converted.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='10'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  pages?: string(name='Pages', description='The numbers of pages to be converted. This parameter takes precedence over the StartPage and EndPage parameters. The value of this parameter can be in different formats:

*   If you specify pages separately by page number, separate page numbers with commas (,). Example: 1,2
*   If you specify consecutive pages by using a page range, connect the starting and ending page numbers with a hyphen (-). Example: 1,2-4,7', example='1,2-4,7'),
  paperHorizontal?: boolean(name='PaperHorizontal', description='Specifies whether to place sheets of paper horizontally for converting a spreadsheet document to images. Conversion to images is similar to printing the content on a sheet of paper. Valid values:

*   false (default): does not place sheets of paper horizontally. Paper sheets are placed vertically.
*   true: places sheets of paper horizontally.', example='false'),
  paperSize?: string(name='PaperSize', description='The paper size for converting a spreadsheet document to images. Conversion to images is similar to printing the content on a sheet of paper. Valid values:

*   A0
*   A2
*   A4 (default)

>  This parameter takes effect only when the **FitToHeight** and **FitToWidth** parameters are specified.', example='A4'),
  password?: string(name='Password', description='The password that protects the source document. To convert a password-protected document, specify this parameter.', example='********'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  quality?: long(name='Quality', description='The quality of the output file. Valid values: 0 to 100. A smaller value indicates lower quality and better conversion performance. By default, the system specifies an appropriate value that provides an optimal balance between the quality and conversion performance based on the document content.', example='60'),
  scalePercentage?: long(name='ScalePercentage', description='The percentage scale relative to the source document. Valid values: 20 to 200. The default value is 100, which indicates that the document is not scaled.

>  A value that is less than 100 indicates a size reduction. A value that is greater than 100 indicates an enlargement.', example='100'),
  sheetCount?: long(name='SheetCount', description='The number of sheets to be converted to an image. By default, all sheets within the spreadsheet file are converted.', example='1'),
  sheetIndex?: long(name='SheetIndex', description='The index number of the sheet to be converted to an image. The value ranges from 1 to the index number of the last sheet. By default, the conversion starts from the first sheet.', example='1'),
  showComments?: boolean(name='ShowComments', description='Specifies whether to display comments in resulting images when a text document is converted to images. Valid values:

*   false (default): does not display comments in resulting images.
*   true: displays comments in resulting images.', example='false'),
  sourceType?: string(name='SourceType', description='The name extension of the source file. By default, the type of the source file is determined based on the name extension of the source object in OSS. If the object in OSS does not have a name extension, you can specify this parameter. Valid values:

*   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
*   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
*   Spreadsheet documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
*   PDF documents: pdf', example='doc'),
  sourceURI?: string(name='SourceURI', description='The URI of the source file.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
  sources?: [ 
    {
      rotate?: long(name='Rotate'),
      URI?: string(name='URI'),
    }
  ](name='Sources'),
  startPage?: long(name='StartPage', description='The starting page for document conversion. Default value: 1.

> 

*   If the document is a spreadsheet file, specify the index number of the corresponding sheet instead.

*   This parameter takes effect only when you convert the file to an image format. It does not take effect when you convert the file into a PDF or TXT file.', example='1'),
  tags?: map[string]any(name='Tags', description='The custom tags in dictionary format. You can use the custom tags to search for the task.', example='{"test":"val1"}'),
  targetType?: string(name='TargetType', description='The format of the output file. Valid values:

*   png: a PNG image.
*   jpg: a JPG image.
*   pdf: a PDF file.
*   txt: a TXT file. You can specify this value to extract the text content of the source document. Only presentation, text, or spreadsheet documents can be converted to a TXT file. If the source document is a spreadsheet, only one TXT is created and sheet-related parameters do not take effect.

This parameter is required.', example='png'),
  targetURI?: string(name='TargetURI', description='The address template of the output file.

Specify the value in the `oss://{bucket}/{tags.custom}/{dirname}/{barename}.{autoext}` format. For more information, see [TargetURI template](https://help.aliyun.com/document_detail/465762.html).

>  Specify at least one of the TargetURI and TargetURIPrefix parameters.', example='oss://{bucket}/{tags.custom}/{dirname}/{barename}.{autoext}'),
  targetURIPrefix?: string(name='TargetURIPrefix', description='The prefix of the storage address of the output file.

Specify the prefix in the `oss://${Bucket}/${Prefix}/` format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Prefix}` is the prefix of the output file.

>  Specify at least one of the TargetURI and TargetURIPrefix parameters.', example='oss://bucket1/'),
  trimPolicy?: TrimPolicy(name='TrimPolicy', description='The trim policy for converting a spreadsheet file. Empty rows and columns may generate blank spaces in the output file if no appropriate trim policy is specified.'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum information length is 2,048 bytes.', example='{"file_id": "abc"}'),
}

model CreateOfficeConversionTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  endPage?: long(name='EndPage', description='The ending page for document conversion. The default value is -1, which converts the file until the last page of the file.

> 

*   If the source is a spreadsheet file, specify the index number of the corresponding sheet instead.

*   If you convert a large number of pages within the document, we recommend that you split the pages into several document conversion tasks to prevent conversion timeouts.

*   This parameter takes effect only when you convert the file into an image. It does not take effect when you convert the file into a PDF or TXT file.', example='-1'),
  firstPage?: boolean(name='FirstPage', description='Specifies whether to return only the first resulting image when you convert a spreadsheet document to images. The number of rows and the number of columns in the first image are determined by the automatic splitting process. Valid values:

*   false (default): does not return only the first resulting image. All the resulting images are returned.
*   true: returns only the first resulting image. A thumbnail is generated.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='false'),
  fitToHeight?: boolean(name='FitToHeight', description='Specifies whether to convert all rows of a spreadsheet document to one single image or a single-page PDF document when you convert the table document to an image or a PDF document. Valid values:

*   false (default): converts all rows of the document to multiple images or a multi-page PDF document. This is the default value.
*   true: converts all rows of the document to one single image or a single-page PDF document.', example='false'),
  fitToWidth?: boolean(name='FitToWidth', description='Specifies whether to convert all columns of a spreadsheet document to one single image or a single-page PDF document when you convert the spreadsheet file to an image or a PDF document. Valid values:

*   false (default): converts all columns of the document to multiple images or a multi-page PDF document.
*   true: converts all columns of the document to one single image or a single-page PDF document.', example='false'),
  holdLineFeed?: boolean(name='HoldLineFeed', description='Specifies whether to retain line feeds in the output file when a document is converted to a text file. Valid values:

*   false (default): does not retain the line feeds.
*   true: retains the line feeds.', example='false'),
  imageDPI?: long(name='ImageDPI', description='The dots per inch (DPI) of output images. Valid values: 96 to 600. Default value: 96.', example='96'),
  longPicture?: boolean(name='LongPicture', description='Specifies whether to convert the document to a long image. Valid values:

*   false (default): does not convert the document to a long image.
*   true: converts the document to a long image.

>  You can convert up to 20 pages of a document into a long image. If you convert more than 20 pages to a long image, an error may occur.', example='false'),
  longText?: boolean(name='LongText', description='Specifies whether to convert the document to a long text file. Valid values:

*   false (default): does not convert the document to a long text file. Each page of the document is converted to a text file.
*   true: converts the entire document to a long text file.', example='false'),
  maxSheetColumn?: long(name='MaxSheetColumn', description='The maximum number of spreadsheet columns to be converted to an image. By default, all columns within the spreadsheet file are converted.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='10'),
  maxSheetRow?: long(name='MaxSheetRow', description='The maximum number of spreadsheet rows to be converted to an image. By default, all rows within the spreadsheet file are converted.

>  This parameter takes effect only when the **LongPicture** parameter is set to `true`.', example='10'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  pages?: string(name='Pages', description='The numbers of pages to be converted. This parameter takes precedence over the StartPage and EndPage parameters. The value of this parameter can be in different formats:

*   If you specify pages separately by page number, separate page numbers with commas (,). Example: 1,2
*   If you specify consecutive pages by using a page range, connect the starting and ending page numbers with a hyphen (-). Example: 1,2-4,7', example='1,2-4,7'),
  paperHorizontal?: boolean(name='PaperHorizontal', description='Specifies whether to place sheets of paper horizontally for converting a spreadsheet document to images. Conversion to images is similar to printing the content on a sheet of paper. Valid values:

*   false (default): does not place sheets of paper horizontally. Paper sheets are placed vertically.
*   true: places sheets of paper horizontally.', example='false'),
  paperSize?: string(name='PaperSize', description='The paper size for converting a spreadsheet document to images. Conversion to images is similar to printing the content on a sheet of paper. Valid values:

*   A0
*   A2
*   A4 (default)

>  This parameter takes effect only when the **FitToHeight** and **FitToWidth** parameters are specified.', example='A4'),
  password?: string(name='Password', description='The password that protects the source document. To convert a password-protected document, specify this parameter.', example='********'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  quality?: long(name='Quality', description='The quality of the output file. Valid values: 0 to 100. A smaller value indicates lower quality and better conversion performance. By default, the system specifies an appropriate value that provides an optimal balance between the quality and conversion performance based on the document content.', example='60'),
  scalePercentage?: long(name='ScalePercentage', description='The percentage scale relative to the source document. Valid values: 20 to 200. The default value is 100, which indicates that the document is not scaled.

>  A value that is less than 100 indicates a size reduction. A value that is greater than 100 indicates an enlargement.', example='100'),
  sheetCount?: long(name='SheetCount', description='The number of sheets to be converted to an image. By default, all sheets within the spreadsheet file are converted.', example='1'),
  sheetIndex?: long(name='SheetIndex', description='The index number of the sheet to be converted to an image. The value ranges from 1 to the index number of the last sheet. By default, the conversion starts from the first sheet.', example='1'),
  showComments?: boolean(name='ShowComments', description='Specifies whether to display comments in resulting images when a text document is converted to images. Valid values:

*   false (default): does not display comments in resulting images.
*   true: displays comments in resulting images.', example='false'),
  sourceType?: string(name='SourceType', description='The name extension of the source file. By default, the type of the source file is determined based on the name extension of the source object in OSS. If the object in OSS does not have a name extension, you can specify this parameter. Valid values:

*   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
*   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
*   Spreadsheet documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
*   PDF documents: pdf', example='doc'),
  sourceURI?: string(name='SourceURI', description='The URI of the source file.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
  sourcesShrink?: string(name='Sources'),
  startPage?: long(name='StartPage', description='The starting page for document conversion. Default value: 1.

> 

*   If the document is a spreadsheet file, specify the index number of the corresponding sheet instead.

*   This parameter takes effect only when you convert the file to an image format. It does not take effect when you convert the file into a PDF or TXT file.', example='1'),
  tagsShrink?: string(name='Tags', description='The custom tags in dictionary format. You can use the custom tags to search for the task.', example='{"test":"val1"}'),
  targetType?: string(name='TargetType', description='The format of the output file. Valid values:

*   png: a PNG image.
*   jpg: a JPG image.
*   pdf: a PDF file.
*   txt: a TXT file. You can specify this value to extract the text content of the source document. Only presentation, text, or spreadsheet documents can be converted to a TXT file. If the source document is a spreadsheet, only one TXT is created and sheet-related parameters do not take effect.

This parameter is required.', example='png'),
  targetURI?: string(name='TargetURI', description='The address template of the output file.

Specify the value in the `oss://{bucket}/{tags.custom}/{dirname}/{barename}.{autoext}` format. For more information, see [TargetURI template](https://help.aliyun.com/document_detail/465762.html).

>  Specify at least one of the TargetURI and TargetURIPrefix parameters.', example='oss://{bucket}/{tags.custom}/{dirname}/{barename}.{autoext}'),
  targetURIPrefix?: string(name='TargetURIPrefix', description='The prefix of the storage address of the output file.

Specify the prefix in the `oss://${Bucket}/${Prefix}/` format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Prefix}` is the prefix of the output file.

>  Specify at least one of the TargetURI and TargetURIPrefix parameters.', example='oss://bucket1/'),
  trimPolicyShrink?: string(name='TrimPolicy', description='The trim policy for converting a spreadsheet file. Empty rows and columns may generate blank spaces in the output file if no appropriate trim policy is specified.'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum information length is 2,048 bytes.', example='{"file_id": "abc"}'),
}

model CreateOfficeConversionTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='2C2-1I0EG57VR37J4rQ8oKG6C9*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='FF3B7D81-66AE-47E0-BF69-157DCF18*****'),
  taskId?: string(name='TaskId', description='The task ID.', example='formatconvert-00bec802-073a-4b61-ba3b-39bc2fdd*****'),
}

model CreateOfficeConversionTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateOfficeConversionTaskResponseBody(name='body'),
}

/**
 * @summary Creates a document format conversion task to convert the format of a document stored in an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports the following input formats:
 *     *   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
 *     *   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
 *     *   Spreadsheet documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
 *     *   PDF documents: pdf
 * *   The operation supports the following output formats:
 *     *   Image files: png and jpg
 *     *   Text files: txt
 *     *   PDF files: pdf
 * *   Each input document can be up to 200 MB in size.
 * *   The maximum conversion time is 120 seconds. If the document contains too much or complex content, the conversion may time out.
 * *   The operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can use one of the following methods to query task information:
 *     *   Call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.``
 *     *   In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task information notifications. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html). For information about SMQ SDKs, see [Use queues](https://help.aliyun.com/document_detail/32449.html).
 *     *   In the region in which the IMM project is located, create an ApsaraMQ for RocketMQ 4.0 instance, a topic, and a group to receive task notifications. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html). For more information about how to use ApsaraMQ for RocketMQ, see [Call HTTP SDKs to send and subscribe to messages](https://help.aliyun.com/document_detail/169009.html).
 *     *   In the region in which the IMM project is located, use [EventBridge](https://www.alibabacloud.com/en/product/eventbridge) to receive task information notifications. For more information, see [IMM events](https://help.aliyun.com/document_detail/205730.html).
 *
 * @param tmpReq CreateOfficeConversionTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateOfficeConversionTaskResponse
 */
async function createOfficeConversionTaskWithOptions(tmpReq: CreateOfficeConversionTaskRequest, runtime: $RuntimeOptions): CreateOfficeConversionTaskResponse {
  tmpReq.validate();
  var request = new CreateOfficeConversionTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sources)) {
    request.sourcesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sources, 'Sources', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  if (!$isNull(tmpReq.trimPolicy)) {
    request.trimPolicyShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.trimPolicy, 'TrimPolicy', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.endPage)) {
    query['EndPage'] = request.endPage;
  }
  if (!$isNull(request.firstPage)) {
    query['FirstPage'] = request.firstPage;
  }
  if (!$isNull(request.fitToHeight)) {
    query['FitToHeight'] = request.fitToHeight;
  }
  if (!$isNull(request.fitToWidth)) {
    query['FitToWidth'] = request.fitToWidth;
  }
  if (!$isNull(request.holdLineFeed)) {
    query['HoldLineFeed'] = request.holdLineFeed;
  }
  if (!$isNull(request.imageDPI)) {
    query['ImageDPI'] = request.imageDPI;
  }
  if (!$isNull(request.longPicture)) {
    query['LongPicture'] = request.longPicture;
  }
  if (!$isNull(request.longText)) {
    query['LongText'] = request.longText;
  }
  if (!$isNull(request.maxSheetColumn)) {
    query['MaxSheetColumn'] = request.maxSheetColumn;
  }
  if (!$isNull(request.maxSheetRow)) {
    query['MaxSheetRow'] = request.maxSheetRow;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.pages)) {
    query['Pages'] = request.pages;
  }
  if (!$isNull(request.paperHorizontal)) {
    query['PaperHorizontal'] = request.paperHorizontal;
  }
  if (!$isNull(request.paperSize)) {
    query['PaperSize'] = request.paperSize;
  }
  if (!$isNull(request.password)) {
    query['Password'] = request.password;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.quality)) {
    query['Quality'] = request.quality;
  }
  if (!$isNull(request.scalePercentage)) {
    query['ScalePercentage'] = request.scalePercentage;
  }
  if (!$isNull(request.sheetCount)) {
    query['SheetCount'] = request.sheetCount;
  }
  if (!$isNull(request.sheetIndex)) {
    query['SheetIndex'] = request.sheetIndex;
  }
  if (!$isNull(request.showComments)) {
    query['ShowComments'] = request.showComments;
  }
  if (!$isNull(request.sourceType)) {
    query['SourceType'] = request.sourceType;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.startPage)) {
    query['StartPage'] = request.startPage;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetType)) {
    query['TargetType'] = request.targetType;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  if (!$isNull(request.targetURIPrefix)) {
    query['TargetURIPrefix'] = request.targetURIPrefix;
  }
  if (!$isNull(request.trimPolicyShrink)) {
    query['TrimPolicy'] = request.trimPolicyShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var body : map[string]any = {};
  if (!$isNull(request.sourcesShrink)) {
    body['Sources'] = request.sourcesShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateOfficeConversionTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a document format conversion task to convert the format of a document stored in an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The operation supports the following input formats:
 *     *   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
 *     *   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
 *     *   Spreadsheet documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
 *     *   PDF documents: pdf
 * *   The operation supports the following output formats:
 *     *   Image files: png and jpg
 *     *   Text files: txt
 *     *   PDF files: pdf
 * *   Each input document can be up to 200 MB in size.
 * *   The maximum conversion time is 120 seconds. If the document contains too much or complex content, the conversion may time out.
 * *   The operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can use one of the following methods to query task information:
 *     *   Call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.``
 *     *   In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task information notifications. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html). For information about SMQ SDKs, see [Use queues](https://help.aliyun.com/document_detail/32449.html).
 *     *   In the region in which the IMM project is located, create an ApsaraMQ for RocketMQ 4.0 instance, a topic, and a group to receive task notifications. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html). For more information about how to use ApsaraMQ for RocketMQ, see [Call HTTP SDKs to send and subscribe to messages](https://help.aliyun.com/document_detail/169009.html).
 *     *   In the region in which the IMM project is located, use [EventBridge](https://www.alibabacloud.com/en/product/eventbridge) to receive task information notifications. For more information, see [IMM events](https://help.aliyun.com/document_detail/205730.html).
 *
 * @param request CreateOfficeConversionTaskRequest
 * @return CreateOfficeConversionTaskResponse
 */
async function createOfficeConversionTask(request: CreateOfficeConversionTaskRequest): CreateOfficeConversionTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createOfficeConversionTaskWithOptions(request, runtime);
}

model CreateProjectRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', description='The maximum number of bindings for each dataset. Valid values: 1 to 10. Default value: 10.', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', description='The maximum number of metadata entities in each dataset. Default value: 10000000000.

>  This is a precautionary setting that does not impose practical limitations.', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', description='The maximum number of files in each dataset. Valid values: 1 to 100000000. Default value: 10000000000.', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', description='The maximum number of metadata relationships in each dataset. Default value: 100000000000.

>  This is a precautionary setting that does not impose practical limitations.', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', description='The maximum size of files in each dataset. If the maximum size is exceeded, no indexes can be added. Unit: bytes. Default value: 90000000000000000.', example='90000000000000000'),
  description?: string(name='Description', description='The description of the project. The description must be 1 to 256 characters in length. You can leave this parameter empty.', example='immtest'),
  projectMaxDatasetCount?: long(name='ProjectMaxDatasetCount', description='The maximum number of datasets in the project. Valid values: 1 to 1000000000. Default value: 1000000000.', example='1000000000'),
  projectName?: string(name='ProjectName', description='The name of the project. The name must meet the following requirements:

*   The name must be 1 to 128 characters in length
*   and can contain only letters, digits, hyphens (-), and underscores (_).
*   The name must start with a letter or an underscores (_).

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The name of the Resource Access Management (RAM) role. You must attach the RAM role to IMM to allow IMM to access other cloud resources, such as Object Storage Service (OSS). Default value: `AliyunIMMDefaultRole`.

You can also create a custom role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Grant permissions to a RAM user](https://help.aliyun.com/document_detail/477257.html).', example='AliyunIMMDefaultRole'),
  tag?: [ 
    {
      key?: string(name='Key', description='The tag key.', example='TestKey'),
      value?: string(name='Value', description='The tag value.', example='TestValue'),
    }
  ](name='Tag', description='The tags.'),
  templateId?: string(name='TemplateId', description='The ID of the workflow template. You can leave this parameter empty. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).', example='Official:AllFunction'),
}

model CreateProjectShrinkRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', description='The maximum number of bindings for each dataset. Valid values: 1 to 10. Default value: 10.', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', description='The maximum number of metadata entities in each dataset. Default value: 10000000000.

>  This is a precautionary setting that does not impose practical limitations.', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', description='The maximum number of files in each dataset. Valid values: 1 to 100000000. Default value: 10000000000.', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', description='The maximum number of metadata relationships in each dataset. Default value: 100000000000.

>  This is a precautionary setting that does not impose practical limitations.', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', description='The maximum size of files in each dataset. If the maximum size is exceeded, no indexes can be added. Unit: bytes. Default value: 90000000000000000.', example='90000000000000000'),
  description?: string(name='Description', description='The description of the project. The description must be 1 to 256 characters in length. You can leave this parameter empty.', example='immtest'),
  projectMaxDatasetCount?: long(name='ProjectMaxDatasetCount', description='The maximum number of datasets in the project. Valid values: 1 to 1000000000. Default value: 1000000000.', example='1000000000'),
  projectName?: string(name='ProjectName', description='The name of the project. The name must meet the following requirements:

*   The name must be 1 to 128 characters in length
*   and can contain only letters, digits, hyphens (-), and underscores (_).
*   The name must start with a letter or an underscores (_).

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The name of the Resource Access Management (RAM) role. You must attach the RAM role to IMM to allow IMM to access other cloud resources, such as Object Storage Service (OSS). Default value: `AliyunIMMDefaultRole`.

You can also create a custom role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Grant permissions to a RAM user](https://help.aliyun.com/document_detail/477257.html).', example='AliyunIMMDefaultRole'),
  tagShrink?: string(name='Tag', description='The tags.'),
  templateId?: string(name='TemplateId', description='The ID of the workflow template. You can leave this parameter empty. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).', example='Official:AllFunction'),
}

model CreateProjectResponseBody = {
  project?: Project(name='Project', description='The project.'),
  requestId?: string(name='RequestId', description='The request ID.', example='7F7D235C-76FF-4B65-800C-8238AE3F****'),
}

model CreateProjectResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateProjectResponseBody(name='body'),
}

/**
 * @summary Creates a project.
 *
 * @description *   The name of a project must be unique in a region.
 * *   By default, you can create up to 100 projects in a region. If you want to request a quota increase to create more projects, submit a ticket or join the DingTalk chat group (ID: 88490020073).
 * *   After you create a project, you can create other Intelligent Media Management (IMM) resources in the project. For more information, see the following links:
 *     *   [CreateDataset](https://help.aliyun.com/document_detail/478160.html)
 *     *   [CreateTrigger](https://help.aliyun.com/document_detail/479912.html)
 *     *   [CreateBatch](https://help.aliyun.com/document_detail/606694.html)
 *     *   [CreateBinding](https://help.aliyun.com/document_detail/478202.html)
 *
 * @param tmpReq CreateProjectRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateProjectResponse
 */
async function createProjectWithOptions(tmpReq: CreateProjectRequest, runtime: $RuntimeOptions): CreateProjectResponse {
  tmpReq.validate();
  var request = new CreateProjectShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.tag)) {
    request.tagShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tag, 'Tag', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetMaxBindCount)) {
    query['DatasetMaxBindCount'] = request.datasetMaxBindCount;
  }
  if (!$isNull(request.datasetMaxEntityCount)) {
    query['DatasetMaxEntityCount'] = request.datasetMaxEntityCount;
  }
  if (!$isNull(request.datasetMaxFileCount)) {
    query['DatasetMaxFileCount'] = request.datasetMaxFileCount;
  }
  if (!$isNull(request.datasetMaxRelationCount)) {
    query['DatasetMaxRelationCount'] = request.datasetMaxRelationCount;
  }
  if (!$isNull(request.datasetMaxTotalFileSize)) {
    query['DatasetMaxTotalFileSize'] = request.datasetMaxTotalFileSize;
  }
  if (!$isNull(request.description)) {
    query['Description'] = request.description;
  }
  if (!$isNull(request.projectMaxDatasetCount)) {
    query['ProjectMaxDatasetCount'] = request.projectMaxDatasetCount;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.serviceRole)) {
    query['ServiceRole'] = request.serviceRole;
  }
  if (!$isNull(request.tagShrink)) {
    query['Tag'] = request.tagShrink;
  }
  if (!$isNull(request.templateId)) {
    query['TemplateId'] = request.templateId;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateProject',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a project.
 *
 * @description *   The name of a project must be unique in a region.
 * *   By default, you can create up to 100 projects in a region. If you want to request a quota increase to create more projects, submit a ticket or join the DingTalk chat group (ID: 88490020073).
 * *   After you create a project, you can create other Intelligent Media Management (IMM) resources in the project. For more information, see the following links:
 *     *   [CreateDataset](https://help.aliyun.com/document_detail/478160.html)
 *     *   [CreateTrigger](https://help.aliyun.com/document_detail/479912.html)
 *     *   [CreateBatch](https://help.aliyun.com/document_detail/606694.html)
 *     *   [CreateBinding](https://help.aliyun.com/document_detail/478202.html)
 *
 * @param request CreateProjectRequest
 * @return CreateProjectResponse
 */
async function createProject(request: CreateProjectRequest): CreateProjectResponse {
  var runtime = new $RuntimeOptions{};
  return createProjectWithOptions(request, runtime);
}

model CreateSimilarImageClusteringTaskRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateSimilarImageClusteringTaskShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{
      "User": "Jane"
}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='test-data'),
}

model CreateSimilarImageClusteringTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='3BF-1UhtFyrua71eOkFlqYq23Co****'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  taskId?: string(name='TaskId', description='The task ID.', example='SimilarImageClustering-48d0a0f3-8459-47f4-b8af-ff49c64****'),
}

model CreateSimilarImageClusteringTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateSimilarImageClusteringTaskResponseBody(name='body'),
}

/**
 * @summary Clusters images indexed into a dataset by similarity. Image clustering is suitable for image deduplication and selection. For example, you can use image clustering to filter photos in your album that are taken in continuous shooting mode.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note that** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Each call to the operation incrementally processes metadata in the dataset.****`` You can regularly call this operation to process incremental files.
 * *   After clustering is completed, you can call the [QuerySimilarImageClusters](https://help.aliyun.com/document_detail/611304.html) operation to query image clustering results.
 * *   An image cluster contains at lest two images. Removing similar images from the dataset affects existing image clusters. If image deletion reduces the number of images in a cluster to less than 2, the cluster is automatically deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateSimilarImageClusteringTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateSimilarImageClusteringTaskResponse
 */
async function createSimilarImageClusteringTaskWithOptions(tmpReq: CreateSimilarImageClusteringTaskRequest, runtime: $RuntimeOptions): CreateSimilarImageClusteringTaskResponse {
  tmpReq.validate();
  var request = new CreateSimilarImageClusteringTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateSimilarImageClusteringTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Clusters images indexed into a dataset by similarity. Image clustering is suitable for image deduplication and selection. For example, you can use image clustering to filter photos in your album that are taken in continuous shooting mode.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note that** Asynchronous processing does not guarantee timely task completion.
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Each call to the operation incrementally processes metadata in the dataset.****`` You can regularly call this operation to process incremental files.
 * *   After clustering is completed, you can call the [QuerySimilarImageClusters](https://help.aliyun.com/document_detail/611304.html) operation to query image clustering results.
 * *   An image cluster contains at lest two images. Removing similar images from the dataset affects existing image clusters. If image deletion reduces the number of images in a cluster to less than 2, the cluster is automatically deleted.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateSimilarImageClusteringTaskRequest
 * @return CreateSimilarImageClusteringTaskResponse
 */
async function createSimilarImageClusteringTask(request: CreateSimilarImageClusteringTaskRequest): CreateSimilarImageClusteringTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createSimilarImageClusteringTaskWithOptions(request, runtime);
}

model CreateStoryRequest {
  address?: AddressForStory(name='Address', description='The address of the story. IMM filters candidate photos to generate a story based on the value of this parameter. This parameter takes effect only if you set StoryType to TravelMemory.

>  If you are located in Hong Kong (China), Macao (China), Taiwan (China), or overseas, you cannot specify an address in the Chinese mainland by using this parameter.'),
  customId?: string(name='CustomId', description='The custom ID. A custom ID of a generated story may differ from the value of ObjectID and can be utilized for subsequent retrieval and sorting of stories.', example='test'),
  customLabels?: map[string]any(name='CustomLabels', description='The custom labels. Labels specify the custom information of the story. This enables retrieval based on your business requirements.', example='{"Bucket": "examplebucket"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. For information about how to obtain the name of a dataset, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  maxFileCount?: long(name='MaxFileCount', description='The maximum number of photo files in the story. The actual number of photo files ranges from the value of MinFileCount to the value of MaxFileCount. The value of this parameter must be an integer greater than the value of MinFileCount. To provide the desired effect, the algorithm limits the maximum number of photo files to 1,500. If you set MaxFileCount to a value greater than 1,500, this parameter does not take effect.', example='3'),
  minFileCount?: long(name='MinFileCount', description='The minimum number of photo files in the story. The actual number of photo files ranges from the value of MinFileCount to the value of MaxFileCount. The value of this parameter must be an integer greater than 1. If the actual number of candidate photos is less than the value of this parameter, a null story is returned.', example='1'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  notifyTopicName?: string(name='NotifyTopicName', description='The topic name of the asynchronous reverse notification.', example='test-topic'),
  objectId?: string(name='ObjectId', description='The ID of the story. This parameter is optional. If you leave this parameter empty, IMM assigns a unique identifier to the story. You can query and update a story based on its ID. You can also manually create an ID for a story. After you create an ID for a story, you must specify this parameter to pass the ID into the system. This way, IMM can record the ID as the unique identifier of the story. If you pass an existing ID into the system, IMM updates the story that corresponds to the ID.', example='id1'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  storyEndTime?: string(name='StoryEndTime', description='The end time of the photo collection for which you want to create the story. StoryStartTime and StoryEndTime form a time interval based on which IMM filters candidate photos to generate a story. The value must be a string in the RFC3339 format.', example='2021-12-30T16:00:00Z'),
  storyName?: string(name='StoryName', description='The name of the story.', example='name1'),
  storyStartTime?: string(name='StoryStartTime', description='The start time of the photo collection for which you want to create the story. StoryStartTime and StoryEndTime form a time interval based on which IMM filters candidate photos to generate a story. The value must be a string in the RFC3339 format.', example='2016-12-30T16:00:00Z'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For information about valid subtypes, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='Solo'),
  storyType?: string(name='StoryType', description='The type of the story. For information about valid types, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='PeopleMemory'),
  tags?: map[string]any(name='Tags', description='The tags. You can specify this parameter in one of the following scenarios:

*   Specify tags as custom data, which is returned in messages provided by Simple Message Queue.
*   Search for tasks by tag.
*   Specify tags as variables in destination URIs.', example='{"key":"val"}'),
  userData?: string(name='UserData', description='The custom information, which is returned as asynchronous notifications to facilitate notification management in your system. The maximum information length is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateStoryShrinkRequest {
  addressShrink?: string(name='Address', description='The address of the story. IMM filters candidate photos to generate a story based on the value of this parameter. This parameter takes effect only if you set StoryType to TravelMemory.

>  If you are located in Hong Kong (China), Macao (China), Taiwan (China), or overseas, you cannot specify an address in the Chinese mainland by using this parameter.'),
  customId?: string(name='CustomId', description='The custom ID. A custom ID of a generated story may differ from the value of ObjectID and can be utilized for subsequent retrieval and sorting of stories.', example='test'),
  customLabelsShrink?: string(name='CustomLabels', description='The custom labels. Labels specify the custom information of the story. This enables retrieval based on your business requirements.', example='{"Bucket": "examplebucket"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. For information about how to obtain the name of a dataset, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  maxFileCount?: long(name='MaxFileCount', description='The maximum number of photo files in the story. The actual number of photo files ranges from the value of MinFileCount to the value of MaxFileCount. The value of this parameter must be an integer greater than the value of MinFileCount. To provide the desired effect, the algorithm limits the maximum number of photo files to 1,500. If you set MaxFileCount to a value greater than 1,500, this parameter does not take effect.', example='3'),
  minFileCount?: long(name='MinFileCount', description='The minimum number of photo files in the story. The actual number of photo files ranges from the value of MinFileCount to the value of MaxFileCount. The value of this parameter must be an integer greater than 1. If the actual number of candidate photos is less than the value of this parameter, a null story is returned.', example='1'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  notifyTopicName?: string(name='NotifyTopicName', description='The topic name of the asynchronous reverse notification.', example='test-topic'),
  objectId?: string(name='ObjectId', description='The ID of the story. This parameter is optional. If you leave this parameter empty, IMM assigns a unique identifier to the story. You can query and update a story based on its ID. You can also manually create an ID for a story. After you create an ID for a story, you must specify this parameter to pass the ID into the system. This way, IMM can record the ID as the unique identifier of the story. If you pass an existing ID into the system, IMM updates the story that corresponds to the ID.', example='id1'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  storyEndTime?: string(name='StoryEndTime', description='The end time of the photo collection for which you want to create the story. StoryStartTime and StoryEndTime form a time interval based on which IMM filters candidate photos to generate a story. The value must be a string in the RFC3339 format.', example='2021-12-30T16:00:00Z'),
  storyName?: string(name='StoryName', description='The name of the story.', example='name1'),
  storyStartTime?: string(name='StoryStartTime', description='The start time of the photo collection for which you want to create the story. StoryStartTime and StoryEndTime form a time interval based on which IMM filters candidate photos to generate a story. The value must be a string in the RFC3339 format.', example='2016-12-30T16:00:00Z'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For information about valid subtypes, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='Solo'),
  storyType?: string(name='StoryType', description='The type of the story. For information about valid types, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).

This parameter is required.', example='PeopleMemory'),
  tagsShrink?: string(name='Tags', description='The tags. You can specify this parameter in one of the following scenarios:

*   Specify tags as custom data, which is returned in messages provided by Simple Message Queue.
*   Search for tasks by tag.
*   Specify tags as variables in destination URIs.', example='{"key":"val"}'),
  userData?: string(name='UserData', description='The custom information, which is returned as asynchronous notifications to facilitate notification management in your system. The maximum information length is 2,048 bytes.', example='{"ID": "testuid","Name": "test-user","Avatar": "http://test.com/testuid"}'),
}

model CreateStoryResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='392-1CqzvESGTEeNZ2OWFbRKIM****'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  taskId?: string(name='TaskId', description='The task ID.', example='CreateStory-4ef6ff43-edf3-4612-9cc4-0c7f9e19****'),
}

model CreateStoryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateStoryResponseBody(name='body'),
}

/**
 * @summary Creates a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateStoryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateStoryResponse
 */
async function createStoryWithOptions(tmpReq: CreateStoryRequest, runtime: $RuntimeOptions): CreateStoryResponse {
  tmpReq.validate();
  var request = new CreateStoryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.address)) {
    request.addressShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.address, 'Address', 'json');
  }
  if (!$isNull(tmpReq.customLabels)) {
    request.customLabelsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.customLabels, 'CustomLabels', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var body : map[string]any = {};
  if (!$isNull(request.addressShrink)) {
    body['Address'] = request.addressShrink;
  }
  if (!$isNull(request.customId)) {
    body['CustomId'] = request.customId;
  }
  if (!$isNull(request.customLabelsShrink)) {
    body['CustomLabels'] = request.customLabelsShrink;
  }
  if (!$isNull(request.datasetName)) {
    body['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxFileCount)) {
    body['MaxFileCount'] = request.maxFileCount;
  }
  if (!$isNull(request.minFileCount)) {
    body['MinFileCount'] = request.minFileCount;
  }
  if (!$isNull(request.notifyTopicName)) {
    body['NotifyTopicName'] = request.notifyTopicName;
  }
  if (!$isNull(request.objectId)) {
    body['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.storyEndTime)) {
    body['StoryEndTime'] = request.storyEndTime;
  }
  if (!$isNull(request.storyName)) {
    body['StoryName'] = request.storyName;
  }
  if (!$isNull(request.storyStartTime)) {
    body['StoryStartTime'] = request.storyStartTime;
  }
  if (!$isNull(request.storySubType)) {
    body['StorySubType'] = request.storySubType;
  }
  if (!$isNull(request.storyType)) {
    body['StoryType'] = request.storyType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateStory',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The operation is an asynchronous operation. After a task is executed, the task information is saved only for seven days. When the retention period ends, the task information can no longer be retrieved. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) to query information about the task. If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateStoryRequest
 * @return CreateStoryResponse
 */
async function createStory(request: CreateStoryRequest): CreateStoryResponse {
  var runtime = new $RuntimeOptions{};
  return createStoryWithOptions(request, runtime);
}

model CreateTriggerRequest {
  actions?: [ 
    {
      fastFailPolicy?: FastFailPolicy(name='FastFailPolicy', description='The policy configurations for handling failures.'),
      name?: string(name='Name', description='The name of the template.

This parameter is required.', example='doc/convert'),
      parameters?: [ string ](name='Parameters', description='The template parameters.'),
    }
  ](name='Actions', description='The templates.

This parameter is required.'),
  input?: Input(name='Input', description='The data source configurations.

This parameter is required.'),
  notification?: {
    MNS?: MNS(name='MNS', description='The SMQ notification settings.'),
  }(name='Notification', description='The notification settings. The operation supports multiple messaging middleware options. For more information about notification messages, see Asynchronous message examples. You can use one of the following methods to receive notification messages:

In the region in which the IMM project is located, use EventBridge to receive task notifications. For more information, see IMM events. In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task notifications.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The service role. IMM assumes the service role so that it can access resources in other cloud services, such as OSS. Default value: AliyunIMMBatchTriggerRole.

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).

This parameter is required.', example='AliyunIMMDefaultRole'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key":"val"}'),
}

model CreateTriggerShrinkRequest {
  actionsShrink?: string(name='Actions', description='The templates.

This parameter is required.'),
  inputShrink?: string(name='Input', description='The data source configurations.

This parameter is required.'),
  notificationShrink?: string(name='Notification', description='The notification settings. The operation supports multiple messaging middleware options. For more information about notification messages, see Asynchronous message examples. You can use one of the following methods to receive notification messages:

In the region in which the IMM project is located, use EventBridge to receive task notifications. For more information, see IMM events. In the region in which the IMM project is located, configure a Simple Message Queue (SMQ) subscription to receive task notifications.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The service role. IMM assumes the service role so that it can access resources in other cloud services, such as OSS. Default value: AliyunIMMBatchTriggerRole.

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).

This parameter is required.', example='AliyunIMMDefaultRole'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key":"val"}'),
}

model CreateTriggerResponseBody = {
  id?: string(name='Id', description='The ID of the trigger.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
}

model CreateTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateTriggerResponseBody(name='body'),
}

/**
 * @summary Creates a trigger. A trigger can trigger Intelligent Media Management (IMM) based on events such as events in Object Storage Service (OSS) to process files, such as images, videos, and documents based on data processing templates.
 *
 * @description If you want to create a trigger to process data in [OSS](https://help.aliyun.com/document_detail/99372.html), make sure that you have bound the dataset to the OSS bucket where the data is stored. For more information about how to bind a dataset to a bucket, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param tmpReq CreateTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateTriggerResponse
 */
async function createTriggerWithOptions(tmpReq: CreateTriggerRequest, runtime: $RuntimeOptions): CreateTriggerResponse {
  tmpReq.validate();
  var request = new CreateTriggerShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.actions)) {
    request.actionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.actions, 'Actions', 'json');
  }
  if (!$isNull(tmpReq.input)) {
    request.inputShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.input, 'Input', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.actionsShrink)) {
    body['Actions'] = request.actionsShrink;
  }
  if (!$isNull(request.inputShrink)) {
    body['Input'] = request.inputShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    body['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.serviceRole)) {
    body['ServiceRole'] = request.serviceRole;
  }
  if (!$isNull(request.tagsShrink)) {
    body['Tags'] = request.tagsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates a trigger. A trigger can trigger Intelligent Media Management (IMM) based on events such as events in Object Storage Service (OSS) to process files, such as images, videos, and documents based on data processing templates.
 *
 * @description If you want to create a trigger to process data in [OSS](https://help.aliyun.com/document_detail/99372.html), make sure that you have bound the dataset to the OSS bucket where the data is stored. For more information about how to bind a dataset to a bucket, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request CreateTriggerRequest
 * @return CreateTriggerResponse
 */
async function createTrigger(request: CreateTriggerRequest): CreateTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return createTriggerWithOptions(request, runtime);
}

model CreateVideoLabelClassificationTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the video file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://bucket1/object'),
  tags?: map[string]any(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"test":"val1"}'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateVideoLabelClassificationTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the video file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

This parameter is required.', example='oss://bucket1/object'),
  tagsShrink?: string(name='Tags', description='The custom tags, which can be used to search for and filter asynchronous tasks.', example='{"test":"val1"}'),
  userData?: string(name='UserData', description='The custom data, which is returned in an asynchronous notification and facilitates notification management. The maximum length is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateVideoLabelClassificationTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID of the current task. You can use [EventBridge](https://www.alibabacloud.com/en/product/eventbridge) to query the ID and obtain the task information notification.', example='03F-1Qt1Yn5RZZ0Zh3ZdYlDblv7****'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFFE0B6'),
  taskId?: string(name='TaskId', description='The ID of the current task. You can call the [GetTask](~~GetTask~~) operation to view the task information or the [GetVideoLabelClassificationResult](https://help.aliyun.com/document_detail/478224.html) operation to obtain the result of the video label detection task.', example='VideoLabelClassification-2f157087-91df-4fda-8c3e-232407ec*****'),
}

model CreateVideoLabelClassificationTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateVideoLabelClassificationTaskResponseBody(name='body'),
}

/**
 * @summary Detects the scene, object, and event tag information of video content. Scene information includes categories such as natural landscapes, life scenes, and disaster scenes. Event information includes categories such as talent shows, office events, performances, and production events. Object information includes categories such as tableware, electronic products, furniture, and transportation. Video tag detection supports more than 30 tag categories and thousands of tags.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/2747104.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   For more information about video label detection, see [Video label detection](https://help.aliyun.com/document_detail/477189.html).
 * *   This operation supports multiple video formats, such as MP4, MPEG-TS, MKV, MOV, AVI, FLV, and M3U8.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param tmpReq CreateVideoLabelClassificationTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateVideoLabelClassificationTaskResponse
 */
async function createVideoLabelClassificationTaskWithOptions(tmpReq: CreateVideoLabelClassificationTaskRequest, runtime: $RuntimeOptions): CreateVideoLabelClassificationTaskResponse {
  tmpReq.validate();
  var request = new CreateVideoLabelClassificationTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateVideoLabelClassificationTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects the scene, object, and event tag information of video content. Scene information includes categories such as natural landscapes, life scenes, and disaster scenes. Event information includes categories such as talent shows, office events, performances, and production events. Object information includes categories such as tableware, electronic products, furniture, and transportation. Video tag detection supports more than 30 tag categories and thousands of tags.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/2747104.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   For more information about video label detection, see [Video label detection](https://help.aliyun.com/document_detail/477189.html).
 * *   This operation supports multiple video formats, such as MP4, MPEG-TS, MKV, MOV, AVI, FLV, and M3U8.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications.
 *
 * @param request CreateVideoLabelClassificationTaskRequest
 * @return CreateVideoLabelClassificationTaskResponse
 */
async function createVideoLabelClassificationTask(request: CreateVideoLabelClassificationTaskRequest): CreateVideoLabelClassificationTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createVideoLabelClassificationTaskWithOptions(request, runtime);
}

model CreateVideoModerationTaskRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  interval?: long(name='Interval', description='The interval of capturing video frames. Unit: seconds. Valid values: 1 to 600. Default value: 1.', example='1'),
  maxFrames?: long(name='MaxFrames', description='The maximum number of frames that can be captured from the video. Valid values: 5 to 3600. Default value: 200.', example='200'),
  notification?: Notification(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  scenes?: [ string ](name='Scenes', description='The scenarios of video moderation.'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the video.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-object'),
  tags?: map[string]any(name='Tags', description='The custom tags. The custom tags help you retrieve the task.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateVideoModerationTaskShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  interval?: long(name='Interval', description='The interval of capturing video frames. Unit: seconds. Valid values: 1 to 600. Default value: 1.', example='1'),
  maxFrames?: long(name='MaxFrames', description='The maximum number of frames that can be captured from the video. Valid values: 5 to 3600. Default value: 200.', example='200'),
  notificationShrink?: string(name='Notification', description='The notification settings. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  scenesShrink?: string(name='Scenes', description='The scenarios of video moderation.'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the video.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-object'),
  tagsShrink?: string(name='Tags', description='The custom tags. The custom tags help you retrieve the task.', example='{"test": "val1"}'),
  userData?: string(name='UserData', description='The custom information, which is returned in an asynchronous notification and facilitates notification management. The maximum length of the value is 2,048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model CreateVideoModerationTaskResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='2E6-1I0FGn0zFnl5AflRfhzClma*****'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  taskId?: string(name='TaskId', description='The task ID.', example='VideoModeration-9442a216-4691-4a48-846d-76daccaf*****'),
}

model CreateVideoModerationTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: CreateVideoModerationTaskResponseBody(name='body'),
}

/**
 * @summary Detects risky or non-compliant content from videos. You can use this operation in scenarios such as intelligent pornography detection, terrorist content and political bias detection, ad violation detection, and logo detection.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The detection result is sent as an asynchronous notification. The Suggestion parameter in asynchronous notifications supports the following values:
 *     *   pass: No non-compliant content is found.
 *     *   block: Non-compliant content is detected. The Categories field value indicates the non-compliance category. For more information, see [Content moderation results](https://help.aliyun.com/document_detail/2743995.html).
 *     *   review: A manual review is needed. After the manual review is completed, an asynchronous notification is sent to inform you about the result.
 * *   The following video frame requirements apply:
 *     *   The URLs for video frames must use HTTP or HTTPS.
 *     *   Video frames must be in PNG, JPG, JPEG, BMP, GIF, or WebP format.
 *     *   The size of a video frame cannot exceed 10 MB.
 *     *   The resolution for video frames is not lower than 256 × 256 pixels. A frame resolution lower than this recommended resolution may affect detection accuracy.
 *     *   The response time of the operation varies based on the amount of time required to download frames. Make sure that video frames to be detected are stored in a reliable and stable service. We recommend that you store video frames in OSS or cache video frames on Alibaba Cloud CDN.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications. >
 *
 * @param tmpReq CreateVideoModerationTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return CreateVideoModerationTaskResponse
 */
async function createVideoModerationTaskWithOptions(tmpReq: CreateVideoModerationTaskRequest, runtime: $RuntimeOptions): CreateVideoModerationTaskResponse {
  tmpReq.validate();
  var request = new CreateVideoModerationTaskShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.scenes)) {
    request.scenesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.scenes, 'Scenes', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.interval)) {
    query['Interval'] = request.interval;
  }
  if (!$isNull(request.maxFrames)) {
    query['MaxFrames'] = request.maxFrames;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.scenesShrink)) {
    query['Scenes'] = request.scenesShrink;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'CreateVideoModerationTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects risky or non-compliant content from videos. You can use this operation in scenarios such as intelligent pornography detection, terrorist content and political bias detection, ad violation detection, and logo detection.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 *     **
 *     **Note** Asynchronous processing does not guarantee timely task completion.
 * *   The detection result is sent as an asynchronous notification. The Suggestion parameter in asynchronous notifications supports the following values:
 *     *   pass: No non-compliant content is found.
 *     *   block: Non-compliant content is detected. The Categories field value indicates the non-compliance category. For more information, see [Content moderation results](https://help.aliyun.com/document_detail/2743995.html).
 *     *   review: A manual review is needed. After the manual review is completed, an asynchronous notification is sent to inform you about the result.
 * *   The following video frame requirements apply:
 *     *   The URLs for video frames must use HTTP or HTTPS.
 *     *   Video frames must be in PNG, JPG, JPEG, BMP, GIF, or WebP format.
 *     *   The size of a video frame cannot exceed 10 MB.
 *     *   The resolution for video frames is not lower than 256 × 256 pixels. A frame resolution lower than this recommended resolution may affect detection accuracy.
 *     *   The response time of the operation varies based on the amount of time required to download frames. Make sure that video frames to be detected are stored in a reliable and stable service. We recommend that you store video frames in OSS or cache video frames on Alibaba Cloud CDN.
 * *   This operation is an asynchronous operation. After a task is executed, the task information is retained only for seven days and cannot be retrieved when the retention period elapses. You can call the [GetTask](https://help.aliyun.com/document_detail/478241.html) or [ListTasks](https://help.aliyun.com/document_detail/478242.html) operation to query information about the task.`` If you specify [Notification](https://help.aliyun.com/document_detail/2743997.html), you can obtain information about the task based on notifications. >
 *
 * @param request CreateVideoModerationTaskRequest
 * @return CreateVideoModerationTaskResponse
 */
async function createVideoModerationTask(request: CreateVideoModerationTaskRequest): CreateVideoModerationTaskResponse {
  var runtime = new $RuntimeOptions{};
  return createVideoModerationTaskWithOptions(request, runtime);
}

model DeleteBatchRequest {
  id?: string(name='Id', description='The ID of the batch processing task.

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model DeleteBatchResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='91AC8C98-0F36-49D2-8290-742E24******'),
}

model DeleteBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteBatchResponseBody(name='body'),
}

/**
 * @summary Deletes a batch processing task.
 *
 * @description *   You can delete only a batch processing task that is in one of the following states: Ready, Failed, Suspended, and Succeeded.
 * *   Before you delete a batch processing task, you can call the [GetBatch](https://help.aliyun.com/document_detail/479922.html) operation to query the task status. This ensures a successful deletion.
 *
 * @param request DeleteBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteBatchResponse
 */
async function deleteBatchWithOptions(request: DeleteBatchRequest, runtime: $RuntimeOptions): DeleteBatchResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a batch processing task.
 *
 * @description *   You can delete only a batch processing task that is in one of the following states: Ready, Failed, Suspended, and Succeeded.
 * *   Before you delete a batch processing task, you can call the [GetBatch](https://help.aliyun.com/document_detail/479922.html) operation to query the task status. This ensures a successful deletion.
 *
 * @param request DeleteBatchRequest
 * @return DeleteBatchResponse
 */
async function deleteBatch(request: DeleteBatchRequest): DeleteBatchResponse {
  var runtime = new $RuntimeOptions{};
  return deleteBatchWithOptions(request, runtime);
}

model DeleteBindingRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='dataset001'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  URI?: string(name='URI', description='The URI of the OSS bucket to which the dataset is bound.

Specify the value in the oss://${Bucket} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project.

This parameter is required.', example='oss://examplebucket'),
}

model DeleteBindingResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='ACDFE467-C817-4B36-951A-6EB5A592****'),
}

model DeleteBindingResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteBindingResponseBody(name='body'),
}

/**
 * @summary Deletes the binding between a dataset and an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   If you delete a binding, new changes in the OSS bucket are not synchronized to the dataset. Exercise caution when you perform this operation.
 *
 * @param request DeleteBindingRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteBindingResponse
 */
async function deleteBindingWithOptions(request: DeleteBindingRequest, runtime: $RuntimeOptions): DeleteBindingResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URI)) {
    query['URI'] = request.URI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteBinding',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes the binding between a dataset and an Object Storage Service (OSS) bucket.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   If you delete a binding, new changes in the OSS bucket are not synchronized to the dataset. Exercise caution when you perform this operation.
 *
 * @param request DeleteBindingRequest
 * @return DeleteBindingResponse
 */
async function deleteBinding(request: DeleteBindingRequest): DeleteBindingResponse {
  var runtime = new $RuntimeOptions{};
  return deleteBindingWithOptions(request, runtime);
}

model DeleteDatasetRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For information about how to create a dataset, see [CreateDataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='dataset001'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
}

model DeleteDatasetResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='EC564B8B-BA5C-4499-B196-D9B9E76E****'),
}

model DeleteDatasetResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteDatasetResponseBody(name='body'),
}

/**
 * @summary Deletes a dataset.
 *
 * @description *   Before you delete a dataset, make sure that you have deleted all indexes in the dataset. For more information about how to delete indexes, see [DeleteFileMeta](https://help.aliyun.com/document_detail/478172.html) and [BatchDeleteFileMeta](https://help.aliyun.com/document_detail/478173.html).
 * *   Before you [delete a dataset](https://help.aliyun.com/document_detail/478160.html), make sure that you have deleted all bindings between the dataset and Object Storage Service (OSS) buckets. For more information about how to delete a binding, see [DeleteBinding](https://help.aliyun.com/document_detail/478205.html). The [DeleteBinding](https://help.aliyun.com/document_detail/478205.html) operation does not delete an index that is manually created, even if you set the `Cleanup` parameter to `true`. To delete indexes that are manually created, you must call the [DeleteFileMeta](https://help.aliyun.com/document_detail/478172.html) or [BatchDeleteFileMeta](https://help.aliyun.com/document_detail/478173.html) operation. For more information about the differences between automatically and manually created indexes, see [Create a metadata index](https://help.aliyun.com/document_detail/478166.html).
 *
 * @param request DeleteDatasetRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteDatasetResponse
 */
async function deleteDatasetWithOptions(request: DeleteDatasetRequest, runtime: $RuntimeOptions): DeleteDatasetResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteDataset',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a dataset.
 *
 * @description *   Before you delete a dataset, make sure that you have deleted all indexes in the dataset. For more information about how to delete indexes, see [DeleteFileMeta](https://help.aliyun.com/document_detail/478172.html) and [BatchDeleteFileMeta](https://help.aliyun.com/document_detail/478173.html).
 * *   Before you [delete a dataset](https://help.aliyun.com/document_detail/478160.html), make sure that you have deleted all bindings between the dataset and Object Storage Service (OSS) buckets. For more information about how to delete a binding, see [DeleteBinding](https://help.aliyun.com/document_detail/478205.html). The [DeleteBinding](https://help.aliyun.com/document_detail/478205.html) operation does not delete an index that is manually created, even if you set the `Cleanup` parameter to `true`. To delete indexes that are manually created, you must call the [DeleteFileMeta](https://help.aliyun.com/document_detail/478172.html) or [BatchDeleteFileMeta](https://help.aliyun.com/document_detail/478173.html) operation. For more information about the differences between automatically and manually created indexes, see [Create a metadata index](https://help.aliyun.com/document_detail/478166.html).
 *
 * @param request DeleteDatasetRequest
 * @return DeleteDatasetResponse
 */
async function deleteDataset(request: DeleteDatasetRequest): DeleteDatasetResponse {
  var runtime = new $RuntimeOptions{};
  return deleteDatasetWithOptions(request, runtime);
}

model DeleteFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-datset'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  URI?: string(name='URI', description='The URI of the file in OSS.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the path of the object with the extension included.

The URI of the file in Photo and Drive Service must be in the pds://domains/${domain}/drives/${drive}/files/${file}/revisions/${revision} format.

This parameter is required.', example='oss://examplebucket/exampleobject.txt'),
}

model DeleteFileMetaResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='7F82D6C9-5AC0-49F9-914D-F02678F3****'),
}

model DeleteFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteFileMetaResponseBody(name='body'),
}

/**
 * @summary Removes the metadata of a file from a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   A successful deletion message is returned regardless of whether the metadata of the file exists in the dataset.
 * > 
 * *   The objects stored in Object Storage Service (OSS) or Photo and Drive Service are **not** deleted if you delete metadata from a dataset. If you want to delete the file, call the corresponding operations of OSS and Photo and Drive Service.
 * *   When you delete file metadata, the corresponding face clustering group information and story (if any) are changed, but the spatiotemporal clustering is not changed.
 *
 * @param request DeleteFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteFileMetaResponse
 */
async function deleteFileMetaWithOptions(request: DeleteFileMetaRequest, runtime: $RuntimeOptions): DeleteFileMetaResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URI)) {
    query['URI'] = request.URI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Removes the metadata of a file from a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   A successful deletion message is returned regardless of whether the metadata of the file exists in the dataset.
 * > 
 * *   The objects stored in Object Storage Service (OSS) or Photo and Drive Service are **not** deleted if you delete metadata from a dataset. If you want to delete the file, call the corresponding operations of OSS and Photo and Drive Service.
 * *   When you delete file metadata, the corresponding face clustering group information and story (if any) are changed, but the spatiotemporal clustering is not changed.
 *
 * @param request DeleteFileMetaRequest
 * @return DeleteFileMetaResponse
 */
async function deleteFileMeta(request: DeleteFileMetaRequest): DeleteFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return deleteFileMetaWithOptions(request, runtime);
}

model DeleteLocationDateClusterRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. For information about how to create a dataset, see [CreateDataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  objectId?: string(name='ObjectId', description='The ID of the group to be deleted.

This parameter is required.', example='location-date-cluster-71dd4f32-9597-4085-a2ab-3a7b0fd0aff9'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
}

model DeleteLocationDateClusterResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='B121940C-9794-4EE3-8D6E-F8EC525F****'),
}

model DeleteLocationDateClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteLocationDateClusterResponseBody(name='body'),
}

/**
 * @summary Deletes a spatiotemporal cluster.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   Before you call this operation, you must call the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to perform spatiotemporal clustering.
 * *   A successful deletion is returned regardless of whether a spatiotemporal clustering group ID exists.
 *
 * @param request DeleteLocationDateClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteLocationDateClusterResponse
 */
async function deleteLocationDateClusterWithOptions(request: DeleteLocationDateClusterRequest, runtime: $RuntimeOptions): DeleteLocationDateClusterResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var body : map[string]any = {};
  if (!$isNull(request.objectId)) {
    body['ObjectId'] = request.objectId;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteLocationDateCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a spatiotemporal cluster.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   Before you call this operation, you must call the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to perform spatiotemporal clustering.
 * *   A successful deletion is returned regardless of whether a spatiotemporal clustering group ID exists.
 *
 * @param request DeleteLocationDateClusterRequest
 * @return DeleteLocationDateClusterResponse
 */
async function deleteLocationDateCluster(request: DeleteLocationDateClusterRequest): DeleteLocationDateClusterResponse {
  var runtime = new $RuntimeOptions{};
  return deleteLocationDateClusterWithOptions(request, runtime);
}

model DeleteProjectRequest {
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [Create a project](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
}

model DeleteProjectResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='FEDC9B1F-30F2-4C1F-8ED2-B7860187****'),
}

model DeleteProjectResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteProjectResponseBody(name='body'),
}

/**
 * @summary Deletes a project.
 *
 * @description *   Before you delete a project, make sure that all resources in the project, such as datasets, bindings, batch processing tasks, and triggers, are deleted. For more information, see [DeleteDataset](https://help.aliyun.com/document_detail/478164.html), [DeleteBatch](https://help.aliyun.com/document_detail/479918.html), and [DeleteTrigger](https://help.aliyun.com/document_detail/479915.html).
 * *   After a project is deleted, all resources used by the project are recycled, and all related data is lost and cannot be recovered.
 *
 * @param request DeleteProjectRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteProjectResponse
 */
async function deleteProjectWithOptions(request: DeleteProjectRequest, runtime: $RuntimeOptions): DeleteProjectResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteProject',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a project.
 *
 * @description *   Before you delete a project, make sure that all resources in the project, such as datasets, bindings, batch processing tasks, and triggers, are deleted. For more information, see [DeleteDataset](https://help.aliyun.com/document_detail/478164.html), [DeleteBatch](https://help.aliyun.com/document_detail/479918.html), and [DeleteTrigger](https://help.aliyun.com/document_detail/479915.html).
 * *   After a project is deleted, all resources used by the project are recycled, and all related data is lost and cannot be recovered.
 *
 * @param request DeleteProjectRequest
 * @return DeleteProjectResponse
 */
async function deleteProject(request: DeleteProjectRequest): DeleteProjectResponse {
  var runtime = new $RuntimeOptions{};
  return deleteProjectWithOptions(request, runtime);
}

model DeleteStoryRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='dataset001'),
  objectId?: string(name='ObjectId', description='The ID of the story to delete.

This parameter is required.', example='id1'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
}

model DeleteStoryResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
}

model DeleteStoryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteStoryResponseBody(name='body'),
}

/**
 * @summary Deletes a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param request DeleteStoryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteStoryResponse
 */
async function deleteStoryWithOptions(request: DeleteStoryRequest, runtime: $RuntimeOptions): DeleteStoryResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteStory',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param request DeleteStoryRequest
 * @return DeleteStoryResponse
 */
async function deleteStory(request: DeleteStoryRequest): DeleteStoryResponse {
  var runtime = new $RuntimeOptions{};
  return deleteStoryWithOptions(request, runtime);
}

model DeleteTriggerRequest {
  id?: string(name='Id', description='The ID of the trigger. You can obtain the ID of the trigger from the response of the [CreateTrigger](https://help.aliyun.com/document_detail/479912.html) operation.

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model DeleteTriggerResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='FEDC9B1F-30F2-4C1F-8ED2-B7860187****'),
}

model DeleteTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DeleteTriggerResponseBody(name='body'),
}

/**
 * @summary Deletes a trigger.
 *
 * @description You can delete a trigger only if the trigger is in one of the following states: Ready, Failed, Suspended, and Succeeded. You cannot delete a trigger that is in the Running state.
 *
 * @param request DeleteTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DeleteTriggerResponse
 */
async function deleteTriggerWithOptions(request: DeleteTriggerRequest, runtime: $RuntimeOptions): DeleteTriggerResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'DeleteTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes a trigger.
 *
 * @description You can delete a trigger only if the trigger is in one of the following states: Ready, Failed, Suspended, and Succeeded. You cannot delete a trigger that is in the Running state.
 *
 * @param request DeleteTriggerRequest
 * @return DeleteTriggerResponse
 */
async function deleteTrigger(request: DeleteTriggerRequest): DeleteTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return deleteTriggerWithOptions(request, runtime);
}

model DetachOSSBucketRequest {
  OSSBucket?: string(name='OSSBucket', description='The OSS bucket that you want to unbind.

This parameter is required.', example='examplebucket'),
}

model DetachOSSBucketResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
}

model DetachOSSBucketResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetachOSSBucketResponseBody(name='body'),
}

/**
 * @summary Unbinds an Object Storage Service (OSS) bucket from the corresponding project.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that the project is bound to a bucket. For more information, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request DetachOSSBucketRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetachOSSBucketResponse
 */
async function detachOSSBucketWithOptions(request: DetachOSSBucketRequest, runtime: $RuntimeOptions): DetachOSSBucketResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.OSSBucket)) {
    query['OSSBucket'] = request.OSSBucket;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetachOSSBucket',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Unbinds an Object Storage Service (OSS) bucket from the corresponding project.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that the project is bound to a bucket. For more information, see [AttachOSSBucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request DetachOSSBucketRequest
 * @return DetachOSSBucketResponse
 */
async function detachOSSBucket(request: DetachOSSBucketRequest): DetachOSSBucketResponse {
  var runtime = new $RuntimeOptions{};
  return detachOSSBucketWithOptions(request, runtime);
}

model DetectImageBodiesRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sensitivity?: float(name='Sensitivity', description='The accuracy level of detecting and recognizing specific content in the image. Valid values: 0 to 1. Default value: 0.6. A higher sensitivity specifies that more image details can be detected.', example='0.6', nullable=true),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the image file is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.', example='oss://test-bucket/test-object'),
}

model DetectImageBodiesShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sensitivity?: float(name='Sensitivity', description='The accuracy level of detecting and recognizing specific content in the image. Valid values: 0 to 1. Default value: 0.6. A higher sensitivity specifies that more image details can be detected.', example='0.6', nullable=true),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the image file is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.', example='oss://test-bucket/test-object'),
}

model DetectImageBodiesResponseBody = {
  bodies?: [
    Body
  ](name='Bodies', description='The human bodies.'),
  requestId?: string(name='RequestId', description='The request ID.', example='501339F9-4B70-0CE2-AB8C-866C********'),
}

model DetectImageBodiesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageBodiesResponseBody(name='body'),
}

/**
 * @summary Detects human body information, such as the confidence level and body bounding box, in an image.
 *
 * @description *   Before you call this operation, make sure that an Intelligent Media Management (IMM) project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageBodiesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageBodiesResponse
 */
async function detectImageBodiesWithOptions(tmpReq: DetectImageBodiesRequest, runtime: $RuntimeOptions): DetectImageBodiesResponse {
  tmpReq.validate();
  var request = new DetectImageBodiesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sensitivity)) {
    query['Sensitivity'] = request.sensitivity;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageBodies',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects human body information, such as the confidence level and body bounding box, in an image.
 *
 * @description *   Before you call this operation, make sure that an Intelligent Media Management (IMM) project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageBodiesRequest
 * @return DetectImageBodiesResponse
 */
async function detectImageBodies(request: DetectImageBodiesRequest): DetectImageBodiesResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageBodiesWithOptions(request, runtime);
}

model DetectImageCarsRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain. This parameter is optional. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
}

model DetectImageCarsShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain. This parameter is optional. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image file.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
}

model DetectImageCarsResponseBody = {
  cars?: [
    Car
  ](name='Cars', description='The vehicles.

This parameter is required.'),
  requestId?: string(name='RequestId', description='The request ID.', example='A8745209-DD0E-027E-8ABA-085E0C******'),
}

model DetectImageCarsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageCarsResponseBody(name='body'),
}

/**
 * @summary Detects the outline data, attributes, and license plate information of vehicles in an image. The vehicle attributes include the vehicle color (CarColor) and vehicle type (CarType). The license plate information includes the recognition content (Content) and plate frame (Boundary).
 *
 * @description *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageCarsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageCarsResponse
 */
async function detectImageCarsWithOptions(tmpReq: DetectImageCarsRequest, runtime: $RuntimeOptions): DetectImageCarsResponse {
  tmpReq.validate();
  var request = new DetectImageCarsShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageCars',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects the outline data, attributes, and license plate information of vehicles in an image. The vehicle attributes include the vehicle color (CarColor) and vehicle type (CarType). The license plate information includes the recognition content (Content) and plate frame (Boundary).
 *
 * @description *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageCarsRequest
 * @return DetectImageCarsResponse
 */
async function detectImageCars(request: DetectImageCarsRequest): DetectImageCarsResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageCarsWithOptions(request, runtime);
}

model DetectImageCodesRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the image file is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://bucketname/objectname'),
}

model DetectImageCodesShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the image file is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://bucketname/objectname'),
}

model DetectImageCodesResponseBody = {
  codes?: [
    Codes
  ](name='Codes', description='The barcodes or QR codes.

This parameter is required.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model DetectImageCodesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageCodesResponseBody(name='body'),
}

/**
 * @summary Detects barcodes and QR codes in an image.
 *
 * @description *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageCodesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageCodesResponse
 */
async function detectImageCodesWithOptions(tmpReq: DetectImageCodesRequest, runtime: $RuntimeOptions): DetectImageCodesResponse {
  tmpReq.validate();
  var request = new DetectImageCodesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageCodes',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects barcodes and QR codes in an image.
 *
 * @description *   For information about the image encoding formats supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageCodesRequest
 * @return DetectImageCodesResponse
 */
async function detectImageCodes(request: DetectImageCodesRequest): DetectImageCodesResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageCodesWithOptions(request, runtime);
}

model DetectImageCroppingRequest {
  aspectRatios?: string(name='AspectRatios', description='The cropping ratios. You can specify up to five cropping ratios. Take note of the following requirements:

*   The ratio must be an integer between 0 and 20.
*   The ratio must range from 0.5 to 2.
*   If you leave this parameter empty, the default processing logic is `["auto"]`.

>  Errors are reported in one of the following cases:\\\\
You specify more than five cropping ratios.\\\\
You pass an empty list to the system.\\\\
You specify a ratio that is not an integer, such as `4.1:3`.\\\\
The ratio is beyond the range of 0.5 to 2.', example='["1:1"]'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the image file that has an extension.', example='oss://imm-test/testcases/facetest.jpg'),
}

model DetectImageCroppingShrinkRequest {
  aspectRatios?: string(name='AspectRatios', description='The cropping ratios. You can specify up to five cropping ratios. Take note of the following requirements:

*   The ratio must be an integer between 0 and 20.
*   The ratio must range from 0.5 to 2.
*   If you leave this parameter empty, the default processing logic is `["auto"]`.

>  Errors are reported in one of the following cases:\\\\
You specify more than five cropping ratios.\\\\
You pass an empty list to the system.\\\\
You specify a ratio that is not an integer, such as `4.1:3`.\\\\
The ratio is beyond the range of 0.5 to 2.', example='["1:1"]'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the image file that has an extension.', example='oss://imm-test/testcases/facetest.jpg'),
}

model DetectImageCroppingResponseBody = {
  croppings?: [
    CroppingSuggestion
  ](name='Croppings', description='The image cropping suggestions.'),
  requestId?: string(name='RequestId', description='The request ID.', example='91AC8C98-0F36-49D2-8290-742E24D*****'),
}

model DetectImageCroppingResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageCroppingResponseBody(name='body'),
}

/**
 * @summary Detects the cropping area that produces the optimal visual effect based on a given image ratio by using AI model capabilities.
 *
 * @param tmpReq DetectImageCroppingRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageCroppingResponse
 */
async function detectImageCroppingWithOptions(tmpReq: DetectImageCroppingRequest, runtime: $RuntimeOptions): DetectImageCroppingResponse {
  tmpReq.validate();
  var request = new DetectImageCroppingShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.aspectRatios)) {
    query['AspectRatios'] = request.aspectRatios;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageCropping',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects the cropping area that produces the optimal visual effect based on a given image ratio by using AI model capabilities.
 *
 * @param request DetectImageCroppingRequest
 * @return DetectImageCroppingResponse
 */
async function detectImageCropping(request: DetectImageCroppingRequest): DetectImageCroppingResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageCroppingWithOptions(request, runtime);
}

model DetectImageFacesRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image object.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object.jpg'),
}

model DetectImageFacesShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image object.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object.jpg'),
}

model DetectImageFacesResponseBody = {
  faces?: [
    Figure
  ](name='Faces', description='The faces.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model DetectImageFacesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageFacesResponseBody(name='body'),
}

/**
 * @summary Detects faces from an image, including face boundary information, attributes, and quality. The boundary information includes the distance from the y-coordinate of the vertex to the top edge (Top), distance from the x-coordinate of the vertex to the left edge (Left), height (Height), and width (Width). Face attributes include the age (Age), age standard deviation (AgeSD), gender (Gender), emotion (Emotion), mouth opening (Mouth), beard (Beard), hat wearing (Hat), mask wearing (Mask), glasses wearing (Glasses), head orientation (HeadPose), attractiveness (Attractive), and confidence levels for preceding attributes. Quality information includes the face quality score (FaceQuality) and face resolution (Sharpness).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   For information about the image encoding formats supported by this operation, see [Limits](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageFacesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageFacesResponse
 */
async function detectImageFacesWithOptions(tmpReq: DetectImageFacesRequest, runtime: $RuntimeOptions): DetectImageFacesResponse {
  tmpReq.validate();
  var request = new DetectImageFacesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageFaces',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects faces from an image, including face boundary information, attributes, and quality. The boundary information includes the distance from the y-coordinate of the vertex to the top edge (Top), distance from the x-coordinate of the vertex to the left edge (Left), height (Height), and width (Width). Face attributes include the age (Age), age standard deviation (AgeSD), gender (Gender), emotion (Emotion), mouth opening (Mouth), beard (Beard), hat wearing (Hat), mask wearing (Mask), glasses wearing (Glasses), head orientation (HeadPose), attractiveness (Attractive), and confidence levels for preceding attributes. Quality information includes the face quality score (FaceQuality) and face resolution (Sharpness).
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   For information about the image encoding formats supported by this operation, see [Limits](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageFacesRequest
 * @return DetectImageFacesResponse
 */
async function detectImageFaces(request: DetectImageFacesRequest): DetectImageFacesResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageFacesWithOptions(request, runtime);
}

model DetectImageLabelsRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immimagetest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the image file that has an extension.

This parameter is required.', example='oss://imm-test/testcases/facetest.jpg'),
  threshold?: float(name='Threshold', description='The threshold of the label confidence. Labels whose confidence is lower than the specified threshold are not returned in the response. Valid values: 0 to 1. If you leave this parameter empty, the algorithm provides a default threshold.', example='1'),
}

model DetectImageLabelsShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immimagetest'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which you store the image.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the image file that has an extension.

This parameter is required.', example='oss://imm-test/testcases/facetest.jpg'),
  threshold?: float(name='Threshold', description='The threshold of the label confidence. Labels whose confidence is lower than the specified threshold are not returned in the response. Valid values: 0 to 1. If you leave this parameter empty, the algorithm provides a default threshold.', example='1'),
}

model DetectImageLabelsResponseBody = {
  labels?: [
    Label
  ](name='Labels', description='The list of labels detected.'),
  requestId?: string(name='RequestId', description='The request ID.', example='91AC8C98-0F36-49D2-8290-742E24DF1F69'),
}

model DetectImageLabelsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageLabelsResponseBody(name='body'),
}

/**
 * @summary Detects scene, object, and event information in an image. Scene information includes natural landscapes, daily life, and disasters. Event information includes talent shows, office events, performances, and production events. Object information includes tableware, electronics, furniture, and transportation. The DetectImageLabels operation supports more than 30 different categories and thousands of labels.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Make sure that an IMM [project](https://help.aliyun.com/document_detail/478273.html) is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For more information about the features of this operation, see [Image label detection](https://help.aliyun.com/document_detail/477179.html).
 * *   For more information about the input images supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageLabelsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageLabelsResponse
 */
async function detectImageLabelsWithOptions(tmpReq: DetectImageLabelsRequest, runtime: $RuntimeOptions): DetectImageLabelsResponse {
  tmpReq.validate();
  var request = new DetectImageLabelsShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.threshold)) {
    query['Threshold'] = request.threshold;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageLabels',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects scene, object, and event information in an image. Scene information includes natural landscapes, daily life, and disasters. Event information includes talent shows, office events, performances, and production events. Object information includes tableware, electronics, furniture, and transportation. The DetectImageLabels operation supports more than 30 different categories and thousands of labels.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Make sure that an IMM [project](https://help.aliyun.com/document_detail/478273.html) is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   For more information about the features of this operation, see [Image label detection](https://help.aliyun.com/document_detail/477179.html).
 * *   For more information about the input images supported by this operation, see [Limits on images](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageLabelsRequest
 * @return DetectImageLabelsResponse
 */
async function detectImageLabels(request: DetectImageLabelsRequest): DetectImageLabelsResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageLabelsWithOptions(request, runtime);
}

model DetectImageScoreRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~477051~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the input image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://bucketname/objectname'),
}

model DetectImageScoreShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~477051~~)

This parameter is required.', example='immtest'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the input image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://bucketname/objectname'),
}

model DetectImageScoreResponseBody = {
  imageScore?: {
    overallQualityScore?: float(name='OverallQualityScore', description='The overall quality score.', example='0.6'),
  }(name='ImageScore', description='The quality score of the image.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model DetectImageScoreResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageScoreResponseBody(name='body'),
}

/**
 * @summary Calculates the aesthetics quality score of an image based on metrics such as the composition, brightness, contrast, color, and resolution. The operation returns a score within the range from 0 to 1. A higher score indicates better image quality.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478273.html).[](~~478152~~)
 * *   For information about the image encoding formats supported by this operation, see [Limits](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param tmpReq DetectImageScoreRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageScoreResponse
 */
async function detectImageScoreWithOptions(tmpReq: DetectImageScoreRequest, runtime: $RuntimeOptions): DetectImageScoreResponse {
  tmpReq.validate();
  var request = new DetectImageScoreShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageScore',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Calculates the aesthetics quality score of an image based on metrics such as the composition, brightness, contrast, color, and resolution. The operation returns a score within the range from 0 to 1. A higher score indicates better image quality.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478273.html).[](~~478152~~)
 * *   For information about the image encoding formats supported by this operation, see [Limits](https://help.aliyun.com/document_detail/475569.html).
 *
 * @param request DetectImageScoreRequest
 * @return DetectImageScoreResponse
 */
async function detectImageScore(request: DetectImageScoreRequest): DetectImageScoreResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageScoreWithOptions(request, runtime);
}

model DetectImageTextsRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The Object Storage Service (OSS) URI of the file.

Specify the URI in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of an OSS bucket that is in the same region as the current project. ${Object} specifies the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-object.jpg'),
}

model DetectImageTextsShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The Object Storage Service (OSS) URI of the file.

Specify the URI in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of an OSS bucket that is in the same region as the current project. ${Object} specifies the path of the object with the extension included.

This parameter is required.', example='oss://test-bucket/test-object.jpg'),
}

model DetectImageTextsResponseBody = {
  OCRContents?: [
    OCRContents
  ](name='OCRContents', description='OCR text blocks.'),
  OCRTexts?: string(name='OCRTexts', description='The full Optical Character Recognition (OCR) text, which is spliced by using the content of OCRContents.'),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
}

model DetectImageTextsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectImageTextsResponseBody(name='body'),
}

/**
 * @summary Recognizes and extracts text content from an image.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The size of the image cannot exceed 20 MB.
 * *   The shortest side of the image is not less than 20 px, and the longest side is not more than 30,000 px.
 * *   The aspect ratio of the image is less than 1:2.
 * *   We recommend that you do not use an image that is smaller than 15 px × 15 px in size. Otherwise, the recognition rate is low.
 *
 * @param tmpReq DetectImageTextsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectImageTextsResponse
 */
async function detectImageTextsWithOptions(tmpReq: DetectImageTextsRequest, runtime: $RuntimeOptions): DetectImageTextsResponse {
  tmpReq.validate();
  var request = new DetectImageTextsShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectImageTexts',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Recognizes and extracts text content from an image.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The size of the image cannot exceed 20 MB.
 * *   The shortest side of the image is not less than 20 px, and the longest side is not more than 30,000 px.
 * *   The aspect ratio of the image is less than 1:2.
 * *   We recommend that you do not use an image that is smaller than 15 px × 15 px in size. Otherwise, the recognition rate is low.
 *
 * @param request DetectImageTextsRequest
 * @return DetectImageTextsResponse
 */
async function detectImageTexts(request: DetectImageTextsRequest): DetectImageTextsResponse {
  var runtime = new $RuntimeOptions{};
  return detectImageTextsWithOptions(request, runtime);
}

model DetectMediaMetaRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The URI of the media object in Object Storage Service (OSS).

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://examplebucket/sampleobject.mp4'),
}

model DetectMediaMetaShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The URI of the media object in Object Storage Service (OSS).

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://examplebucket/sampleobject.mp4'),
}

model DetectMediaMetaResponseBody = {
  addresses?: [
    Address
  ](name='Addresses', description='The addresses.

This parameter is returned only when address information is detected.'),
  album?: string(name='Album', description='The album.', example='unable'),
  albumArtist?: string(name='AlbumArtist', description='The album artist.', example='unable'),
  artist?: string(name='Artist', description='The artist.', example='unable'),
  audioStreams?: [
    AudioStream
  ](name='AudioStreams', description='The audio streams.'),
  bitrate?: long(name='Bitrate', description='The bitrate. Unit: bit/s.', example='13164131'),
  composer?: string(name='Composer', description='The composer.', example='unable'),
  duration?: double(name='Duration', description='The total duration of the video. Unit: seconds.', example='15.263000'),
  formatLongName?: string(name='FormatLongName', description='The full format name.', example='QuickTime / MOV'),
  formatName?: string(name='FormatName', description='The abbreviated format name.', example='mov,mp4,m4a,3gp,3g2,mj2'),
  language?: string(name='Language', description='The language of the content. For more information, see the ISO 639-2 Alpha-3 codes for the representation of names of languages.', example='eng'),
  latLong?: string(name='LatLong', description='The coordinate pair of the central point. The coordinate pair consists of latitude and longitude values. This parameter value must be in the "latitude,longitude" format. Valid values of the latitude: [-90,+90]. Valid values of the longitude: [-180,+180].', example='+120.029003,+30.283095'),
  performer?: string(name='Performer', description='The performer.', example='unable'),
  produceTime?: string(name='ProduceTime', description='The time of recording. For more information about the time formats, see the RFC3339 Nano standard.', example='2022-04-24T02:39:57Z'),
  programCount?: long(name='ProgramCount', description='The number of programs.', example='2'),
  requestId?: string(name='RequestId', description='The request ID.', example='2213B1A9-EB3D-4666-84E0-24980BC*****'),
  size?: long(name='Size', description='The size of the media object. Unit: bytes.', example='25115517'),
  startTime?: double(name='StartTime', description='The initial playback time.', example='0.000000'),
  streamCount?: long(name='StreamCount', description='The number of media streams.', example='2'),
  subtitles?: [
    SubtitleStream
  ](name='Subtitles', description='The subtitle streams.'),
  title?: string(name='Title', description='The title of the media object.', example='test'),
  videoHeight?: long(name='VideoHeight', description='The video height in pixels.', example='1920'),
  videoStreams?: [
    VideoStream
  ](name='VideoStreams', description='The video streams.'),
  videoWidth?: long(name='VideoWidth', description='The video width in pixels.', example='1080'),
}

model DetectMediaMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectMediaMetaResponseBody(name='body'),
}

/**
 * @summary Queries media metadata, including the media format and stream information.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *
 * @param tmpReq DetectMediaMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectMediaMetaResponse
 */
async function detectMediaMetaWithOptions(tmpReq: DetectMediaMetaRequest, runtime: $RuntimeOptions): DetectMediaMetaResponse {
  tmpReq.validate();
  var request = new DetectMediaMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectMediaMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries media metadata, including the media format and stream information.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478152.html).
 *
 * @param request DetectMediaMetaRequest
 * @return DetectMediaMetaResponse
 */
async function detectMediaMeta(request: DetectMediaMetaRequest): DetectMediaMetaResponse {
  var runtime = new $RuntimeOptions{};
  return detectMediaMetaWithOptions(request, runtime);
}

model DetectTextAnomalyRequest {
  content?: string(name='Content', description='The text to be detected. It can contain up to 10,000 characters (including punctuation marks). Only Chinese text can be detected.

This parameter is required.', example='content'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
}

model DetectTextAnomalyResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='91AC8C98-0F36-49D2-8290-742E24DF*****'),
  suggestion?: string(name='Suggestion', description='Indicates whether the text contains anomalies. Valid values:

*   pass: the text does not contain anomalies.
*   block: the text contains anomalies.', example='pass'),
}

model DetectTextAnomalyResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: DetectTextAnomalyResponseBody(name='body'),
}

/**
 * @summary Detects whether specified text contains anomalies, such as pornography, advertisements, excessive junk content, politically sensitive content, and abuse.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * >  The text compliance detection feature only supports Chinese characters.
 *
 * @param request DetectTextAnomalyRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return DetectTextAnomalyResponse
 */
async function detectTextAnomalyWithOptions(request: DetectTextAnomalyRequest, runtime: $RuntimeOptions): DetectTextAnomalyResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.content)) {
    query['Content'] = request.content;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'DetectTextAnomaly',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Detects whether specified text contains anomalies, such as pornography, advertisements, excessive junk content, politically sensitive content, and abuse.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * >  The text compliance detection feature only supports Chinese characters.
 *
 * @param request DetectTextAnomalyRequest
 * @return DetectTextAnomalyResponse
 */
async function detectTextAnomaly(request: DetectTextAnomalyRequest): DetectTextAnomalyResponse {
  var runtime = new $RuntimeOptions{};
  return detectTextAnomalyWithOptions(request, runtime);
}

model EncodeBlindWatermarkRequest {
  content?: string(name='Content', description='The text content of watermarks. It can be up to 256 characters in length.'),
  imageQuality?: int32(name='ImageQuality', description='This parameter takes effect only if the input image format is JPG.

The storage quality of the output image that carries the watermarks. Default value: 90. Valid values: 70 to 100. The higher the quality, the larger the image size and the higher the watermark resolution quality.', example='90'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The Object Storage Service (OSS) URI of the image.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region with the current project. `${Object}` specifies the path of the object with the extension included.

Supported image formats: JPG, PNG, BMP, TIFF, and WebP.

Image size limit: 10,000 px maximum and 80 px x 80 px minimum.

This parameter is required.', example='oss://test-bucket/test-object.jpg'),
  strengthLevel?: string(name='StrengthLevel', description='The watermark strength level. The higher the strength, the more resistant the watermarked image is to attacks, but the more the image is distorted. Default value: low. Valid values: [low, medium, high].', example='low'),
  targetURI?: string(name='TargetURI', description='The URI of the output image in OSS.

Specify the URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

> 

*   The format of the output image is the same as that of the input image.

This parameter is required.', example='oss://test-bucket/target-object.jpg'),
}

model EncodeBlindWatermarkResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='8E0DD64B-28C6-4653-8FF7-93E4C234BCF0'),
}

model EncodeBlindWatermarkResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: EncodeBlindWatermarkResponseBody(name='body'),
}

/**
 * @summary Embeds specific textual information into an image as watermarks. These watermarks are visually imperceptible and do not affect the aesthetics of the image or the integrity of the original data. The watermarks can be extracted by using the CreateDecodeBlindWatermarkTask operation.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the billing of Intelligent Media Management (IMM).
 * *   Make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   You can embed only text as blind watermarks to an image.
 * *   The format of the output image is the same as that of the input image.
 * *   The watermarks can still be extracted even if attacks, such as compression, scaling, cropping, rotation, and color transformation, are performed on the image.
 * *   Pure black and white images and images with low resolution (roughly less than 200 px × 200 px,) are not supported.
 *
 * @param request EncodeBlindWatermarkRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return EncodeBlindWatermarkResponse
 */
async function encodeBlindWatermarkWithOptions(request: EncodeBlindWatermarkRequest, runtime: $RuntimeOptions): EncodeBlindWatermarkResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.content)) {
    query['Content'] = request.content;
  }
  if (!$isNull(request.imageQuality)) {
    query['ImageQuality'] = request.imageQuality;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.strengthLevel)) {
    query['StrengthLevel'] = request.strengthLevel;
  }
  if (!$isNull(request.targetURI)) {
    query['TargetURI'] = request.targetURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'EncodeBlindWatermark',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Embeds specific textual information into an image as watermarks. These watermarks are visually imperceptible and do not affect the aesthetics of the image or the integrity of the original data. The watermarks can be extracted by using the CreateDecodeBlindWatermarkTask operation.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the billing of Intelligent Media Management (IMM).
 * *   Make sure that an IMM project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   You can embed only text as blind watermarks to an image.
 * *   The format of the output image is the same as that of the input image.
 * *   The watermarks can still be extracted even if attacks, such as compression, scaling, cropping, rotation, and color transformation, are performed on the image.
 * *   Pure black and white images and images with low resolution (roughly less than 200 px × 200 px,) are not supported.
 *
 * @param request EncodeBlindWatermarkRequest
 * @return EncodeBlindWatermarkResponse
 */
async function encodeBlindWatermark(request: EncodeBlindWatermarkRequest): EncodeBlindWatermarkResponse {
  var runtime = new $RuntimeOptions{};
  return encodeBlindWatermarkWithOptions(request, runtime);
}

model ExtractDocumentTextRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/477051.html) operation.

This parameter is required.', example='immtest'),
  sourceType?: string(name='SourceType', description='The type of the filename extension of the source data. By default, the filename extension of the source data is the same as the filename extension of the input document. If the input document has no extension, you can specify this parameter. Valid values:

*   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
*   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
*   Table documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
*   PDF documents: pdf.', example='docx'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the document is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
}

model ExtractDocumentTextShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/477051.html) operation.

This parameter is required.', example='immtest'),
  sourceType?: string(name='SourceType', description='The type of the filename extension of the source data. By default, the filename extension of the source data is the same as the filename extension of the input document. If the input document has no extension, you can specify this parameter. Valid values:

*   Text documents: doc, docx, wps, wpss, docm, dotm, dot, dotx, and html
*   Presentation documents: pptx, ppt, pot, potx, pps, ppsx, dps, dpt, pptm, potm, ppsm, and dpss
*   Table documents: xls, xlt, et, ett, xlsx, xltx, csv, xlsb, xlsm, xltm, and ets
*   PDF documents: pdf.', example='docx'),
  sourceURI?: string(name='SourceURI', description='The URI of the Object Storage Service (OSS) bucket in which the document is stored.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the file that has an extension.

This parameter is required.', example='oss://test-bucket/test-object'),
}

model ExtractDocumentTextResponseBody = {
  documentText?: string(name='DocumentText'),
  requestId?: string(name='RequestId', example='94D6F994-E298-037E-8E8B-0090F27*****'),
}

model ExtractDocumentTextResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ExtractDocumentTextResponseBody(name='body'),
}

/**
 * @summary Extracts the text from the document body.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478273.html).[](~~478152~~)
 * *   The following document formats are supported: Word, Excel, PPT, PDF, and TXT.
 * *   The document cannot exceed 200 MB in size. The size of the extracted text cannot exceed 2 MB in size (approximately 1.2 million letters).
 * >  If the format of the document is complex or the document body is too large, a timeout error may occur. In this case, we recommend that you call the CreateOfficeConversionTask operation to convert the document to the TXT format before you call the ExtractDocumentText operation.
 *
 * @param tmpReq ExtractDocumentTextRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ExtractDocumentTextResponse
 */
async function extractDocumentTextWithOptions(tmpReq: ExtractDocumentTextRequest, runtime: $RuntimeOptions): ExtractDocumentTextResponse {
  tmpReq.validate();
  var request = new ExtractDocumentTextShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceType)) {
    query['SourceType'] = request.sourceType;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ExtractDocumentText',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Extracts the text from the document body.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the specified project exists in the current region. For more information, see [Project management](https://help.aliyun.com/document_detail/478273.html).[](~~478152~~)
 * *   The following document formats are supported: Word, Excel, PPT, PDF, and TXT.
 * *   The document cannot exceed 200 MB in size. The size of the extracted text cannot exceed 2 MB in size (approximately 1.2 million letters).
 * >  If the format of the document is complex or the document body is too large, a timeout error may occur. In this case, we recommend that you call the CreateOfficeConversionTask operation to convert the document to the TXT format before you call the ExtractDocumentText operation.
 *
 * @param request ExtractDocumentTextRequest
 * @return ExtractDocumentTextResponse
 */
async function extractDocumentText(request: ExtractDocumentTextRequest): ExtractDocumentTextResponse {
  var runtime = new $RuntimeOptions{};
  return extractDocumentTextWithOptions(request, runtime);
}

model FuzzyQueryRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 200.

Default value: 100.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. If the total number of files is greater than the value of MaxResults, you must specify NextToken.

The file information is returned in alphabetical order starting from the value of NextToken.

You do not need to specify this parameter for the first request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sorting method. Valid values:

*   asc: ascending order.
*   desc (default): descending order.

> 

*   Separate multiple sorting methods with commas (,). Example: asc,desc.

*   The number of values for Order must be less than or equal to the number of values for Sort. For example, if you set Sort to Size,Filename, you can set Order only to desc or asc.

*   If the number of values for Order is less than the number of values for Sort, the unsorted fields are default to the value of asc. For example, if you set Sort to Size,Filename and Order to asc, the Filename field is default to the value of asc.', example='asc,desc'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  query?: string(name='Query', description='The query content. The value can be up to 1 MB in size.

This parameter is required.'),
  sort?: string(name='Sort', description='The sort fields. For more information, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).

*   Separate multiple sort fields with commas (,). Example: `Size,Filename`.
*   You can specify up to five sort fields.
*   The priority order of sorting is determined based on the order of the sort fields.', example='Size,Filename'),
  withFields?: [ string ](name='WithFields', description='The fields that you want to include in the response. To help reduce the size of the response, include only necessary metadata fields.

If you do not specify this parameter or set the value to null, all existing metadata fields are returned.'),
}

model FuzzyQueryShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 200.

Default value: 100.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. If the total number of files is greater than the value of MaxResults, you must specify NextToken.

The file information is returned in alphabetical order starting from the value of NextToken.

You do not need to specify this parameter for the first request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sorting method. Valid values:

*   asc: ascending order.
*   desc (default): descending order.

> 

*   Separate multiple sorting methods with commas (,). Example: asc,desc.

*   The number of values for Order must be less than or equal to the number of values for Sort. For example, if you set Sort to Size,Filename, you can set Order only to desc or asc.

*   If the number of values for Order is less than the number of values for Sort, the unsorted fields are default to the value of asc. For example, if you set Sort to Size,Filename and Order to asc, the Filename field is default to the value of asc.', example='asc,desc'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  query?: string(name='Query', description='The query content. The value can be up to 1 MB in size.

This parameter is required.'),
  sort?: string(name='Sort', description='The sort fields. For more information, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).

*   Separate multiple sort fields with commas (,). Example: `Size,Filename`.
*   You can specify up to five sort fields.
*   The priority order of sorting is determined based on the order of the sort fields.', example='Size,Filename'),
  withFieldsShrink?: string(name='WithFields', description='The fields that you want to include in the response. To help reduce the size of the response, include only necessary metadata fields.

If you do not specify this parameter or set the value to null, all existing metadata fields are returned.'),
}

model FuzzyQueryResponseBody = {
  files?: [
    File
  ](name='Files', description='The files.'),
  nextToken?: string(name='NextToken', description='A pagination token.

It can be used in the next request to retrieve a new page of results.

If NextToken is empty, no next page exists.

This parameter is required.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  totalHits?: long(name='TotalHits', description='The number of hits.'),
}

model FuzzyQueryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: FuzzyQueryResponseBody(name='body'),
}

/**
 * @summary Queries the extracted file metadata, including the file name, labels, path, custom tags, text, and other fields. If the value of a metadata field of a file matches the specified string, the metadata of the file is returned.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 88490020073) and share your questions with us.
 * *   For information about the fields that you can use as query conditions, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).
 *
 * @param tmpReq FuzzyQueryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return FuzzyQueryResponse
 */
async function fuzzyQueryWithOptions(tmpReq: FuzzyQueryRequest, runtime: $RuntimeOptions): FuzzyQueryResponse {
  tmpReq.validate();
  var request = new FuzzyQueryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.withFields)) {
    request.withFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.withFields, 'WithFields', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.query)) {
    query['Query'] = request.query;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.withFieldsShrink)) {
    query['WithFields'] = request.withFieldsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'FuzzyQuery',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the extracted file metadata, including the file name, labels, path, custom tags, text, and other fields. If the value of a metadata field of a file matches the specified string, the metadata of the file is returned.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 88490020073) and share your questions with us.
 * *   For information about the fields that you can use as query conditions, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).
 *
 * @param request FuzzyQueryRequest
 * @return FuzzyQueryResponse
 */
async function fuzzyQuery(request: FuzzyQueryRequest): FuzzyQueryResponse {
  var runtime = new $RuntimeOptions{};
  return fuzzyQueryWithOptions(request, runtime);
}

model GenerateVideoPlaylistRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  masterURI?: string(name='MasterURI', description='The OSS path of the master playlist.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the full path of the file that is suffixed with .m3u8.

>  If a playlist contains subtitles or multiple outputs, the MasterURI parameter is required and the URI of subtitle files or outputs must be in the directory specified by the MasterURI parameter or its subdirectory.', example='oss://bucket/object/master.m3u8'),
  notification?: Notification(name='Notification', description='The notification settings. To view details, click Notification. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  overwritePolicy?: string(name='OverwritePolicy', description='The overwrite policy when the media playlist exists. Valid values:

*   overwrite (default): overwrites an existing media playlist.
*   skip-existing: skips generation and retains the existing media playlist.', example='overwrite'),
  projectName?: string(name='ProjectName', description='The project name.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceDuration?: float(name='SourceDuration', description='The period of time during which the playlist is generated. Unit: seconds.

*   If you set this parameter to 0 (default) or leave this parameter empty, a playlist is generated until the end time of the source video.
*   If you set this parameter to a value greater than 0, a playlist is generated for the specified period of time from the start time that you specify.

>  If you set this parameter to a value that exceeds the end time of a source video, use the default value.', example='0'),
  sourceStartTime?: float(name='SourceStartTime', description='The time when the playlist starts to generate. Unit: seconds.

*   If you set this parameter to 0 (default) or leave this parameter empty, the start time of the source video is used as the time when a playlist starts to generate.
*   If you set this parameter to a value greater than 0, the time when a playlist starts to generate is the specified point in time.

>  If you use this parameter together with the **SourceDuration** parameter, a playlist can be generated based on the partial content of a source video.', example='0'),
  sourceSubtitles?: [ 
    {
      language?: string(name='Language', description='The subtitle language. If you configure this parameter, the value must comply with the ISO 639-2 standard. By default, this parameter is left empty.', example='eng'),
      URI?: string(name='URI', description='The OSS path of the subtitle file.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the full path of the file.

>  The **MasterURI** parameter cannot be left empty, and the OSS path `oss://${Bucket}/${Object}` of a subtitle file must be in the directory specified by the **MasterURI** parameter or its subdirectory.

This parameter is required.', example='oss://test-bucket/test-object/subtitle/eng.vtt'),
    }
  ](name='SourceSubtitles', description='The subtitle files. By default, this parameter is left empty. Up to two subtitle files are supported.'),
  sourceURI?: string(name='SourceURI', description='The OSS path of the video file.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the full path of the file that contains the file name extension.

>  Only OSS buckets of the Standard storage class are supported. OSS buckets for which hotlink protection whitelists are configured are not supported.

This parameter is required.', example='oss://imm-test/testcases/video.mp4'),
  tags?: map[string]string(name='Tags', description='The [tags](https://help.aliyun.com/document_detail/106678.html) that you want to add to a TS file in OSS. You can use tags to manage the lifecycles of TS files in OSS.', example='{"key1": "value1", "key2": "value2"}'),
  targets?: [ 
    {
      audio?: TargetAudio(name='Audio', description='The audio processing configuration. If you set this parameter to null (default), audio processing is disabled. The generated TS files do not contain audio streams.

>  The Audio and Subtitle parameters in the same output are mutually exclusive. If the Audio parameter is configured, the Subtitle parameter is ignored. The Audio and Video parameters can be configured at the same time. You can also configure only the Audio parameter to generate only audio information.'),
      duration?: float(name='Duration', description='The playback duration of a single TS file. Unit: seconds. Default value: 10. Valid values: 5 to 15.', example='5'),
      initialSegments?: [ float ](name='InitialSegments', description='The array of the durations of the pre-transcoded TS files. The array can contain the durations of up to six pre-transcoded TS files. By default, this parameter is left empty. This parameter is independent of the **Duration** parameter.'),
      initialTranscode?: float(name='InitialTranscode', description='The pre-transcoding duration. Unit: seconds. Default value: 30.

*   If you set this parameter to 0, pre-transcoding is disabled.
*   If you set this parameter to a value that is less than 0 or greater than the duration of a source video, the entire video is pre-transcoded.
*   If you set this parameter to a value that is within the middle of the playback duration of a TS file, the transcoding continues until the end of the playback duration.

>  This parameter is used to reduce the time spent in waiting for the initial playback of a video and improve the playback experience. If you want to replace the traditional video on demand (VOD) business scenario, you can try to pre-transcode the entire video.', example='30.0'),
      subtitle?: TargetSubtitle(name='Subtitle', description='The subtitle processing configuration.

>  The Subtitle and Video or Audio parameters in the same output are mutually exclusive. You must configure the Subtitle parameter independently to generate subtitles.'),
      tags?: map[string]string(name='Tags', description='The [tags](https://help.aliyun.com/document_detail/106678.html) that you want to add to a TS file in OSS. You can use tags to manage the lifecycles of TS files in OSS.

>  The combination of the value of the Tags parameter and the value of the Tags parameter in the upper level is used as the tag value of the current output. If the value of the Tags parameter in the current level is the same as the value of the Tags parameter in the upper level, use the value of the Tags parameter in the current level.'),
      transcodeAhead?: int32(name='TranscodeAhead', description='The number of TS files that are pre-transcoded when the live transcoding is triggered. By default, a 2-minute video is pre-transcoded.

*   Example: If you set the **Duration** parameter to 10, the value of the **TranscodeAhead** parameter is 12 by default. You can configure this parameter to manage the number of pre-transcoded files in an asynchronous manner. Valid values: 10 to 30.', example='3'),
      URI?: string(name='URI', description='The prefix of the OSS path that is used to store the live transcoding files. The live transcoding files include a M3U8 file and multiple TS files.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the prefix of the full path of the file that does not contain the file name extension.

*   Example: If the URI is oss://test-bucket/test-object/output-video, the output-video.m3u8 file and multiple output-video-${token}-${index}.ts files are generated in the oss://test-bucket/test-object/ directory. ${token} is a unique string generated based on the transcoding parameters. The ${token} parameter is included in the response of the operation. ${index} is the serial number of the generated TS files that are numbered starting from 0.

>  If the **MasterURI** parameter is not left empty, the URI specified by this parameter must be in the directory specified by the **MasterURI** parameter or its subdirectory.', example='oss://imm-test/testcases/video'),
      video?: TargetVideo(name='Video', description='The video processing configuration. If you set this parameter to null (default), video processing is disabled. The generated TS files do not contain video streams.

>  The Video and Subtitle parameters in the same output are mutually exclusive. If the Video parameter is configured, the Subtitle parameter is ignored.'),
    }
  ](name='Targets', description='The live transcoding playlists. Up to 6 playlists are supported. Each output corresponds to at most one video media playlist and one or more subtitle media playlists.

>  If more than one output is configured, the **MasterURI** parameter is required.

This parameter is required.'),
  userData?: string(name='UserData', description='The custom user information, which is returned in asynchronous notifications to help you handle the notifications in the system. The maximum length of a notification is 2048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model GenerateVideoPlaylistShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you do not have special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  masterURI?: string(name='MasterURI', description='The OSS path of the master playlist.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the full path of the file that is suffixed with .m3u8.

>  If a playlist contains subtitles or multiple outputs, the MasterURI parameter is required and the URI of subtitle files or outputs must be in the directory specified by the MasterURI parameter or its subdirectory.', example='oss://bucket/object/master.m3u8'),
  notificationShrink?: string(name='Notification', description='The notification settings. To view details, click Notification. For information about the asynchronous notification format, see [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html).'),
  overwritePolicy?: string(name='OverwritePolicy', description='The overwrite policy when the media playlist exists. Valid values:

*   overwrite (default): overwrites an existing media playlist.
*   skip-existing: skips generation and retains the existing media playlist.', example='overwrite'),
  projectName?: string(name='ProjectName', description='The project name.[](~~478153~~)

This parameter is required.', example='immtest'),
  sourceDuration?: float(name='SourceDuration', description='The period of time during which the playlist is generated. Unit: seconds.

*   If you set this parameter to 0 (default) or leave this parameter empty, a playlist is generated until the end time of the source video.
*   If you set this parameter to a value greater than 0, a playlist is generated for the specified period of time from the start time that you specify.

>  If you set this parameter to a value that exceeds the end time of a source video, use the default value.', example='0'),
  sourceStartTime?: float(name='SourceStartTime', description='The time when the playlist starts to generate. Unit: seconds.

*   If you set this parameter to 0 (default) or leave this parameter empty, the start time of the source video is used as the time when a playlist starts to generate.
*   If you set this parameter to a value greater than 0, the time when a playlist starts to generate is the specified point in time.

>  If you use this parameter together with the **SourceDuration** parameter, a playlist can be generated based on the partial content of a source video.', example='0'),
  sourceSubtitlesShrink?: string(name='SourceSubtitles', description='The subtitle files. By default, this parameter is left empty. Up to two subtitle files are supported.'),
  sourceURI?: string(name='SourceURI', description='The OSS path of the video file.

The OSS path must be in the oss://${Bucket}/${Object} format. ${Bucket} specifies the name of the OSS bucket that is in the same region as the current project. ${Object} specifies the full path of the file that contains the file name extension.

>  Only OSS buckets of the Standard storage class are supported. OSS buckets for which hotlink protection whitelists are configured are not supported.

This parameter is required.', example='oss://imm-test/testcases/video.mp4'),
  tagsShrink?: string(name='Tags', description='The [tags](https://help.aliyun.com/document_detail/106678.html) that you want to add to a TS file in OSS. You can use tags to manage the lifecycles of TS files in OSS.', example='{"key1": "value1", "key2": "value2"}'),
  targetsShrink?: string(name='Targets', description='The live transcoding playlists. Up to 6 playlists are supported. Each output corresponds to at most one video media playlist and one or more subtitle media playlists.

>  If more than one output is configured, the **MasterURI** parameter is required.

This parameter is required.'),
  userData?: string(name='UserData', description='The custom user information, which is returned in asynchronous notifications to help you handle the notifications in the system. The maximum length of a notification is 2048 bytes.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model GenerateVideoPlaylistResponseBody = {
  audioPlaylist?: [ 
    {
      channels?: int32(name='Channels', description='The number of audio channels.', example='1'),
      token?: string(name='Token', description='The token of the audio media playlist. You can use this parameter to generate the path of a TS file.', example='affe0c6042f09722fec95a21b8b******'),
      URI?: string(name='URI', description='The OSS path of the audio media playlist.', example='oss://imm-test/testcases/video.m3u8'),
    }
  ](name='AudioPlaylist', description='The audio media playlist files.'),
  duration?: float(name='Duration', description='The total duration of the generated video.', example='1082'),
  masterURI?: string(name='MasterURI', description='The OSS path of the master playlist.', example='oss://test-bucket/test-object/master.m3u8'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFF*****'),
  subtitlePlaylist?: [ 
    {
      index?: int32(name='Index', description='The serial number of the subtitle stream. The value starts from 0.', example='1'),
      language?: string(name='Language', description='The language of the subtitle stream.

>  The language is derived from the subtitle stream information in the OSS path specified by the SourceURI parameter for a source video. If no language information exists in the source video, null is returned.', example='en'),
      token?: string(name='Token', description='The token of the subtitle media playlist. You can use this parameter to generate the path of a subtitle file.

>  You can generate the path of a transcoded subtitle file based on the returned token value. The path must be in the oss://${Bucket}/${Object}-${Token}_${Index}.ts format. oss://${Bucket}/${Object} specifies the URI specified by input parameters for output files. ${Token} specifies the returned token value, and ${Index} specifies the serial number of a subtitle file.', example='affe0c6042f09722fec95a21b8b******'),
      URI?: string(name='URI', description='The OSS path of the subtitle media playlist.', example='oss://imm-test/testcases/vide_0.m3u8'),
    }
  ](name='SubtitlePlaylist', description='The subtitle media playlist files.'),
  token?: string(name='Token', description='The token of the master playlist.', example='92376fbb-171f-4259-913f-705f7ee0****'),
  videoPlaylist?: [ 
    {
      frameRate?: string(name='FrameRate', description='The video frame rate.', example='25/1'),
      resolution?: string(name='Resolution', description='The video resolution.', example='640x480'),
      token?: string(name='Token', description='The token of the video media playlist. You can use this parameter to generate the path of a TS file.

>  You can generate the path of a transcoded TS file based on the value of this parameter. The path must be in the oss://${Bucket}/${Object}-${Token}-${Index}.ts format. oss://${Bucket}/${Object} specifies the URI specified by input parameters for output files. ${Token} specifies the returned token, and ${Index} specifies the serial number of a TS file.', example='affe0c6042f09722fec95a21b8b******'),
      URI?: string(name='URI', description='The OSS path of the video media playlist.', example='oss://imm-test/testcases/video.m3u8'),
    }
  ](name='VideoPlaylist', description='The video media playlist files.'),
}

model GenerateVideoPlaylistResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GenerateVideoPlaylistResponseBody(name='body'),
}

/**
 * @summary Generates a live transcoding playlist and converts video files into M3U8 files. After a playlist is generated, the videos in the playlist are immediately played and the video files are transcoded based on the playback progress. Compared with offline transcoding, online transcoding significantly reduces the time spent in waiting for the videos to be transcoded and reduces transcoding and storage costs.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the project that you want to use is available in the current region. For more information, see [Project Management](https://help.aliyun.com/document_detail/478152.html).
 * *   By default, you can call this operation to process only one video, audio, or subtitle track. You can specify the number of the video, audio, or subtitle tracks that you want to process.
 * *   You can call this operation to generate a media playlist and a master playlist. For more information, see the parameter description.
 * *   This operation is a synchronous operation. Synchronous or asynchronous transcoding is triggered only during playback or pre-transcoding. You can configure the [Notification](https://help.aliyun.com/document_detail/2743997.html) parameter to obtain the transcoding task result.
 * *   For information about the feature description of this operation, see [Live transcoding](https://help.aliyun.com/document_detail/477192.html).
 * *   The data processing capability of Object Storage Service (OSS) also provides the playlist generation feature. However, this feature can generate only a media playlist, and related parameters are simplified.
 *
 * @param tmpReq GenerateVideoPlaylistRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GenerateVideoPlaylistResponse
 */
async function generateVideoPlaylistWithOptions(tmpReq: GenerateVideoPlaylistRequest, runtime: $RuntimeOptions): GenerateVideoPlaylistResponse {
  tmpReq.validate();
  var request = new GenerateVideoPlaylistShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.sourceSubtitles)) {
    request.sourceSubtitlesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.sourceSubtitles, 'SourceSubtitles', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  if (!$isNull(tmpReq.targets)) {
    request.targetsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.targets, 'Targets', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.masterURI)) {
    query['MasterURI'] = request.masterURI;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.overwritePolicy)) {
    query['OverwritePolicy'] = request.overwritePolicy;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceDuration)) {
    query['SourceDuration'] = request.sourceDuration;
  }
  if (!$isNull(request.sourceStartTime)) {
    query['SourceStartTime'] = request.sourceStartTime;
  }
  if (!$isNull(request.sourceSubtitlesShrink)) {
    query['SourceSubtitles'] = request.sourceSubtitlesShrink;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.tagsShrink)) {
    query['Tags'] = request.tagsShrink;
  }
  if (!$isNull(request.targetsShrink)) {
    query['Targets'] = request.targetsShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GenerateVideoPlaylist',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Generates a live transcoding playlist and converts video files into M3U8 files. After a playlist is generated, the videos in the playlist are immediately played and the video files are transcoded based on the playback progress. Compared with offline transcoding, online transcoding significantly reduces the time spent in waiting for the videos to be transcoded and reduces transcoding and storage costs.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the project that you want to use is available in the current region. For more information, see [Project Management](https://help.aliyun.com/document_detail/478152.html).
 * *   By default, you can call this operation to process only one video, audio, or subtitle track. You can specify the number of the video, audio, or subtitle tracks that you want to process.
 * *   You can call this operation to generate a media playlist and a master playlist. For more information, see the parameter description.
 * *   This operation is a synchronous operation. Synchronous or asynchronous transcoding is triggered only during playback or pre-transcoding. You can configure the [Notification](https://help.aliyun.com/document_detail/2743997.html) parameter to obtain the transcoding task result.
 * *   For information about the feature description of this operation, see [Live transcoding](https://help.aliyun.com/document_detail/477192.html).
 * *   The data processing capability of Object Storage Service (OSS) also provides the playlist generation feature. However, this feature can generate only a media playlist, and related parameters are simplified.
 *
 * @param request GenerateVideoPlaylistRequest
 * @return GenerateVideoPlaylistResponse
 */
async function generateVideoPlaylist(request: GenerateVideoPlaylistRequest): GenerateVideoPlaylistResponse {
  var runtime = new $RuntimeOptions{};
  return generateVideoPlaylistWithOptions(request, runtime);
}

model GenerateWebofficeTokenRequest {
  cachePreview?: boolean(name='CachePreview', description='Specifies whether to enable cache preview.

*   true: enables cache preview. The document can be previewed only and cannot be collaboratively edited.
*   false: does not enable cache preview. The document can be collaboratively edited when it is being previewed.

>  The pricing for document previews varies based on whether cache preview is enabled or disabled.

>  During a cache preview, document content search and printing are not supported.', example='false'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  externalUploaded?: boolean(name='ExternalUploaded', description='Specifies whether to allow an upload of a document to the Object Storage Service (OSS) bucket. Valid values:

*   true: Documents can be directly uploaded to OSS. The uploaded document overwrites the existing document and a new version is generated for the document. Before you upload a new document, close the existing document if it is being edited. After the document is uploaded, wait for approximately 5 minutes before you open the document again so that the new version can successfully load. Upload a new document only when the existing is closed. Otherwise, the uploaded document is overwritten when the existing document is saved.
*   false: Documents cannot be directly uploaded to OSS. If you try to upload a document, an error is returned. This is the default value.', example='false'),
  filename?: string(name='Filename', description='The name of the file. The extension must be included in the file name. By default, this parameter is set to the last depth level of the **SourceURI** parameter value.

Supported extensions (only preview supported for .pdf):

*   Word documents: .doc, .docx, .txt, .dot, .wps, .wpt, .dotx, .docm, .dotm, and .rtf
*   Presentation documents: .ppt, .pptx, .pptm, .ppsx, .ppsm, .pps, .potx, .potm, .dpt, and .dps
*   Table documents: .et, .xls, .xlt, .xlsx, .xlsm, .xltx, .xltm, and .csv
*   PDF documents: .pdf', example='test.pptx'),
  hidecmb?: boolean(name='Hidecmb', example='false'),
  notification?: Notification(name='Notification', description='The notification settings. Only SMQ messages are supported. For more information, see [WebOffice message example](https://help.aliyun.com/document_detail/2743999.html).

>  A notification is sent after the document is saved or renamed.'),
  notifyTopicName?: string(name='NotifyTopicName', example='topic1'),
  password?: string(name='Password', example='123456'),
  permission?: WebofficePermission(name='Permission', description='The user permission settings in the JSON format.

The parameter supports the following permission fields:

Each field is of type Boolean and can have a value of true and false (the default value):

*   Readonly: grants the permission to preview the document. This field is optional.
*   Rename: grants the permission to rename the document. Notification messages of a rename event can be sent only by using SMQ. This field is optional.
*   History: grants the permission to view historical versions. This field is optional.
*   Copy: grants the permission to copy the document. This field is optional.
*   Export: grants the permission to export the document as a PDF file. This field is optional.
*   Print: grants the permission to print the document. This field is optional.

>  Only online preview is supported for PDF documents. When you call the operation on a PDF document, you can set Readonly only to true.

>  To manage multiple versions of the document, you must enable versioning for the bucket that stores the document and set the History parameter to true.

>  Printing is not supported during cache preview.'),
  previewPages?: long(name='PreviewPages', example='5'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='immtest'),
  referer?: string(name='Referer', example='*'),
  sourceURI?: string(name='SourceURI', description='This parameter is required.', example='oss://imm-test/test.pptx'),
  user?: WebofficeUser(name='User', description='The user information. The user information that you want to display on the WebOffice page. If you do not specify this parameter, the user name displayed is Unknown.'),
  userData?: string(name='UserData', description='The user-defined data that you want to return in asynchronous messages. This parameter takes effect only when you specify the MNS settings in the Notification parameter. The maximum length of the value is 2,048 bytes.', example='{"file_id": "abc"}'),
  watermark?: WebofficeWatermark(name='Watermark'),
}

model GenerateWebofficeTokenShrinkRequest {
  cachePreview?: boolean(name='CachePreview', description='Specifies whether to enable cache preview.

*   true: enables cache preview. The document can be previewed only and cannot be collaboratively edited.
*   false: does not enable cache preview. The document can be collaboratively edited when it is being previewed.

>  The pricing for document previews varies based on whether cache preview is enabled or disabled.

>  During a cache preview, document content search and printing are not supported.', example='false'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The configurations of authorization chains. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  externalUploaded?: boolean(name='ExternalUploaded', description='Specifies whether to allow an upload of a document to the Object Storage Service (OSS) bucket. Valid values:

*   true: Documents can be directly uploaded to OSS. The uploaded document overwrites the existing document and a new version is generated for the document. Before you upload a new document, close the existing document if it is being edited. After the document is uploaded, wait for approximately 5 minutes before you open the document again so that the new version can successfully load. Upload a new document only when the existing is closed. Otherwise, the uploaded document is overwritten when the existing document is saved.
*   false: Documents cannot be directly uploaded to OSS. If you try to upload a document, an error is returned. This is the default value.', example='false'),
  filename?: string(name='Filename', description='The name of the file. The extension must be included in the file name. By default, this parameter is set to the last depth level of the **SourceURI** parameter value.

Supported extensions (only preview supported for .pdf):

*   Word documents: .doc, .docx, .txt, .dot, .wps, .wpt, .dotx, .docm, .dotm, and .rtf
*   Presentation documents: .ppt, .pptx, .pptm, .ppsx, .ppsm, .pps, .potx, .potm, .dpt, and .dps
*   Table documents: .et, .xls, .xlt, .xlsx, .xlsm, .xltx, .xltm, and .csv
*   PDF documents: .pdf', example='test.pptx'),
  hidecmb?: boolean(name='Hidecmb', example='false'),
  notificationShrink?: string(name='Notification', description='The notification settings. Only SMQ messages are supported. For more information, see [WebOffice message example](https://help.aliyun.com/document_detail/2743999.html).

>  A notification is sent after the document is saved or renamed.'),
  notifyTopicName?: string(name='NotifyTopicName', example='topic1'),
  password?: string(name='Password', example='123456'),
  permissionShrink?: string(name='Permission', description='The user permission settings in the JSON format.

The parameter supports the following permission fields:

Each field is of type Boolean and can have a value of true and false (the default value):

*   Readonly: grants the permission to preview the document. This field is optional.
*   Rename: grants the permission to rename the document. Notification messages of a rename event can be sent only by using SMQ. This field is optional.
*   History: grants the permission to view historical versions. This field is optional.
*   Copy: grants the permission to copy the document. This field is optional.
*   Export: grants the permission to export the document as a PDF file. This field is optional.
*   Print: grants the permission to print the document. This field is optional.

>  Only online preview is supported for PDF documents. When you call the operation on a PDF document, you can set Readonly only to true.

>  To manage multiple versions of the document, you must enable versioning for the bucket that stores the document and set the History parameter to true.

>  Printing is not supported during cache preview.'),
  previewPages?: long(name='PreviewPages', example='5'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='immtest'),
  referer?: string(name='Referer', example='*'),
  sourceURI?: string(name='SourceURI', description='This parameter is required.', example='oss://imm-test/test.pptx'),
  userShrink?: string(name='User', description='The user information. The user information that you want to display on the WebOffice page. If you do not specify this parameter, the user name displayed is Unknown.'),
  userData?: string(name='UserData', description='The user-defined data that you want to return in asynchronous messages. This parameter takes effect only when you specify the MNS settings in the Notification parameter. The maximum length of the value is 2,048 bytes.', example='{"file_id": "abc"}'),
  watermarkShrink?: string(name='Watermark'),
}

model GenerateWebofficeTokenResponseBody = {
  accessToken?: string(name='AccessToken', example='2d73dd5d87524c5e8a194c3eb5********'),
  accessTokenExpiredTime?: string(name='AccessTokenExpiredTime', example='2021-08-30T13:13:11.347146982Z'),
  refreshToken?: string(name='RefreshToken', example='e374995ec532432bb678074d36********'),
  refreshTokenExpiredTime?: string(name='RefreshTokenExpiredTime', example='2021-08-31T12:43:11.347146982Z'),
  requestId?: string(name='RequestId', example='1759315A-CB33-0A75-A72B-62D7********'),
  webofficeURL?: string(name='WebofficeURL', example='https://office-hl.imm.aliyuncs.com/office/s/dd221b2cdb44fb66e9070d1d70a8b9bbb6d6fff7?_w_tokentype=1'),
}

model GenerateWebofficeTokenResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GenerateWebofficeTokenResponseBody(name='body'),
}

/**
 * @summary Generates an access token for document preview or editing.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The operation generates an access token that is valid for 30 minutes and a refresh token that is valid for 1 day.
 * *   The returned expiration time is in UTC.
 * *   The operation supports the following document types:
 *     *   Word files: .doc, .docx, .txt, .dot, .wps, .wpt, .dotx, .docm, .dotm, and .rtf
 *     *   Presentation files: .ppt, .pptx, .pptm, .ppsx, .ppsm, .pps, .potx, .potm, .dpt, and .dps
 *     *   Spreadsheet documents: .et, .xls, .xlt, .xlsx, .xlsm, .xltx, .xltm, and .csv
 *     *   PDF files: .pdf
 * *   The operation supports an input document that is up to 200 MB in size.
 * *   The operation supports an input document that contains up to 5,000 pages.
 * *   For a project created before December 1, 2023, you are charged for previewing or editing a document in the project based on the number of times the document is opened. For a project created on or after December 1, 2023, you are charged based on the number of API operation calls made for previewing or editing a document. If you want to switch to API call-based billing for document previewing and editing, use a project created on or after December 1, 2023. In API call-based billing, one API call allows only one user to use the feature. If multiple users use the information returned by the API call, only the last user has access to the document and the access permissions of other users are revoked.
 * *   You can use the NotifyTopicName parameter to specify a Simple Message Queue (SMQ) topic in the same region as the IMM project for getting notified of file save operations. For more information about how to send and receive messages by using the SMQ SDK, see [Use queues](https://help.aliyun.com/document_detail/32449.html). For more information about the JSON example of the Message field, see [WebOffice message example](https://help.aliyun.com/document_detail/2743999.html).
 * >  To manage multiple versions of the document, you must enable versioning for the bucket that stores the document and set the History parameter to true.
 *
 * @param tmpReq GenerateWebofficeTokenRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GenerateWebofficeTokenResponse
 */
async function generateWebofficeTokenWithOptions(tmpReq: GenerateWebofficeTokenRequest, runtime: $RuntimeOptions): GenerateWebofficeTokenResponse {
  tmpReq.validate();
  var request = new GenerateWebofficeTokenShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  if (!$isNull(tmpReq.permission)) {
    request.permissionShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.permission, 'Permission', 'json');
  }
  if (!$isNull(tmpReq.user)) {
    request.userShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.user, 'User', 'json');
  }
  if (!$isNull(tmpReq.watermark)) {
    request.watermarkShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.watermark, 'Watermark', 'json');
  }
  var query = {};
  if (!$isNull(request.cachePreview)) {
    query['CachePreview'] = request.cachePreview;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.externalUploaded)) {
    query['ExternalUploaded'] = request.externalUploaded;
  }
  if (!$isNull(request.filename)) {
    query['Filename'] = request.filename;
  }
  if (!$isNull(request.hidecmb)) {
    query['Hidecmb'] = request.hidecmb;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.notifyTopicName)) {
    query['NotifyTopicName'] = request.notifyTopicName;
  }
  if (!$isNull(request.password)) {
    query['Password'] = request.password;
  }
  if (!$isNull(request.permissionShrink)) {
    query['Permission'] = request.permissionShrink;
  }
  if (!$isNull(request.previewPages)) {
    query['PreviewPages'] = request.previewPages;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.referer)) {
    query['Referer'] = request.referer;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  if (!$isNull(request.userShrink)) {
    query['User'] = request.userShrink;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  if (!$isNull(request.watermarkShrink)) {
    query['Watermark'] = request.watermarkShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GenerateWebofficeToken',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Generates an access token for document preview or editing.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The operation generates an access token that is valid for 30 minutes and a refresh token that is valid for 1 day.
 * *   The returned expiration time is in UTC.
 * *   The operation supports the following document types:
 *     *   Word files: .doc, .docx, .txt, .dot, .wps, .wpt, .dotx, .docm, .dotm, and .rtf
 *     *   Presentation files: .ppt, .pptx, .pptm, .ppsx, .ppsm, .pps, .potx, .potm, .dpt, and .dps
 *     *   Spreadsheet documents: .et, .xls, .xlt, .xlsx, .xlsm, .xltx, .xltm, and .csv
 *     *   PDF files: .pdf
 * *   The operation supports an input document that is up to 200 MB in size.
 * *   The operation supports an input document that contains up to 5,000 pages.
 * *   For a project created before December 1, 2023, you are charged for previewing or editing a document in the project based on the number of times the document is opened. For a project created on or after December 1, 2023, you are charged based on the number of API operation calls made for previewing or editing a document. If you want to switch to API call-based billing for document previewing and editing, use a project created on or after December 1, 2023. In API call-based billing, one API call allows only one user to use the feature. If multiple users use the information returned by the API call, only the last user has access to the document and the access permissions of other users are revoked.
 * *   You can use the NotifyTopicName parameter to specify a Simple Message Queue (SMQ) topic in the same region as the IMM project for getting notified of file save operations. For more information about how to send and receive messages by using the SMQ SDK, see [Use queues](https://help.aliyun.com/document_detail/32449.html). For more information about the JSON example of the Message field, see [WebOffice message example](https://help.aliyun.com/document_detail/2743999.html).
 * >  To manage multiple versions of the document, you must enable versioning for the bucket that stores the document and set the History parameter to true.
 *
 * @param request GenerateWebofficeTokenRequest
 * @return GenerateWebofficeTokenResponse
 */
async function generateWebofficeToken(request: GenerateWebofficeTokenRequest): GenerateWebofficeTokenResponse {
  var runtime = new $RuntimeOptions{};
  return generateWebofficeTokenWithOptions(request, runtime);
}

model GetBatchRequest {
  id?: string(name='Id', description='The ID of the batch processing task. For more information about how to obtain the ID, see [CreateBatch](https://help.aliyun.com/document_detail/606694.html).

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
}

model GetBatchResponseBody = {
  batch?: DataIngestion(name='Batch', description='The information about the batch processing task.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model GetBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetBatchResponseBody(name='body'),
}

/**
 * @summary Queries the information about a batch processing task.
 *
 * @param request GetBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetBatchResponse
 */
async function getBatchWithOptions(request: GetBatchRequest, runtime: $RuntimeOptions): GetBatchResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.id)) {
    query['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the information about a batch processing task.
 *
 * @param request GetBatchRequest
 * @return GetBatchResponse
 */
async function getBatch(request: GetBatchRequest): GetBatchResponse {
  var runtime = new $RuntimeOptions{};
  return getBatchWithOptions(request, runtime);
}

model GetBindingRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='dataset001'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  URI?: string(name='URI', description='The URI of the OSS bucket to which you bind the dataset.

Specify the value in the oss://${Bucket} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project.

This parameter is required.', example='oss://examplebucket'),
}

model GetBindingResponseBody = {
  binding?: Binding(name='Binding', description='The details of the binding.'),
  requestId?: string(name='RequestId', description='The request ID.', example='AEFCD467-C928-4A36-951A-6EB5A592****'),
}

model GetBindingResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetBindingResponseBody(name='body'),
}

/**
 * @summary Queries the binding relationship between a specific dataset and an Object Storage Service (OSS) bucket.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the binding relationship that you want to query exists. For information about how to create a binding relationship, see [CreateBinding](https://help.aliyun.com/document_detail/478202.html).
 *
 * @param request GetBindingRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetBindingResponse
 */
async function getBindingWithOptions(request: GetBindingRequest, runtime: $RuntimeOptions): GetBindingResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URI)) {
    query['URI'] = request.URI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetBinding',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the binding relationship between a specific dataset and an Object Storage Service (OSS) bucket.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Make sure that the binding relationship that you want to query exists. For information about how to create a binding relationship, see [CreateBinding](https://help.aliyun.com/document_detail/478202.html).
 *
 * @param request GetBindingRequest
 * @return GetBindingResponse
 */
async function getBinding(request: GetBindingRequest): GetBindingResponse {
  var runtime = new $RuntimeOptions{};
  return getBindingWithOptions(request, runtime);
}

model GetDRMLicenseRequest {
  keyId?: string(name='KeyId', example='AESzB8SQgpACioSEJ3yqiFwruAOUgIvlCx*****'),
  notifyEndpoint?: string(name='NotifyEndpoint', example='http://1111111111.mns.cn-hangzhou.aliyuncs.com'),
  notifyTopicName?: string(name='NotifyTopicName', example='topic1'),
  projectName?: string(name='ProjectName', example='immtest'),
  protectionSystem?: string(name='ProtectionSystem', example='widevine'),
}

model GetDRMLicenseResponseBody = {
  deviceInfo?: string(name='DeviceInfo', example='IEEE1284DeviceID'),
  license?: string(name='License', example='AESzB8SQgpACioSEJ3yqiFwruAOUgIvlCx*****'),
  requestId?: string(name='RequestId', example='896ABAD1-C452-4BED-B5E0-302955F*****'),
  states?: long(name='States', example='200'),
}

model GetDRMLicenseResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetDRMLicenseResponseBody(name='body'),
}

/**
 * @deprecated OpenAPI GetDRMLicense is deprecated
 *
 * @summary drmlicense获取
 *
 * @param request GetDRMLicenseRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetDRMLicenseResponse
 */
// Deprecated
async function getDRMLicenseWithOptions(request: GetDRMLicenseRequest, runtime: $RuntimeOptions): GetDRMLicenseResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.keyId)) {
    query['KeyId'] = request.keyId;
  }
  if (!$isNull(request.notifyEndpoint)) {
    query['NotifyEndpoint'] = request.notifyEndpoint;
  }
  if (!$isNull(request.notifyTopicName)) {
    query['NotifyTopicName'] = request.notifyTopicName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.protectionSystem)) {
    query['ProtectionSystem'] = request.protectionSystem;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetDRMLicense',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @deprecated OpenAPI GetDRMLicense is deprecated
 *
 * @summary drmlicense获取
 *
 * @param request GetDRMLicenseRequest
 * @return GetDRMLicenseResponse
 */
// Deprecated
async function getDRMLicense(request: GetDRMLicenseRequest): GetDRMLicenseResponse {
  var runtime = new $RuntimeOptions{};
  return getDRMLicenseWithOptions(request, runtime);
}

model GetDatasetRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='dataset001'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  withStatistics?: boolean(name='WithStatistics', description='Specifies whether to enable real-time retrieval of file statistics. Default value: false.

*   If you set the value to true, FileCount and TotalFileSize in the response return true and valid values.
*   If you set the value to false, FileCount and TotalFileSize in the response return invalid values or 0.', example='true'),
}

model GetDatasetResponseBody = {
  dataset?: Dataset(name='Dataset', description='The dataset.'),
  requestId?: string(name='RequestId', description='The request ID.', example='6D74B3A9-5AC0-49F9-914D-E01589D3****'),
}

model GetDatasetResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetDatasetResponseBody(name='body'),
}

/**
 * @summary Queries a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The GetDataset operation supports real-time retrieval of file statistics. You can specify WithStatistics to enable real-time retrieval of file statistics.
 *
 * @param request GetDatasetRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetDatasetResponse
 */
async function getDatasetWithOptions(request: GetDatasetRequest, runtime: $RuntimeOptions): GetDatasetResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.withStatistics)) {
    query['WithStatistics'] = request.withStatistics;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetDataset',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   The GetDataset operation supports real-time retrieval of file statistics. You can specify WithStatistics to enable real-time retrieval of file statistics.
 *
 * @param request GetDatasetRequest
 * @return GetDatasetResponse
 */
async function getDataset(request: GetDatasetRequest): GetDatasetResponse {
  var runtime = new $RuntimeOptions{};
  return getDatasetWithOptions(request, runtime);
}

model GetDecodeBlindWatermarkResultRequest {
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  taskId?: string(name='TaskId', description='The ID of the task. You can obtain the ID of the task from the response of the CreateDecodeBlindWatermarkTask operation.

This parameter is required.', example='DecodeBlindWatermark-c09b0943-ed79-4983-8dbe-7a882574****'),
  taskType?: string(name='TaskType', description='The type of the task.

*   Set the value to DecodeBlindWatermark.

This parameter is required.', example='DecodeBlindWatermark'),
}

model GetDecodeBlindWatermarkResultResponseBody = {
  code?: string(name='Code', description='The error code of the task.', example='ResourceNotFound'),
  content?: string(name='Content', description='The watermark content.'),
  endTime?: string(name='EndTime', description='The end time of the task.', example='2024-03-03T09:45:56.87Z'),
  eventId?: string(name='EventId', description='The event ID.', example='2C2-1I0EG57VR37J4rQ8oKG6C9*****'),
  message?: string(name='Message', description='The error message of the task.', example='The specified resource project is not found.'),
  projectName?: string(name='ProjectName', description='The project name.', example='test-project'),
  requestId?: string(name='RequestId', description='The request ID.', example='93126E40-0296-4129-95E3-AFAC709372E5'),
  startTime?: string(name='StartTime', description='The start time of the task.', example='2024-03-03T09:44:31.029Z'),
  status?: string(name='Status', description='The task status.', example='Succeeded'),
  taskId?: string(name='TaskId', description='The task ID.', example='DecodeBlindWatermark-c09b0943-ed79-4983-8dbe-7a882574****'),
  taskType?: string(name='TaskType', description='The task type.', example='DecodeBlindWatermark'),
  userData?: string(name='UserData', description='The user data of the task.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model GetDecodeBlindWatermarkResultResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetDecodeBlindWatermarkResultResponseBody(name='body'),
}

/**
 * @summary Queries the result of an invisible watermark parsing task.
 *
 * @description *   Before you call this operation, make sure that an Intelligent Media Management (IMM) project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   Before you call this operation, make sure that an invisible watermark task is created and the task ID is obtained.``
 *
 * @param request GetDecodeBlindWatermarkResultRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetDecodeBlindWatermarkResultResponse
 */
async function getDecodeBlindWatermarkResultWithOptions(request: GetDecodeBlindWatermarkResultRequest, runtime: $RuntimeOptions): GetDecodeBlindWatermarkResultResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.taskId)) {
    query['TaskId'] = request.taskId;
  }
  if (!$isNull(request.taskType)) {
    query['TaskType'] = request.taskType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetDecodeBlindWatermarkResult',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the result of an invisible watermark parsing task.
 *
 * @description *   Before you call this operation, make sure that an Intelligent Media Management (IMM) project is created. For information about how to create a project, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   Before you call this operation, make sure that an invisible watermark task is created and the task ID is obtained.``
 *
 * @param request GetDecodeBlindWatermarkResultRequest
 * @return GetDecodeBlindWatermarkResultResponse
 */
async function getDecodeBlindWatermarkResult(request: GetDecodeBlindWatermarkResultRequest): GetDecodeBlindWatermarkResultResponse {
  var runtime = new $RuntimeOptions{};
  return getDecodeBlindWatermarkResultWithOptions(request, runtime);
}

model GetFigureClusterRequest {
  datasetName?: string(name='DatasetName', description='The dataset name.[](~~CreateDataset~~)

This parameter is required.', example='dataset001'),
  objectId?: string(name='ObjectId', description='The ID of the face clustering task. You can obtain the ID from the face clustering information returned after you call the [QueryFigureClusters](~~QueryFigureClusters~~) operation.

This parameter is required.', example='Cluster-1f2e1a2c-d5ee-4bc5-84f6-fef94ea****'),
  projectName?: string(name='ProjectName', description='The project name.[](~~CreateProject~~)

This parameter is required.', example='immtest'),
}

model GetFigureClusterResponseBody = {
  figureCluster?: FigureCluster(name='FigureCluster', description='The information about the face cluster.'),
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
}

model GetFigureClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetFigureClusterResponseBody(name='body'),
}

/**
 * @summary Obtains basic information about face clustering, including the creation time, number of images, and cover.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Before you call this operation, make sure that a face clustering task is created to group all faces in a dataset. For information about how to create a face clustering task, see [CreateFigureClusteringTask](~~CreateFigureClusteringTask~~). For information about how to create a dataset, see [CreateDataset](~~CreateDataset~~).
 *
 * @param request GetFigureClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetFigureClusterResponse
 */
async function getFigureClusterWithOptions(request: GetFigureClusterRequest, runtime: $RuntimeOptions): GetFigureClusterResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetFigureCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Obtains basic information about face clustering, including the creation time, number of images, and cover.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Before you call this operation, make sure that a face clustering task is created to group all faces in a dataset. For information about how to create a face clustering task, see [CreateFigureClusteringTask](~~CreateFigureClusteringTask~~). For information about how to create a dataset, see [CreateDataset](~~CreateDataset~~).
 *
 * @param request GetFigureClusterRequest
 * @return GetFigureClusterResponse
 */
async function getFigureCluster(request: GetFigureClusterRequest): GetFigureClusterResponse {
  var runtime = new $RuntimeOptions{};
  return getFigureClusterWithOptions(request, runtime);
}

model GetFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  URI?: string(name='URI', description='The URI of the file. Make sure that the file is indexed****.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

Specify the URI of the file in Photo and Drive Service in the pds://domains/${domain}/drives/${drive}/files/${file}/revisions/${revision} format.

This parameter is required.', example='oss://test-bucket/test-object'),
  withFields?: [ string ](name='WithFields'),
}

model GetFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  URI?: string(name='URI', description='The URI of the file. Make sure that the file is indexed****.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.

Specify the URI of the file in Photo and Drive Service in the pds://domains/${domain}/drives/${drive}/files/${file}/revisions/${revision} format.

This parameter is required.', example='oss://test-bucket/test-object'),
  withFieldsShrink?: string(name='WithFields'),
}

model GetFileMetaResponseBody = {
  files?: [
    File
  ](name='Files', description='The metadata returned.'),
  requestId?: string(name='RequestId', description='The request ID.', example='7F84C6D9-5AC0-49F9-914D-F02678E3****'),
}

model GetFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetFileMetaResponseBody(name='body'),
}

/**
 * @summary Queries metadata of a file whose metadata is indexed into the dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 *
 * @param tmpReq GetFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetFileMetaResponse
 */
async function getFileMetaWithOptions(tmpReq: GetFileMetaRequest, runtime: $RuntimeOptions): GetFileMetaResponse {
  tmpReq.validate();
  var request = new GetFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.withFields)) {
    request.withFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.withFields, 'WithFields', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.URI)) {
    query['URI'] = request.URI;
  }
  if (!$isNull(request.withFieldsShrink)) {
    query['WithFields'] = request.withFieldsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries metadata of a file whose metadata is indexed into the dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 *
 * @param request GetFileMetaRequest
 * @return GetFileMetaResponse
 */
async function getFileMeta(request: GetFileMetaRequest): GetFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return getFileMetaWithOptions(request, runtime);
}

model GetImageModerationResultRequest {
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='test-project'),
  taskId?: string(name='TaskId', description='The task ID.

This parameter is required.', example='ImageModeration-ff207203-3f93-4645-a041-7b8f0f******'),
  taskType?: string(name='TaskType', description='The type of the task.

This parameter is required.', example='ImageModeration'),
}

model GetImageModerationResultResponseBody = {
  code?: string(name='Code', description='The error code of the task.', example='ResourceNotFound'),
  endTime?: string(name='EndTime', description='The end time of the task.', example='2023-04-03T09:44:32Z'),
  eventId?: string(name='EventId', description='The event ID.', example='1B6-1XBMX3BixLMILvXVGtlkr******'),
  message?: string(name='Message', description='The error message of the task.', example='The specified resource TaskId is not found.'),
  moderationResult?: {
    categories?: [ string ](name='Categories', description='List of categories.'),
    frames?: {
      blockFrames?: [ 
        {
          label?: string(name='Label', description='The label of the violation.', example='{
      "test": "val"
}'),
          offset?: int32(name='Offset', description='The offset of the frame.', example='2'),
          rate?: double(name='Rate', description='The confidence level of the violation.', example='30'),
        }
      ](name='BlockFrames', description='The violated frames.'),
      totalCount?: int32(name='TotalCount', description='The total number of detected frames.', example='30'),
    }(name='Frames', description='The information about video and motion detection frames.'),
    suggestion?: string(name='Suggestion', description='The recommended operation. Valid values:

*   pass: The image has passed the check. No action is required.
*   review: The image contains suspected violations and requires human review.
*   block: The image contains violations. Further actions, such as deleting or blocking the image, are recommended.', example='block'),
    URI?: string(name='URI', description='The OSS URI of the file. The URI follows the oss://${bucketname}/${objectname} format. bucketname indicates the name of an OSS bucket that is in the same region as the current project, and objectname is the file path.', example='oss://test-bucket/test-object'),
  }(name='ModerationResult', description='The result of the image compliance detection task.'),
  projectName?: string(name='ProjectName', description='The name of the project.', example='test-project'),
  requestId?: string(name='RequestId', description='The request ID.', example='E6A120B1-BEB3-0F63-A7C2-0783B6******'),
  startTime?: string(name='StartTime', description='The start time of the task.', example='2023-04-03T09:44:31.029Z'),
  status?: string(name='Status', description='The task status. Valid values:

*   Running
*   Succeeded
*   Failed', example='Succeeded'),
  taskId?: string(name='TaskId', description='The task ID.', example='ImageModeration-ff207203-3f93-4645-a041-7b8f0f******'),
  taskType?: string(name='TaskType', description='The type of the task.', example='ImageModeration'),
  userData?: string(name='UserData', description='The custom information.', example='{
      "fileId": "123"
}'),
}

model GetImageModerationResultResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetImageModerationResultResponseBody(name='body'),
}

/**
 * @summary Queries an image compliance detection task.
 *
 * @param request GetImageModerationResultRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetImageModerationResultResponse
 */
async function getImageModerationResultWithOptions(request: GetImageModerationResultRequest, runtime: $RuntimeOptions): GetImageModerationResultResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.taskId)) {
    query['TaskId'] = request.taskId;
  }
  if (!$isNull(request.taskType)) {
    query['TaskType'] = request.taskType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetImageModerationResult',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries an image compliance detection task.
 *
 * @param request GetImageModerationResultRequest
 * @return GetImageModerationResultResponse
 */
async function getImageModerationResult(request: GetImageModerationResultRequest): GetImageModerationResultResponse {
  var runtime = new $RuntimeOptions{};
  return getImageModerationResultWithOptions(request, runtime);
}

model GetOSSBucketAttachmentRequest {
  OSSBucket?: string(name='OSSBucket', description='The name of the OSS bucket.

This parameter is required.', example='examplebucket'),
}

model GetOSSBucketAttachmentResponseBody = {
  createTime?: string(name='CreateTime', description='The time when the dataset was created.', example='""2023-12-19T17:29:34.790931971+08:00"'),
  description?: string(name='Description', description='The description of the dataset.', example='"Dataset"'),
  projectName?: string(name='ProjectName', description='The name of the project.', example='immtest'),
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
  updateTime?: string(name='UpdateTime', description='The time when the dataset was last updated.', example='"2023-12-19T17:29:34.790931971+08:00"'),
}

model GetOSSBucketAttachmentResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetOSSBucketAttachmentResponseBody(name='body'),
}

/**
 * @summary Queries the name of the project bound to an Object Storage Service (OSS) bucket.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Before you call this operation, make sure that [the project whose name you want to query is bound to the specified OSS bucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request GetOSSBucketAttachmentRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetOSSBucketAttachmentResponse
 */
async function getOSSBucketAttachmentWithOptions(request: GetOSSBucketAttachmentRequest, runtime: $RuntimeOptions): GetOSSBucketAttachmentResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.OSSBucket)) {
    query['OSSBucket'] = request.OSSBucket;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetOSSBucketAttachment',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the name of the project bound to an Object Storage Service (OSS) bucket.
 *
 * @description *   **Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**
 * *   Before you call this operation, make sure that [the project whose name you want to query is bound to the specified OSS bucket](https://help.aliyun.com/document_detail/478206.html).
 *
 * @param request GetOSSBucketAttachmentRequest
 * @return GetOSSBucketAttachmentResponse
 */
async function getOSSBucketAttachment(request: GetOSSBucketAttachmentRequest): GetOSSBucketAttachmentResponse {
  var runtime = new $RuntimeOptions{};
  return getOSSBucketAttachmentWithOptions(request, runtime);
}

model GetProjectRequest {
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  withStatistics?: boolean(name='WithStatistics', description='Specifies whether to enable real-time retrieval of file statistics. Default value: false.

*   If you set the value to true, the returned values of FileCount and TotalFileSize in the response are valid.
*   If you set the value to false, the returned values of FileCount and TotalFileSize in the response are invalid or equal to 0.', example='true'),
}

model GetProjectResponseBody = {
  project?: Project(name='Project', description='The project information.'),
  requestId?: string(name='RequestId', description='The request ID.', example='5A022F78-B9A8-4ACC-BB6B-B3597553'),
}

model GetProjectResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetProjectResponseBody(name='body'),
}

/**
 * @summary Queries the basic information, datasets, and file statistics of a project.
 *
 * @description When you call this operation, you can enable the real-time retrieval of file statistics based on your business requirements. For more information, see the "Request parameters" section of this topic.
 *
 * @param request GetProjectRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetProjectResponse
 */
async function getProjectWithOptions(request: GetProjectRequest, runtime: $RuntimeOptions): GetProjectResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.withStatistics)) {
    query['WithStatistics'] = request.withStatistics;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetProject',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the basic information, datasets, and file statistics of a project.
 *
 * @description When you call this operation, you can enable the real-time retrieval of file statistics based on your business requirements. For more information, see the "Request parameters" section of this topic.
 *
 * @param request GetProjectRequest
 * @return GetProjectResponse
 */
async function getProject(request: GetProjectRequest): GetProjectResponse {
  var runtime = new $RuntimeOptions{};
  return getProjectWithOptions(request, runtime);
}

model GetStoryRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='id1'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model GetStoryResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='1B3D5E0A-D8B8-4DA0-8127-ED32C851****'),
  story?: Story(name='Story', description='The information about the story.'),
}

model GetStoryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetStoryResponseBody(name='body'),
}

/**
 * @summary Queries a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param request GetStoryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetStoryResponse
 */
async function getStoryWithOptions(request: GetStoryRequest, runtime: $RuntimeOptions): GetStoryResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetStory',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries a story.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param request GetStoryRequest
 * @return GetStoryResponse
 */
async function getStory(request: GetStoryRequest): GetStoryResponse {
  var runtime = new $RuntimeOptions{};
  return getStoryWithOptions(request, runtime);
}

model GetTaskRequest {
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='immtest'),
  requestDefinition?: boolean(name='RequestDefinition', description='Specifies whether to return the initial request parameters that are used to create the task. Default value: False.', example='True'),
  taskId?: string(name='TaskId', description='The ID of the task. You can obtain the ID of a task after you create the task.

This parameter is required.', example='c2b277b9-0d30-4882-ad6d-ad661382****'),
  taskType?: string(name='TaskType', description='The type of the task. For information about valid values, see [Task types](https://help.aliyun.com/document_detail/2743993.html).

This parameter is required.', example='VideoLabelClassification'),
}

model GetTaskResponseBody = {
  code?: string(name='Code', description='The error code of the task.', example='ResourceNotFound'),
  endTime?: string(name='EndTime', description='The end time of the task.', example='2021-12-24T03:01:49.480109219Z'),
  eventId?: string(name='EventId', description='The event ID.', example='2F6-1Bz99Xi93EnRpNEyLudILJm****'),
  message?: string(name='Message', description='The error message of the task.', example='The specified resource project is not found.'),
  progress?: int32(name='Progress', description='The task progress. Valid values: 0 to 100. Unit: %.

>  This parameter is valid only if the task is in the `Running` state.``', example='100'),
  projectName?: string(name='ProjectName', description='The project name.', example='immtest'),
  requestId?: string(name='RequestId', description='The request ID.', example='2C5C1E0F-D8B8-4DA0-8127-EC32C771****'),
  startTime?: string(name='StartTime', description='The start time of the task.', example='2021-12-24T03:01:41.662060377Z'),
  status?: string(name='Status', description='The status of the task. Valid values:

*   RUNNING: The task is running.
*   Succeeded: The task is successful.
*   Failed: The task failed.', example='Running'),
  tags?: map[string]any(name='Tags', description='The tags. This parameter is returned only if you specified Tags when you created the task.', example='{"test": "val1"}'),
  taskId?: string(name='TaskId', description='The task ID.', example='c2b277b9-0d30-4882-ad6d-ad661382****'),
  taskRequestDefinition?: string(name='TaskRequestDefinition', description='The initial request parameters used to create the task.', example='{
	"ProjectName":"test-project",
	"CompressedFormat":"zip",
	"TargetURI":"oss://test-bucket/output/test.zip",
	"Sources":[{"URI":"oss://test-bucket/input/test.jpg"}]
}'),
  taskType?: string(name='TaskType', description='The type of the task. For more information, see [Task types](https://help.aliyun.com/document_detail/2743993.html).', example='VideoLabelClassification'),
  userData?: string(name='UserData', description='The user data of the task.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model GetTaskResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetTaskResponseBody(name='body'),
}

/**
 * @summary Queries information about an asynchronous task. Intelligent Media Management (IMM) has multiple asynchronous data processing capabilities, each of which has its own operation for creating tasks. For example, you can call the CreateFigureClusteringTask operation to create a face clustering task and the CreateFileCompressionTask operation to create a file compression task. The GetTask operation is a general operation. You can call this operation to query information about asynchronous tasks by task ID or type.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.
 *
 * @param request GetTaskRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetTaskResponse
 */
async function getTaskWithOptions(request: GetTaskRequest, runtime: $RuntimeOptions): GetTaskResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.requestDefinition)) {
    query['RequestDefinition'] = request.requestDefinition;
  }
  if (!$isNull(request.taskId)) {
    query['TaskId'] = request.taskId;
  }
  if (!$isNull(request.taskType)) {
    query['TaskType'] = request.taskType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetTask',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries information about an asynchronous task. Intelligent Media Management (IMM) has multiple asynchronous data processing capabilities, each of which has its own operation for creating tasks. For example, you can call the CreateFigureClusteringTask operation to create a face clustering task and the CreateFileCompressionTask operation to create a file compression task. The GetTask operation is a general operation. You can call this operation to query information about asynchronous tasks by task ID or type.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.
 *
 * @param request GetTaskRequest
 * @return GetTaskResponse
 */
async function getTask(request: GetTaskRequest): GetTaskResponse {
  var runtime = new $RuntimeOptions{};
  return getTaskWithOptions(request, runtime);
}

model GetTriggerRequest {
  id?: string(name='Id', description='The ID of the trigger. You can obtain the ID from the response parameters of the [CreateTrigger](https://help.aliyun.com/document_detail/479912.html) operation.

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
}

model GetTriggerResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='4A7A2D0E-D8B8-4DA0-8127-EB32C6******'),
  trigger?: DataIngestion(name='Trigger', description='The trigger information.'),
}

model GetTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetTriggerResponseBody(name='body'),
}

/**
 * @summary Queries the information about a trigger.
 *
 * @param request GetTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetTriggerResponse
 */
async function getTriggerWithOptions(request: GetTriggerRequest, runtime: $RuntimeOptions): GetTriggerResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.id)) {
    query['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the information about a trigger.
 *
 * @param request GetTriggerRequest
 * @return GetTriggerResponse
 */
async function getTrigger(request: GetTriggerRequest): GetTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return getTriggerWithOptions(request, runtime);
}

model GetVideoLabelClassificationResultRequest {
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='immtest'),
  taskId?: string(name='TaskId', description='The task ID, which is obtained from response parameters of [CreateVideoLabelClassificationTask](https://help.aliyun.com/document_detail/478223.html).

This parameter is required.', example='VideoLabelClassification-2f157087-91df-4fda-8c3e-232407ec****'),
  taskType?: string(name='TaskType', description='The type of the task. Valid values:

This parameter is required.', example='VideoLabelClassification'),
}

model GetVideoLabelClassificationResultResponseBody = {
  code?: string(name='Code', description='The error code of the task.', example='ResourceNotFound'),
  endTime?: string(name='EndTime', description='The end time of the task.', example='2021-12-24T03:00:42.134971294Z'),
  eventId?: string(name='EventId', description='The event ID.', example='2F6-1Bz99Xi93EnRpNEyLudILJm****'),
  labels?: [
    Label
  ](name='Labels', description='The labels.'),
  message?: string(name='Message', description='The error message of the task.', example='The specified resource project is not found.'),
  projectName?: string(name='ProjectName', description='The project name.', example='immtest'),
  requestId?: string(name='RequestId', description='The request ID.', example='7F84C6D9-5AC0-49F9-914D-F02678E3****'),
  startTime?: string(name='StartTime', description='The start time of the task.', example='2021-12-24T03:00:38.892462383Z'),
  status?: string(name='Status', description='The task status.', example='Succeeded'),
  taskId?: string(name='TaskId', description='The task ID.', example='VideoLabelClassification-2f157087-91df-4fda-8c3e-232407ec****'),
  taskType?: string(name='TaskType', description='The type of the task.', example='VideoLabelClassification'),
  userData?: string(name='UserData', description='The custom information.', example='{"ID": "user1","Name": "test-user1","Avatar": "http://example.com?id=user1"}'),
}

model GetVideoLabelClassificationResultResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetVideoLabelClassificationResultResponseBody(name='body'),
}

/**
 * @summary Queries the results of a video label detection task.
 *
 * @description *   Before you call this operation, make sure that a [project](https://help.aliyun.com/document_detail/478273.html) is created on Intelligent Media Management (IMM). For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   Before you call this operation, make sure that a video label detection task is created and the `TaskId` of the task is obtained. For more information, see [CreateVideoLabelClassificationTask](https://help.aliyun.com/document_detail/478223.html).
 *
 * @param request GetVideoLabelClassificationResultRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetVideoLabelClassificationResultResponse
 */
async function getVideoLabelClassificationResultWithOptions(request: GetVideoLabelClassificationResultRequest, runtime: $RuntimeOptions): GetVideoLabelClassificationResultResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.taskId)) {
    query['TaskId'] = request.taskId;
  }
  if (!$isNull(request.taskType)) {
    query['TaskType'] = request.taskType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetVideoLabelClassificationResult',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the results of a video label detection task.
 *
 * @description *   Before you call this operation, make sure that a [project](https://help.aliyun.com/document_detail/478273.html) is created on Intelligent Media Management (IMM). For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).
 * *   Before you call this operation, make sure that a video label detection task is created and the `TaskId` of the task is obtained. For more information, see [CreateVideoLabelClassificationTask](https://help.aliyun.com/document_detail/478223.html).
 *
 * @param request GetVideoLabelClassificationResultRequest
 * @return GetVideoLabelClassificationResultResponse
 */
async function getVideoLabelClassificationResult(request: GetVideoLabelClassificationResultRequest): GetVideoLabelClassificationResultResponse {
  var runtime = new $RuntimeOptions{};
  return getVideoLabelClassificationResultWithOptions(request, runtime);
}

model GetVideoModerationResultRequest {
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
  taskId?: string(name='TaskId', description='This parameter is required.', example='VideoModeration-d0f0df1d-531d-4ab4-b353-e7f475******'),
  taskType?: string(name='TaskType', description='This parameter is required.', example='VideoModeration'),
}

model GetVideoModerationResultResponseBody = {
  code?: string(name='Code', example='ResourceNotFound'),
  endTime?: string(name='EndTime', example='2023-04-03T10:20:56.87Z'),
  eventId?: string(name='EventId', example='05C-1XBQvsG2Tn5kBx2dUWo43******'),
  message?: string(name='Message', example='The specified resource TaskId is not found.'),
  moderationResult?: {
    categories?: [ string ](name='Categories'),
    frames?: {
      blockFrames?: [ 
        {
          label?: string(name='Label', example='{"teat":"val"}'),
          offset?: int32(name='Offset', example='1'),
          rate?: double(name='Rate', example='10'),
        }
      ](name='BlockFrames'),
      totalCount?: int32(name='TotalCount', example='12'),
    }(name='Frames'),
    suggestion?: string(name='Suggestion', example='block'),
    URI?: string(name='URI', example='oss://test-bucket/test-object'),
  }(name='ModerationResult'),
  projectName?: string(name='ProjectName', example='test-project'),
  requestId?: string(name='RequestId', example='VideoModeration-d0f0df1d-531d-4ab4-b353-e7f475******'),
  startTime?: string(name='StartTime', example='2023-04-03T10:20:41.432Z'),
  status?: string(name='Status', example='Succeeded'),
  taskId?: string(name='TaskId', example='VideoModeration-d0f0df1d-531d-4ab4-b353-e7f4750******'),
  taskType?: string(name='TaskType', example='VideoModeration'),
  userData?: string(name='UserData'),
}

model GetVideoModerationResultResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: GetVideoModerationResultResponseBody(name='body'),
}

/**
 * @summary 获取视频审核任务结果
 *
 * @param request GetVideoModerationResultRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return GetVideoModerationResultResponse
 */
async function getVideoModerationResultWithOptions(request: GetVideoModerationResultRequest, runtime: $RuntimeOptions): GetVideoModerationResultResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.taskId)) {
    query['TaskId'] = request.taskId;
  }
  if (!$isNull(request.taskType)) {
    query['TaskType'] = request.taskType;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'GetVideoModerationResult',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary 获取视频审核任务结果
 *
 * @param request GetVideoModerationResultRequest
 * @return GetVideoModerationResultResponse
 */
async function getVideoModerationResult(request: GetVideoModerationResultRequest): GetVideoModerationResultResponse {
  var runtime = new $RuntimeOptions{};
  return getVideoModerationResultWithOptions(request, runtime);
}

model IndexFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  file?: InputFile(name='File', description='The file for which you want to create a metadata index. The value must be in the JSON format.

This parameter is required.'),
  notification?: Notification(name='Notification', description='The notification settings. For more information, see the "Metadata indexing" section of the [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html) topic.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  userData?: string(name='UserData'),
}

model IndexFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  fileShrink?: string(name='File', description='The file for which you want to create a metadata index. The value must be in the JSON format.

This parameter is required.'),
  notificationShrink?: string(name='Notification', description='The notification settings. For more information, see the "Metadata indexing" section of the [Asynchronous message examples](https://help.aliyun.com/document_detail/2743997.html) topic.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  userData?: string(name='UserData'),
}

model IndexFileMetaResponseBody = {
  eventId?: string(name='EventId', description='The event ID.', example='30F-1D8FxFzDXKJH9YQdve4CjR****'),
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model IndexFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: IndexFileMetaResponseBody(name='body'),
}

/**
 * @summary Creates an index from metadata extracted by using techniques such as label recognition, face detection, and location detection from input files. You can retrieve data from the same dataset by using multiple methods.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   For information about how to create indexes from metadata, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).
 * *   For information about the limits on the maximum number and size of index files that you can create, see the "Limits on datasets" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic. For information about how to create a dataset, see the "CreateDataset" topic.
 * *   For information about the regions in which you can create index files from metadata, see the "Datasets and indexes" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic.
 * *   After you create an index from metadata, you can try [simple query](https://help.aliyun.com/document_detail/478175.html) to retrieve data. For information about other query capabilities, see [Query and statistics](https://help.aliyun.com/document_detail/2402363.html). You can also [create a face clustering task](https://help.aliyun.com/document_detail/478180.html) to group faces. For information about other clustering capabilities, see [Intelligent management](https://help.aliyun.com/document_detail/2402365.html).
 * **
 * **Usage notes**
 * *   The IndexFileMeta operation is asynchronous, indicating that it takes some time to process the data after a request is submitted. After the processing is complete, the metadata is stored in your dataset. The amount of time it takes for this process varies based on [the workflow template, the operator](https://help.aliyun.com/document_detail/466304.html), and the content of the file, ranging from several seconds to several minutes or even longer. You can subscribe to [Simple Message Service](https://help.aliyun.com/document_detail/2743997.html) for task completion notifications.
 *
 * @param tmpReq IndexFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return IndexFileMetaResponse
 */
async function indexFileMetaWithOptions(tmpReq: IndexFileMetaRequest, runtime: $RuntimeOptions): IndexFileMetaResponse {
  tmpReq.validate();
  var request = new IndexFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.file)) {
    request.fileShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.file, 'File', 'json');
  }
  if (!$isNull(tmpReq.notification)) {
    request.notificationShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.notification, 'Notification', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.fileShrink)) {
    query['File'] = request.fileShrink;
  }
  if (!$isNull(request.notificationShrink)) {
    query['Notification'] = request.notificationShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.userData)) {
    query['UserData'] = request.userData;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'IndexFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Creates an index from metadata extracted by using techniques such as label recognition, face detection, and location detection from input files. You can retrieve data from the same dataset by using multiple methods.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   For information about how to create indexes from metadata, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).
 * *   For information about the limits on the maximum number and size of index files that you can create, see the "Limits on datasets" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic. For information about how to create a dataset, see the "CreateDataset" topic.
 * *   For information about the regions in which you can create index files from metadata, see the "Datasets and indexes" section of the [Limits](https://help.aliyun.com/document_detail/475569.html) topic.
 * *   After you create an index from metadata, you can try [simple query](https://help.aliyun.com/document_detail/478175.html) to retrieve data. For information about other query capabilities, see [Query and statistics](https://help.aliyun.com/document_detail/2402363.html). You can also [create a face clustering task](https://help.aliyun.com/document_detail/478180.html) to group faces. For information about other clustering capabilities, see [Intelligent management](https://help.aliyun.com/document_detail/2402365.html).
 * **
 * **Usage notes**
 * *   The IndexFileMeta operation is asynchronous, indicating that it takes some time to process the data after a request is submitted. After the processing is complete, the metadata is stored in your dataset. The amount of time it takes for this process varies based on [the workflow template, the operator](https://help.aliyun.com/document_detail/466304.html), and the content of the file, ranging from several seconds to several minutes or even longer. You can subscribe to [Simple Message Service](https://help.aliyun.com/document_detail/2743997.html) for task completion notifications.
 *
 * @param request IndexFileMetaRequest
 * @return IndexFileMetaResponse
 */
async function indexFileMeta(request: IndexFileMetaRequest): IndexFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return indexFileMetaWithOptions(request, runtime);
}

model ListBatchesRequest {
  maxResults?: int32(name='MaxResults', description='The maximum number of results to return. Valid values: 0 to 100.

If you do not specify this parameter or set the parameter to 0, the default value of 100 is used.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token.

The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter. The next call to the operation returns results lexicographically after the NextToken parameter value.

You do not need to specify this parameter in your initial request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Valid values:

*   ASC: sorts the results in ascending order. This is the default sort order.
*   DES: sorts the results in descending order.', example='ASC'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. Valid values:

*   CreateTime
*   UpdateTime', example='2020-11-11T06:51:17.5Z'),
  state?: string(name='State', description='The task status.

*   Ready: The task is newly created and ready.
*   Running: The task is running.
*   Failed: The task failed and cannot be automatically recovered.
*   Suspended: The task is suspended.
*   Succeeded: The task is successful.', example='Succeed'),
  tagSelector?: string(name='TagSelector', description='The custom tag. You can use this parameter to query tasks that have the specified tag.', example='test=val1'),
}

model ListBatchesResponseBody = {
  batches?: [
    DataIngestion
  ](name='Batches', description='The batch processing tasks.'),
  nextToken?: string(name='NextToken', description='The pagination token.

The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter. The next call to the operation returns results lexicographically after the NextToken parameter value.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpw****'),
  requestId?: string(name='RequestId', description='The request ID.', example='FEDC9B1F-30F2-4C1F-8ED2-B7860187****'),
}

model ListBatchesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListBatchesResponseBody(name='body'),
}

/**
 * @summary Queries batch processing tasks. You can query batch processing tasks based on conditions such task tags and status. The results can be sorted.
 *
 * @param request ListBatchesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListBatchesResponse
 */
async function listBatchesWithOptions(request: ListBatchesRequest, runtime: $RuntimeOptions): ListBatchesResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.state)) {
    query['State'] = request.state;
  }
  if (!$isNull(request.tagSelector)) {
    query['TagSelector'] = request.tagSelector;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListBatches',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries batch processing tasks. You can query batch processing tasks based on conditions such task tags and status. The results can be sorted.
 *
 * @param request ListBatchesRequest
 * @return ListBatchesResponse
 */
async function listBatches(request: ListBatchesRequest): ListBatchesResponse {
  var runtime = new $RuntimeOptions{};
  return listBatchesWithOptions(request, runtime);
}

model ListBindingsRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  maxResults?: long(name='MaxResults', description='*   The maximum number of bindings to return. Valid values: 0 to 200.
*   If you do not specify this parameter or set the parameter to 0, the default value of 100 is used.', example='1'),
  nextToken?: string(name='NextToken', description='*   The pagination token that is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter.
*   The next call to the operation returns results lexicographically after the NextToken parameter value.
*   You do not need to specify this parameter in your initial request.', example='immtest:dataset001:examplebucket01'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model ListBindingsResponseBody = {
  bindings?: [
    Binding
  ](name='Bindings', description='The bindings between the dataset and OSS buckets.'),
  nextToken?: string(name='NextToken', description='*   The pagination token that is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter.
*   The next request returns remaining results starting from the position marked by the NextToken parameter value.
*   This parameter has a non-empty value only when not all bindings are returned.', example='immtest:dataset001:examplebucket01'),
  requestId?: string(name='RequestId', description='The request ID.', example='EFDFD356-C928-4A36-951A-6EB5A592****'),
}

model ListBindingsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListBindingsResponseBody(name='body'),
}

/**
 * @summary Queries bindings between a dataset and Object Storage Service (OSS) buckets.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).
 *
 * @param request ListBindingsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListBindingsResponse
 */
async function listBindingsWithOptions(request: ListBindingsRequest, runtime: $RuntimeOptions): ListBindingsResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListBindings',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries bindings between a dataset and Object Storage Service (OSS) buckets.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).
 *
 * @param request ListBindingsRequest
 * @return ListBindingsResponse
 */
async function listBindings(request: ListBindingsRequest): ListBindingsResponse {
  var runtime = new $RuntimeOptions{};
  return listBindingsWithOptions(request, runtime);
}

model ListDatasetsRequest {
  maxResults?: long(name='MaxResults', description='The maximum number of datasets to return. Valid values: 0 to 200.

If this parameter is left empty or set to 0, 100 datasets are returned.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token.

If the total number of datasets is greater than the value of MaxResults, you must specify this parameter. The list is returned in lexicographic order starting from the value of NextToken.

>  The first time you call this operation in a query, set this parameter to null.', example='12345678:immtest:dataset002'),
  prefix?: string(name='Prefix', description='The dataset prefix.', example='dataset'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
}

model ListDatasetsResponseBody = {
  datasets?: [
    Dataset
  ](name='Datasets', description='The list of datasets.'),
  nextToken?: string(name='NextToken', description='The pagination token. If the total number of datasets is greater than the value of MaxResults, you must specify this parameter. This parameter has a value only if not all the datasets that meet the conditions are returned.

Pass this value as the value of NextToken in the next call to query subsequent datasets.', example='12345678:immtest:dataset002'),
  requestId?: string(name='RequestId', description='The request ID.', example='FEEDE356-C928-4A36-951A-6EB5A592****'),
}

model ListDatasetsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListDatasetsResponseBody(name='body'),
}

/**
 * @summary Queries a list of datasets. You can query the list by dataset prefix.
 *
 * @param request ListDatasetsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListDatasetsResponse
 */
async function listDatasetsWithOptions(request: ListDatasetsRequest, runtime: $RuntimeOptions): ListDatasetsResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.prefix)) {
    query['Prefix'] = request.prefix;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListDatasets',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries a list of datasets. You can query the list by dataset prefix.
 *
 * @param request ListDatasetsRequest
 * @return ListDatasetsResponse
 */
async function listDatasets(request: ListDatasetsRequest): ListDatasetsResponse {
  var runtime = new $RuntimeOptions{};
  return listDatasetsWithOptions(request, runtime);
}

model ListProjectsRequest {
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 200. Default value: 100.', example='100'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. You do not need to specify this parameter for the first request. You must specify the token that is obtained from the previous query as the value of NextToken. The operation returns the projects in lexicographical order starting from the location specified by NextToken.', example='MTIzNDU2Nzg6aW1tdGVzdDAx'),
  prefix?: string(name='Prefix', description='The prefix used by the projects that you want to query. The prefix must be up to 128 characters in length.', example='immtest'),
  tag?: [ 
    {
      key?: string(name='Key', description='The tag key.', example='TestKey'),
      value?: string(name='Value', description='The tag value.', example='TestValue'),
    }
  ](name='Tag', description='The tags.'),
}

model ListProjectsShrinkRequest {
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 200. Default value: 100.', example='100'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. You do not need to specify this parameter for the first request. You must specify the token that is obtained from the previous query as the value of NextToken. The operation returns the projects in lexicographical order starting from the location specified by NextToken.', example='MTIzNDU2Nzg6aW1tdGVzdDAx'),
  prefix?: string(name='Prefix', description='The prefix used by the projects that you want to query. The prefix must be up to 128 characters in length.', example='immtest'),
  tagShrink?: string(name='Tag', description='The tags.'),
}

model ListProjectsResponseBody = {
  nextToken?: string(name='NextToken', description='A pagination token. It can be used in the next request to retrieve a new page of results. If NextToken is empty, no next page exists.', example='MTIzNDU2Nzg6aW1tdGVzdDAx'),
  projects?: [
    Project
  ](name='Projects', description='The projects.'),
  requestId?: string(name='RequestId', description='The request ID.', example='4A7A2D0E-D8B8-4DA0-8127-EB32C660'),
}

model ListProjectsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListProjectsResponseBody(name='body'),
}

/**
 * @summary Queries projects. You can call this operation to query the basic information, datasets, and file statistics of multiple projects at the same time.
 *
 * @description The ListProjects operation supports pagination. When you call this operation, you must specify the token that is obtained from the previous query as the value of NextToken. You must also specify MaxResults to limit the number of entries to return.
 *
 * @param tmpReq ListProjectsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListProjectsResponse
 */
async function listProjectsWithOptions(tmpReq: ListProjectsRequest, runtime: $RuntimeOptions): ListProjectsResponse {
  tmpReq.validate();
  var request = new ListProjectsShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.tag)) {
    request.tagShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tag, 'Tag', 'json');
  }
  var query = {};
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.prefix)) {
    query['Prefix'] = request.prefix;
  }
  if (!$isNull(request.tagShrink)) {
    query['Tag'] = request.tagShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListProjects',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries projects. You can call this operation to query the basic information, datasets, and file statistics of multiple projects at the same time.
 *
 * @description The ListProjects operation supports pagination. When you call this operation, you must specify the token that is obtained from the previous query as the value of NextToken. You must also specify MaxResults to limit the number of entries to return.
 *
 * @param request ListProjectsRequest
 * @return ListProjectsResponse
 */
async function listProjects(request: ListProjectsRequest): ListProjectsResponse {
  var runtime = new $RuntimeOptions{};
  return listProjectsWithOptions(request, runtime);
}

model ListRegionsRequest {
  acceptLanguage?: string(name='AcceptLanguage', description='The language. Valid values:

*   zh-CN: Chinese.
*   en-US: English.
*   ja: Japanese.

This parameter is required.', example='zh-CN'),
}

model ListRegionsResponseBody = {
  regions?: [
    RegionType
  ](name='Regions', description='The regions.'),
  requestId?: string(name='RequestId', description='The request ID.', example='7F7D235C-76FF-4B65-800C-8238AE3F****'),
}

model ListRegionsResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListRegionsResponseBody(name='body'),
}

/**
 * @summary Queries the regions where Intelligent Media Management (IMM) is available and the supported languages.
 *
 * @param request ListRegionsRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListRegionsResponse
 */
async function listRegionsWithOptions(request: ListRegionsRequest, runtime: $RuntimeOptions): ListRegionsResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.acceptLanguage)) {
    query['AcceptLanguage'] = request.acceptLanguage;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListRegions',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries the regions where Intelligent Media Management (IMM) is available and the supported languages.
 *
 * @param request ListRegionsRequest
 * @return ListRegionsResponse
 */
async function listRegions(request: ListRegionsRequest): ListRegionsResponse {
  var runtime = new $RuntimeOptions{};
  return listRegionsWithOptions(request, runtime);
}

model ListTasksRequest {
  endTimeRange?: TimeRange(name='EndTimeRange', description='The task end time range. You can specify this parameter to filter tasks that end within the specified range.'),
  maxResults?: long(name='MaxResults', description='The maximum number of results to return. Valid value range: (0, 100]. Default value: 100.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token.

The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter. The next call to the operation returns results lexicographically after the NextToken parameter value.

>  Leave this parameter empty in your first call to the operation.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: in ascending order. This is the default value.
*   desc: in descending order.', example='ASC'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  requestDefinition?: boolean(name='RequestDefinition', description='Specifies whether to return request parameters in the initial request to create the task. Default value: False.', example='True'),
  sort?: string(name='Sort', description='The field used to sort the results by. Valid values:

*   TaskId: sorts the results by task ID. This is the default sort field.
*   StartTime: sorts the results by task start time.
*   StartTime: sorts the results by task end time.', example='TaskId'),
  startTimeRange?: TimeRange(name='StartTimeRange', description='The task start time range. You can specify this parameter to filter tasks that start within the specified range.'),
  status?: string(name='Status', description='The task status. Valid values:

*   Running: The task is running.
*   Succeeded: The task is successful.
*   Failed: The task failed.', example='Succeeded'),
  tagSelector?: string(name='TagSelector', description='The custom tags of tasks.', example='test=val1'),
  taskTypes?: [ string ](name='TaskTypes', description='The task types.'),
}

model ListTasksShrinkRequest {
  endTimeRangeShrink?: string(name='EndTimeRange', description='The task end time range. You can specify this parameter to filter tasks that end within the specified range.'),
  maxResults?: long(name='MaxResults', description='The maximum number of results to return. Valid value range: (0, 100]. Default value: 100.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token.

The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter. The next call to the operation returns results lexicographically after the NextToken parameter value.

>  Leave this parameter empty in your first call to the operation.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: in ascending order. This is the default value.
*   desc: in descending order.', example='ASC'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  requestDefinition?: boolean(name='RequestDefinition', description='Specifies whether to return request parameters in the initial request to create the task. Default value: False.', example='True'),
  sort?: string(name='Sort', description='The field used to sort the results by. Valid values:

*   TaskId: sorts the results by task ID. This is the default sort field.
*   StartTime: sorts the results by task start time.
*   StartTime: sorts the results by task end time.', example='TaskId'),
  startTimeRangeShrink?: string(name='StartTimeRange', description='The task start time range. You can specify this parameter to filter tasks that start within the specified range.'),
  status?: string(name='Status', description='The task status. Valid values:

*   Running: The task is running.
*   Succeeded: The task is successful.
*   Failed: The task failed.', example='Succeeded'),
  tagSelector?: string(name='TagSelector', description='The custom tags of tasks.', example='test=val1'),
  taskTypesShrink?: string(name='TaskTypes', description='The task types.'),
}

model ListTasksResponseBody = {
  maxResults?: string(name='MaxResults', description='The length of the returned result list.', example='1'),
  nextToken?: string(name='NextToken', description='The pagination token. The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter. This parameter has a value only when not all results are returned.

You can specify the value of the NextToken parameter in the next request to list remaining results.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  projectName?: string(name='ProjectName', description='The name of the project.', example='immtest'),
  requestId?: string(name='RequestId', description='The request ID.', example='9847E7D0-A9A3-0053-84C6-BA16FFFA726E'),
  tasks?: [
    TaskInfo
  ](name='Tasks', description='The tasks.'),
}

model ListTasksResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListTasksResponseBody(name='body'),
}

/**
 * @summary Lists tasks based on specific conditions, such as by time range and by tag.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).
 *
 * @param tmpReq ListTasksRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListTasksResponse
 */
async function listTasksWithOptions(tmpReq: ListTasksRequest, runtime: $RuntimeOptions): ListTasksResponse {
  tmpReq.validate();
  var request = new ListTasksShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.endTimeRange)) {
    request.endTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.endTimeRange, 'EndTimeRange', 'json');
  }
  if (!$isNull(tmpReq.startTimeRange)) {
    request.startTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.startTimeRange, 'StartTimeRange', 'json');
  }
  if (!$isNull(tmpReq.taskTypes)) {
    request.taskTypesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.taskTypes, 'TaskTypes', 'json');
  }
  var query = {};
  if (!$isNull(request.endTimeRangeShrink)) {
    query['EndTimeRange'] = request.endTimeRangeShrink;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.requestDefinition)) {
    query['RequestDefinition'] = request.requestDefinition;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.startTimeRangeShrink)) {
    query['StartTimeRange'] = request.startTimeRangeShrink;
  }
  if (!$isNull(request.status)) {
    query['Status'] = request.status;
  }
  if (!$isNull(request.tagSelector)) {
    query['TagSelector'] = request.tagSelector;
  }
  if (!$isNull(request.taskTypesShrink)) {
    query['TaskTypes'] = request.taskTypesShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListTasks',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Lists tasks based on specific conditions, such as by time range and by tag.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).
 *
 * @param request ListTasksRequest
 * @return ListTasksResponse
 */
async function listTasks(request: ListTasksRequest): ListTasksResponse {
  var runtime = new $RuntimeOptions{};
  return listTasksWithOptions(request, runtime);
}

model ListTriggersRequest {
  maxResults?: int32(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 100.

Default value: 100.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results.

If the total number of triggers is greater than the value of MaxResults, you must specify NextToken.

You do not need to specify this parameter for the first request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Default value: DESC.

*   ASC (default): ascending order.
*   DESC: descending order.', example='ASC'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. Valid values:

*   CreateTime: the point in time when the trigger is created.
*   UpdateTime: the most recent point in time when the trigger is updated.', example='2020-11-11T06:51:17.5Z'),
  state?: string(name='State', description='The status of the trigger. Valid values:

*   Ready: The trigger is ready.
*   Running: The trigger is running.
*   Failed: The trigger failed and cannot be automatically recovered.
*   Suspended: The trigger is suspended.
*   Succeeded: The trigger is complete.', example='Succeeded'),
  tagSelector?: string(name='TagSelector', description='The custom tag. You can specify this parameter only if you specified Tags when you called the CreateTrigger operation.', example='test=val1'),
}

model ListTriggersResponseBody = {
  nextToken?: string(name='NextToken', description='A pagination token. It can be used in the next request to retrieve a new page of results.

If NextToken is empty, no next page exists.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  requestId?: string(name='RequestId', description='The request ID.', example='F480BFAF-E778-5079-93AD-1E4631******'),
  triggers?: [
    DataIngestion
  ](name='Triggers', description='The triggers.'),
}

model ListTriggersResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ListTriggersResponseBody(name='body'),
}

/**
 * @summary Queries triggers by tag or status.
 *
 * @param request ListTriggersRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ListTriggersResponse
 */
async function listTriggersWithOptions(request: ListTriggersRequest, runtime: $RuntimeOptions): ListTriggersResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.state)) {
    query['State'] = request.state;
  }
  if (!$isNull(request.tagSelector)) {
    query['TagSelector'] = request.tagSelector;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'ListTriggers',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries triggers by tag or status.
 *
 * @param request ListTriggersRequest
 * @return ListTriggersResponse
 */
async function listTriggers(request: ListTriggersRequest): ListTriggersResponse {
  var runtime = new $RuntimeOptions{};
  return listTriggersWithOptions(request, runtime);
}

model QueryFigureClustersRequest {
  createTimeRange?: TimeRange(name='CreateTimeRange', description='The time range within which the face group was created.'),
  customLabels?: string(name='CustomLabels', description='The custom labels, which can be used as query conditions.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 100. Default value: 100.', example='100'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. You do not need to specify this parameter for the first request. You must specify the token that is obtained from the previous query as the value of NextToken.', example='10'),
  order?: string(name='Order', description='The sort order. Default value: asc.

Valid values:

*   asc: ascending order.
*   desc: descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. If you leave this parameter empty, the group ID is used as the sort field.

Valid values:

*   ImageCount: the number of images.
*   VideoCount: the number of videos.
*   ProjectName: the name of the project.
*   DatasetName: the name of the dataset.
*   CreateTime: the point in time when the group is created.
*   UpdateTime: the most recent point in time when the group is updated.
*   Gender: the gender.
*   FaceCount: the number of faces.
*   GroupName: the name of the group.', example='ImageCount'),
  updateTimeRange?: TimeRange(name='UpdateTimeRange', description='The time range within which the face group was last updated.'),
  withTotalCount?: boolean(name='WithTotalCount', description='Specifies whether to return the total number of face groups that match the current query conditions. Default value: false.', example='false'),
}

model QueryFigureClustersShrinkRequest {
  createTimeRangeShrink?: string(name='CreateTimeRange', description='The time range within which the face group was created.'),
  customLabels?: string(name='CustomLabels', description='The custom labels, which can be used as query conditions.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 0 to 100. Default value: 100.', example='100'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. You do not need to specify this parameter for the first request. You must specify the token that is obtained from the previous query as the value of NextToken.', example='10'),
  order?: string(name='Order', description='The sort order. Default value: asc.

Valid values:

*   asc: ascending order.
*   desc: descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. If you leave this parameter empty, the group ID is used as the sort field.

Valid values:

*   ImageCount: the number of images.
*   VideoCount: the number of videos.
*   ProjectName: the name of the project.
*   DatasetName: the name of the dataset.
*   CreateTime: the point in time when the group is created.
*   UpdateTime: the most recent point in time when the group is updated.
*   Gender: the gender.
*   FaceCount: the number of faces.
*   GroupName: the name of the group.', example='ImageCount'),
  updateTimeRangeShrink?: string(name='UpdateTimeRange', description='The time range within which the face group was last updated.'),
  withTotalCount?: boolean(name='WithTotalCount', description='Specifies whether to return the total number of face groups that match the current query conditions. Default value: false.', example='false'),
}

model QueryFigureClustersResponseBody = {
  figureClusters?: [
    FigureCluster
  ](name='FigureClusters', description='The face groups.'),
  nextToken?: string(name='NextToken', description='A pagination token. It can be used in the next request to retrieve a new page of results.', example='10'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFF****'),
  totalCount?: long(name='TotalCount', description='The total number of face groups that matches the current query conditions.', example='100'),
}

model QueryFigureClustersResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: QueryFigureClustersResponseBody(name='body'),
}

/**
 * @summary Queries face groups based on given conditions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that a face clustering task is created to group all faces in a dataset. For information about how to create a face clustering task, see [CreateFigureClusteringTask](~~CreateFigureClusteringTask~~). For information about how to create a dataset, see [CreateDataset](~~CreateDataset~~).
 *
 * @param tmpReq QueryFigureClustersRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return QueryFigureClustersResponse
 */
async function queryFigureClustersWithOptions(tmpReq: QueryFigureClustersRequest, runtime: $RuntimeOptions): QueryFigureClustersResponse {
  tmpReq.validate();
  var request = new QueryFigureClustersShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.createTimeRange)) {
    request.createTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.createTimeRange, 'CreateTimeRange', 'json');
  }
  if (!$isNull(tmpReq.updateTimeRange)) {
    request.updateTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.updateTimeRange, 'UpdateTimeRange', 'json');
  }
  var query = {};
  if (!$isNull(request.createTimeRangeShrink)) {
    query['CreateTimeRange'] = request.createTimeRangeShrink;
  }
  if (!$isNull(request.customLabels)) {
    query['CustomLabels'] = request.customLabels;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.updateTimeRangeShrink)) {
    query['UpdateTimeRange'] = request.updateTimeRangeShrink;
  }
  if (!$isNull(request.withTotalCount)) {
    query['WithTotalCount'] = request.withTotalCount;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'QueryFigureClusters',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries face groups based on given conditions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that a face clustering task is created to group all faces in a dataset. For information about how to create a face clustering task, see [CreateFigureClusteringTask](~~CreateFigureClusteringTask~~). For information about how to create a dataset, see [CreateDataset](~~CreateDataset~~).
 *
 * @param request QueryFigureClustersRequest
 * @return QueryFigureClustersResponse
 */
async function queryFigureClusters(request: QueryFigureClustersRequest): QueryFigureClustersResponse {
  var runtime = new $RuntimeOptions{};
  return queryFigureClustersWithOptions(request, runtime);
}

model QueryLocationDateClustersRequest {
  address?: Address(name='Address', description='The address information.'),
  createTimeRange?: TimeRange(name='CreateTimeRange', description='The time range during which the spatiotemporal clusters were generated.'),
  customLabels?: string(name='CustomLabels', description='The custom labels, which can be used as query conditions.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  locationDateClusterEndTimeRange?: TimeRange(name='LocationDateClusterEndTimeRange', description='The time range during which the latest photo in a cluster was taken.'),
  locationDateClusterLevels?: [ string ](name='LocationDateClusterLevels', description='The administrative level of the spatiotemporal clustering groups to be queried.'),
  locationDateClusterStartTimeRange?: TimeRange(name='LocationDateClusterStartTimeRange', description='The time range during which the earliest photo in a cluster was taken.'),
  maxResults?: int32(name='MaxResults', description='The number of entries per page. Valid values: [1,100]. Default value: 20.', example='20'),
  nextToken?: string(name='NextToken', description='The pagination token.', example='MzQNjmY2MzYxNhNjk2ZNjEu****'),
  objectId?: string(name='ObjectId', description='The ID of the group that you want to query. Specify this parameter if you want to obtain the information about a specific spatiotemporal clustering group. Otherwise, leave this parameter empty and use other parameters to query the groups that meet the matching conditions.', example='location-date-cluster-71dd4f32-9597-4085-a2ab-3a7b0fd0aff9'),
  order?: string(name='Order', description='The sorting order.

Default value: asc. Valid values:

*   asc: ascending order.
*   desc: descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The condition by which the results are sorted.

Valid values:

*   LocationDateClusterEndTime: by the end time of the spatiotemporal clustering groups.
*   CreateTime: by the creation time of the spatiotemporal clustering groups.
*   UpdateTime: by the update time of the spatiotemporal clustering groups.
*   LocationDateClusterStartTime: by the start time of the spatiotemporal clustering groups. This is the default value.', example='LocationDateClusterStartTime'),
  title?: string(name='Title', description='The title of spatiotemporal clustering. Fuzzy matching is performed.'),
  updateTimeRange?: TimeRange(name='UpdateTimeRange', description='The time range during which the spatiotemporal clusters were updated.'),
}

model QueryLocationDateClustersShrinkRequest {
  addressShrink?: string(name='Address', description='The address information.'),
  createTimeRangeShrink?: string(name='CreateTimeRange', description='The time range during which the spatiotemporal clusters were generated.'),
  customLabels?: string(name='CustomLabels', description='The custom labels, which can be used as query conditions.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  locationDateClusterEndTimeRangeShrink?: string(name='LocationDateClusterEndTimeRange', description='The time range during which the latest photo in a cluster was taken.'),
  locationDateClusterLevelsShrink?: string(name='LocationDateClusterLevels', description='The administrative level of the spatiotemporal clustering groups to be queried.'),
  locationDateClusterStartTimeRangeShrink?: string(name='LocationDateClusterStartTimeRange', description='The time range during which the earliest photo in a cluster was taken.'),
  maxResults?: int32(name='MaxResults', description='The number of entries per page. Valid values: [1,100]. Default value: 20.', example='20'),
  nextToken?: string(name='NextToken', description='The pagination token.', example='MzQNjmY2MzYxNhNjk2ZNjEu****'),
  objectId?: string(name='ObjectId', description='The ID of the group that you want to query. Specify this parameter if you want to obtain the information about a specific spatiotemporal clustering group. Otherwise, leave this parameter empty and use other parameters to query the groups that meet the matching conditions.', example='location-date-cluster-71dd4f32-9597-4085-a2ab-3a7b0fd0aff9'),
  order?: string(name='Order', description='The sorting order.

Default value: asc. Valid values:

*   asc: ascending order.
*   desc: descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The condition by which the results are sorted.

Valid values:

*   LocationDateClusterEndTime: by the end time of the spatiotemporal clustering groups.
*   CreateTime: by the creation time of the spatiotemporal clustering groups.
*   UpdateTime: by the update time of the spatiotemporal clustering groups.
*   LocationDateClusterStartTime: by the start time of the spatiotemporal clustering groups. This is the default value.', example='LocationDateClusterStartTime'),
  title?: string(name='Title', description='The title of spatiotemporal clustering. Fuzzy matching is performed.'),
  updateTimeRangeShrink?: string(name='UpdateTimeRange', description='The time range during which the spatiotemporal clusters were updated.'),
}

model QueryLocationDateClustersResponseBody = {
  locationDateClusters?: [
    LocationDateCluster
  ](name='LocationDateClusters', description='The list of spatiotemporal clusters.'),
  nextToken?: string(name='NextToken', description='The pagination token.', example='MzQNjmY2MzYxNhNjk2ZNjEu****'),
  requestId?: string(name='RequestId', description='The request ID.', example='7055FCF7-4D7B-098E-BD4D-DD2932B0****'),
}

model QueryLocationDateClustersResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: QueryLocationDateClustersResponseBody(name='body'),
}

/**
 * @summary Queries a list of spatiotemporal clustering groups. Multiple conditions are supported. For more information, see the request parameters.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, you must call the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to perform spatiotemporal clustering.
 *
 * @param tmpReq QueryLocationDateClustersRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return QueryLocationDateClustersResponse
 */
async function queryLocationDateClustersWithOptions(tmpReq: QueryLocationDateClustersRequest, runtime: $RuntimeOptions): QueryLocationDateClustersResponse {
  tmpReq.validate();
  var request = new QueryLocationDateClustersShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.address)) {
    request.addressShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.address, 'Address', 'json');
  }
  if (!$isNull(tmpReq.createTimeRange)) {
    request.createTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.createTimeRange, 'CreateTimeRange', 'json');
  }
  if (!$isNull(tmpReq.locationDateClusterEndTimeRange)) {
    request.locationDateClusterEndTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.locationDateClusterEndTimeRange, 'LocationDateClusterEndTimeRange', 'json');
  }
  if (!$isNull(tmpReq.locationDateClusterLevels)) {
    request.locationDateClusterLevelsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.locationDateClusterLevels, 'LocationDateClusterLevels', 'json');
  }
  if (!$isNull(tmpReq.locationDateClusterStartTimeRange)) {
    request.locationDateClusterStartTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.locationDateClusterStartTimeRange, 'LocationDateClusterStartTimeRange', 'json');
  }
  if (!$isNull(tmpReq.updateTimeRange)) {
    request.updateTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.updateTimeRange, 'UpdateTimeRange', 'json');
  }
  var query = {};
  if (!$isNull(request.addressShrink)) {
    query['Address'] = request.addressShrink;
  }
  if (!$isNull(request.createTimeRangeShrink)) {
    query['CreateTimeRange'] = request.createTimeRangeShrink;
  }
  if (!$isNull(request.customLabels)) {
    query['CustomLabels'] = request.customLabels;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.locationDateClusterEndTimeRangeShrink)) {
    query['LocationDateClusterEndTimeRange'] = request.locationDateClusterEndTimeRangeShrink;
  }
  if (!$isNull(request.locationDateClusterLevelsShrink)) {
    query['LocationDateClusterLevels'] = request.locationDateClusterLevelsShrink;
  }
  if (!$isNull(request.locationDateClusterStartTimeRangeShrink)) {
    query['LocationDateClusterStartTimeRange'] = request.locationDateClusterStartTimeRangeShrink;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.title)) {
    query['Title'] = request.title;
  }
  if (!$isNull(request.updateTimeRangeShrink)) {
    query['UpdateTimeRange'] = request.updateTimeRangeShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'QueryLocationDateClusters',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries a list of spatiotemporal clustering groups. Multiple conditions are supported. For more information, see the request parameters.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, you must call the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to perform spatiotemporal clustering.
 *
 * @param request QueryLocationDateClustersRequest
 * @return QueryLocationDateClustersResponse
 */
async function queryLocationDateClusters(request: QueryLocationDateClustersRequest): QueryLocationDateClustersResponse {
  var runtime = new $RuntimeOptions{};
  return queryLocationDateClustersWithOptions(request, runtime);
}

model QuerySimilarImageClustersRequest {
  customLabels?: string(name='CustomLabels', description='The custom tags, which are used to filter tasks.', example='{"key": "val"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset. For more information, see [Create a dataset](https://help.aliyun.com/document_detail/478160.html).

This parameter is required.', example='test-dataset'),
  maxResults?: int32(name='MaxResults', description='The number of entries per page. Value range: 0 to 100. Default value: 100.', example='20'),
  nextToken?: string(name='NextToken', description='The pagination token.

If the total number of clusters is greater than the value of MaxResults, you must specify this parameter. The next call to the operation returns results lexicographically after the NextToken parameter value.

>  The first time you call this operation in a query, set this parameter to null.', example='CAESEgoQCg4KClVwZGF0ZVRpbWUQARgBIs8ECgkAAJLUwUCAQ****'),
  order?: string(name='Order', description='The sorting order. Valid values:

*   asc: ascending order.
*   desc: descending order. This is the default value.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project. For more information, see [CreateProject](https://help.aliyun.com/document_detail/478153.html).

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sorting field.

*   CreateTime: the time when the clusters were created.
*   UpdateTime: the time when the clusters were updated. This is the default value.', example='UpdateTime'),
}

model QuerySimilarImageClustersResponseBody = {
  nextToken?: string(name='NextToken', description='The pagination token. If the total number of clusters is greater than the value of MaxResults, this token can be used to retrieve the next page. This parameter has a value only if not all the clusters that meet the condition are returned.

Pass this value as the value of NextToken in the next query to return the subsequent clusters.', example='CAESEgoQCg4KClVwZGF0ZVRpbWUQARgBIs8ECgkAAJLUwUCAQ****'),
  requestId?: string(name='RequestId', description='The request ID.', example='CA995EFD-083D-4F40-BE8A-BDF75FFF****'),
  similarImageClusters?: [
    SimilarImageCluster
  ](name='SimilarImageClusters', description='The list of similar image clusters.'),
}

model QuerySimilarImageClustersResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: QuerySimilarImageClustersResponseBody(name='body'),
}

/**
 * @summary You can call this operation to query the list of similar image clusters.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, you must call the [CreateSimilarImageClusteringTask](https://help.aliyun.com/document_detail/611302.html) operation to cluster similar images in the dataset.
 *
 * @param request QuerySimilarImageClustersRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return QuerySimilarImageClustersResponse
 */
async function querySimilarImageClustersWithOptions(request: QuerySimilarImageClustersRequest, runtime: $RuntimeOptions): QuerySimilarImageClustersResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.customLabels)) {
    query['CustomLabels'] = request.customLabels;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'QuerySimilarImageClusters',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary You can call this operation to query the list of similar image clusters.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, you must call the [CreateSimilarImageClusteringTask](https://help.aliyun.com/document_detail/611302.html) operation to cluster similar images in the dataset.
 *
 * @param request QuerySimilarImageClustersRequest
 * @return QuerySimilarImageClustersResponse
 */
async function querySimilarImageClusters(request: QuerySimilarImageClustersRequest): QuerySimilarImageClustersResponse {
  var runtime = new $RuntimeOptions{};
  return querySimilarImageClustersWithOptions(request, runtime);
}

model QueryStoriesRequest {
  createTimeRange?: TimeRange(name='CreateTimeRange', description='The time range in which stories were created.'),
  customLabels?: string(name='CustomLabels', description='The custom labels in key-value pairs.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  figureClusterIds?: [ string ](name='FigureClusterIds', description='The IDs of the face clusters.'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 1 to 100. Default value: 100.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. If you do not specify this token in the next request, results are returned from the beginning.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpw****'),
  objectId?: string(name='ObjectId', description='The ID of the story.', example='id1'),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: in ascending order.
*   desc: in descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. Valid values:

*   CreateTime: sorts by story creation time.
*   StoryName: sorts by story name.
*   StoryStartTime: sorts by story start time.
*   StoryEndTime: sorts by story end time.', example='CreateTime'),
  storyEndTimeRange?: TimeRange(name='StoryEndTimeRange', description='The time range for the creation time of the last photo or video in the story.'),
  storyName?: string(name='StoryName', description='The name of the story.', example='name1'),
  storyStartTimeRange?: TimeRange(name='StoryStartTimeRange', description='The time range for the creation time of the first photo or video in the story.'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For a list of valid values, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='SeasonHighlights'),
  storyType?: string(name='StoryType', description='The type of the story. For a list of valid values, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='TimeMemory'),
  withEmptyStories?: boolean(name='WithEmptyStories', description='Specifies whether to return empty stories. Valid values:

*   true (The default value)
*   false', example='true'),
}

model QueryStoriesShrinkRequest {
  createTimeRangeShrink?: string(name='CreateTimeRange', description='The time range in which stories were created.'),
  customLabels?: string(name='CustomLabels', description='The custom labels in key-value pairs.', example='key=value'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  figureClusterIdsShrink?: string(name='FigureClusterIds', description='The IDs of the face clusters.'),
  maxResults?: long(name='MaxResults', description='The maximum number of entries to return. Valid values: 1 to 100. Default value: 100.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token that is used in the next request to retrieve a new page of results. If you do not specify this token in the next request, results are returned from the beginning.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpw****'),
  objectId?: string(name='ObjectId', description='The ID of the story.', example='id1'),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: in ascending order.
*   desc: in descending order.', example='asc'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sort?: string(name='Sort', description='The sort field. Valid values:

*   CreateTime: sorts by story creation time.
*   StoryName: sorts by story name.
*   StoryStartTime: sorts by story start time.
*   StoryEndTime: sorts by story end time.', example='CreateTime'),
  storyEndTimeRangeShrink?: string(name='StoryEndTimeRange', description='The time range for the creation time of the last photo or video in the story.'),
  storyName?: string(name='StoryName', description='The name of the story.', example='name1'),
  storyStartTimeRangeShrink?: string(name='StoryStartTimeRange', description='The time range for the creation time of the first photo or video in the story.'),
  storySubType?: string(name='StorySubType', description='The subtype of the story. For a list of valid values, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='SeasonHighlights'),
  storyType?: string(name='StoryType', description='The type of the story. For a list of valid values, see [Story types and subtypes](https://help.aliyun.com/document_detail/2743998.html).', example='TimeMemory'),
  withEmptyStories?: boolean(name='WithEmptyStories', description='Specifies whether to return empty stories. Valid values:

*   true (The default value)
*   false', example='true'),
}

model QueryStoriesResponseBody = {
  nextToken?: string(name='NextToken', description='The pagination token. It can be used in the next request to retrieve a new page of results. If NextToken is empty, no next page exists.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3Qx****'),
  requestId?: string(name='RequestId', description='The request ID.', example='2C5C1E0F-D8B8-4DA0-8127-EC32C771****'),
  stories?: [
    Story
  ](name='Stories', description='The stories.'),
}

model QueryStoriesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: QueryStoriesResponseBody(name='body'),
}

/**
 * @summary Queries stories based on the specified conditions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param tmpReq QueryStoriesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return QueryStoriesResponse
 */
async function queryStoriesWithOptions(tmpReq: QueryStoriesRequest, runtime: $RuntimeOptions): QueryStoriesResponse {
  tmpReq.validate();
  var request = new QueryStoriesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.createTimeRange)) {
    request.createTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.createTimeRange, 'CreateTimeRange', 'json');
  }
  if (!$isNull(tmpReq.figureClusterIds)) {
    request.figureClusterIdsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.figureClusterIds, 'FigureClusterIds', 'json');
  }
  if (!$isNull(tmpReq.storyEndTimeRange)) {
    request.storyEndTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.storyEndTimeRange, 'StoryEndTimeRange', 'json');
  }
  if (!$isNull(tmpReq.storyStartTimeRange)) {
    request.storyStartTimeRangeShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.storyStartTimeRange, 'StoryStartTimeRange', 'json');
  }
  var query = {};
  if (!$isNull(request.createTimeRangeShrink)) {
    query['CreateTimeRange'] = request.createTimeRangeShrink;
  }
  if (!$isNull(request.customLabels)) {
    query['CustomLabels'] = request.customLabels;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.figureClusterIdsShrink)) {
    query['FigureClusterIds'] = request.figureClusterIdsShrink;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.storyEndTimeRangeShrink)) {
    query['StoryEndTimeRange'] = request.storyEndTimeRangeShrink;
  }
  if (!$isNull(request.storyName)) {
    query['StoryName'] = request.storyName;
  }
  if (!$isNull(request.storyStartTimeRangeShrink)) {
    query['StoryStartTimeRange'] = request.storyStartTimeRangeShrink;
  }
  if (!$isNull(request.storySubType)) {
    query['StorySubType'] = request.storySubType;
  }
  if (!$isNull(request.storyType)) {
    query['StoryType'] = request.storyType;
  }
  if (!$isNull(request.withEmptyStories)) {
    query['WithEmptyStories'] = request.withEmptyStories;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'QueryStories',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries stories based on the specified conditions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   Before you call this operation, make sure that you have called the [CreateStory](https://help.aliyun.com/document_detail/478193.html) or [CreateCustomizedStory](https://help.aliyun.com/document_detail/478196.html) operation to create a story.
 *
 * @param request QueryStoriesRequest
 * @return QueryStoriesResponse
 */
async function queryStories(request: QueryStoriesRequest): QueryStoriesResponse {
  var runtime = new $RuntimeOptions{};
  return queryStoriesWithOptions(request, runtime);
}

model RefreshWebofficeTokenRequest {
  accessToken?: string(name='AccessToken', description='This parameter is required.', example='99d1b8b478b641c1b3372f5bd6********'),
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='immtest'),
  refreshToken?: string(name='RefreshToken', description='This parameter is required.', example='a730ae0d7c6a487d87c661d199********'),
}

model RefreshWebofficeTokenShrinkRequest {
  accessToken?: string(name='AccessToken', description='This parameter is required.', example='99d1b8b478b641c1b3372f5bd6********'),
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='immtest'),
  refreshToken?: string(name='RefreshToken', description='This parameter is required.', example='a730ae0d7c6a487d87c661d199********'),
}

model RefreshWebofficeTokenResponseBody = {
  accessToken?: string(name='AccessToken', example='4996466c690a4902846ce00f96********'),
  accessTokenExpiredTime?: string(name='AccessTokenExpiredTime', example='2021-08-31T13:07:28.950065359Z'),
  refreshToken?: string(name='RefreshToken', example='72a52ab3702a4123ab5594671a********'),
  refreshTokenExpiredTime?: string(name='RefreshTokenExpiredTime', example='2021-09-01T12:37:28.950065359Z'),
  requestId?: string(name='RequestId', example='501339F9-4B70-0CE2-AB8C-866C********'),
}

model RefreshWebofficeTokenResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: RefreshWebofficeTokenResponseBody(name='body'),
}

/**
 * @summary Refreshes the access credential of WebOffice. The access credential of WebOffice is valid for 30 minutes. After the credential expires, you cannot access Weboffice. To access Weboffice again, call this operation to obtain a new credential. The new credential is also valid for 30 minutes.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   For more information, see [WebOffice billing](https://help.aliyun.com/document_detail/2639703.html).
 * *   The access token returned by this operation is valid for 30 minutes. After the access token expires, you cannot use it to access the document.
 * *   The refresh token returned by this operation is valid for one day. You need to use the refresh token for the next call to the operation before the refresh token expires. After the validity period elapses, the refresh token is invalid.
 * *   The returned expiration time is displayed in UTC.
 * >  An access token is used to actually access a document, whereas a refresh token is used to avoid repeated access configurations.
 *
 * @param tmpReq RefreshWebofficeTokenRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return RefreshWebofficeTokenResponse
 */
async function refreshWebofficeTokenWithOptions(tmpReq: RefreshWebofficeTokenRequest, runtime: $RuntimeOptions): RefreshWebofficeTokenResponse {
  tmpReq.validate();
  var request = new RefreshWebofficeTokenShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.accessToken)) {
    query['AccessToken'] = request.accessToken;
  }
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.refreshToken)) {
    query['RefreshToken'] = request.refreshToken;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'RefreshWebofficeToken',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Refreshes the access credential of WebOffice. The access credential of WebOffice is valid for 30 minutes. After the credential expires, you cannot access Weboffice. To access Weboffice again, call this operation to obtain a new credential. The new credential is also valid for 30 minutes.
 *
 * @description Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of IMM.****
 * *   For more information, see [WebOffice billing](https://help.aliyun.com/document_detail/2639703.html).
 * *   The access token returned by this operation is valid for 30 minutes. After the access token expires, you cannot use it to access the document.
 * *   The refresh token returned by this operation is valid for one day. You need to use the refresh token for the next call to the operation before the refresh token expires. After the validity period elapses, the refresh token is invalid.
 * *   The returned expiration time is displayed in UTC.
 * >  An access token is used to actually access a document, whereas a refresh token is used to avoid repeated access configurations.
 *
 * @param request RefreshWebofficeTokenRequest
 * @return RefreshWebofficeTokenResponse
 */
async function refreshWebofficeToken(request: RefreshWebofficeTokenRequest): RefreshWebofficeTokenResponse {
  var runtime = new $RuntimeOptions{};
  return refreshWebofficeTokenWithOptions(request, runtime);
}

model RemoveStoryFilesRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='testdataset'),
  files?: [ 
    {
      URI?: string(name='URI', description='The URI of the Object Storage Service (OSS) bucket where you store the files that you want to delete.

Specify the value in the oss://${Bucket}/${Object} format. `${Bucket}` specifies the name of the OSS bucket that resides in the same region as the current project. `${Object}` specifies the complete path to the files that have an extension.', example='oss://bucket1/object'),
    }
  ](name='Files', description='The files that you want to delete.

This parameter is required.'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
}

model RemoveStoryFilesShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='testdataset'),
  filesShrink?: string(name='Files', description='The files that you want to delete.

This parameter is required.'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
}

model RemoveStoryFilesResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model RemoveStoryFilesResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: RemoveStoryFilesResponseBody(name='body'),
}

/**
 * @summary Deletes files from a story.
 *
 * @param tmpReq RemoveStoryFilesRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return RemoveStoryFilesResponse
 */
async function removeStoryFilesWithOptions(tmpReq: RemoveStoryFilesRequest, runtime: $RuntimeOptions): RemoveStoryFilesResponse {
  tmpReq.validate();
  var request = new RemoveStoryFilesShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.files)) {
    request.filesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.files, 'Files', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.datasetName)) {
    body['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.filesShrink)) {
    body['Files'] = request.filesShrink;
  }
  if (!$isNull(request.objectId)) {
    body['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'RemoveStoryFiles',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Deletes files from a story.
 *
 * @param request RemoveStoryFilesRequest
 * @return RemoveStoryFilesResponse
 */
async function removeStoryFiles(request: RemoveStoryFilesRequest): RemoveStoryFilesResponse {
  var runtime = new $RuntimeOptions{};
  return removeStoryFilesWithOptions(request, runtime);
}

model ResumeBatchRequest {
  id?: string(name='Id', description='The ID of the batch processing task. You can obtain the ID of the batch processing task from the response of the [CreateBatch](https://help.aliyun.com/document_detail/606694.html) operation.

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model ResumeBatchResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='D2C628B8-35DF-473C-8A41-757F30******'),
}

model ResumeBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ResumeBatchResponseBody(name='body'),
}

/**
 * @summary Resumes a batch processing task that is in the Suspended or Failed state.
 *
 * @description You can resume a batch processing task only when the task is in the Suspended or Failed state. A batch processing task continues to provide services after you resume the task.
 *
 * @param request ResumeBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ResumeBatchResponse
 */
async function resumeBatchWithOptions(request: ResumeBatchRequest, runtime: $RuntimeOptions): ResumeBatchResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'ResumeBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Resumes a batch processing task that is in the Suspended or Failed state.
 *
 * @description You can resume a batch processing task only when the task is in the Suspended or Failed state. A batch processing task continues to provide services after you resume the task.
 *
 * @param request ResumeBatchRequest
 * @return ResumeBatchResponse
 */
async function resumeBatch(request: ResumeBatchRequest): ResumeBatchResponse {
  var runtime = new $RuntimeOptions{};
  return resumeBatchWithOptions(request, runtime);
}

model ResumeTriggerRequest {
  id?: string(name='Id', description='The ID of the trigger. You can obtain the ID from the response of the [CreateTrigger](https://help.aliyun.com/document_detail/479912.html) operation.

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model ResumeTriggerResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='FEDC9B1F-30F2-4C1F-8ED2-B7860187****'),
}

model ResumeTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: ResumeTriggerResponseBody(name='body'),
}

/**
 * @summary Resumes a trigger that is in the Suspended or Failed state.
 *
 * @description You can resume only a trigger that is in the Suspended or Failed state. After you resume a trigger, the trigger continues to provide services as expected.
 *
 * @param request ResumeTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return ResumeTriggerResponse
 */
async function resumeTriggerWithOptions(request: ResumeTriggerRequest, runtime: $RuntimeOptions): ResumeTriggerResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'ResumeTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Resumes a trigger that is in the Suspended or Failed state.
 *
 * @description You can resume only a trigger that is in the Suspended or Failed state. After you resume a trigger, the trigger continues to provide services as expected.
 *
 * @param request ResumeTriggerRequest
 * @return ResumeTriggerResponse
 */
async function resumeTrigger(request: ResumeTriggerRequest): ResumeTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return resumeTriggerWithOptions(request, runtime);
}

model SearchImageFigureClusterRequest {
  credentialConfig?: CredentialConfig(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image.

Specify the OSS URI in the `oss://${Bucket}/${Object}` format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
}

model SearchImageFigureClusterShrinkRequest {
  credentialConfigShrink?: string(name='CredentialConfig', description='**If you have no special requirements, leave this parameter empty.**

The authorization chain settings. For more information, see [Use authorization chains to access resources of other entities](https://help.aliyun.com/document_detail/465340.html).'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  sourceURI?: string(name='SourceURI', description='The OSS URI of the image.

Specify the OSS URI in the `oss://${Bucket}/${Object}` format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://test-bucket/test-object'),
}

model SearchImageFigureClusterResponseBody = {
  clusters?: [ 
    {
      boundary?: Boundary(name='Boundary', description='The bounding box of the face.'),
      clusterId?: string(name='ClusterId', description='The ID of the face cluster that contains faces similar to the face within the bounding box.', example='Cluster-ca730577-06b1-42c7-a25b-8f2c7******'),
      similarity?: float(name='Similarity', description='The similarity between the face within the bounding box and the face cluster. Valid value: 0 to 1.', example='0.87413794'),
    }
  ](name='Clusters', description='The face clusters.'),
  requestId?: string(name='RequestId', description='The request ID.', example='C2734912-E6D5-052C-AC67-6A9FD02*****'),
}

model SearchImageFigureClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: SearchImageFigureClusterResponseBody(name='body'),
}

/**
 * @summary Queries face clusters that contain a specific face in an image. Each face cluster contains information such as bounding boxes and similarity.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have created a face clustering task by calling the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 *
 * @param tmpReq SearchImageFigureClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return SearchImageFigureClusterResponse
 */
async function searchImageFigureClusterWithOptions(tmpReq: SearchImageFigureClusterRequest, runtime: $RuntimeOptions): SearchImageFigureClusterResponse {
  tmpReq.validate();
  var request = new SearchImageFigureClusterShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.credentialConfig)) {
    request.credentialConfigShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.credentialConfig, 'CredentialConfig', 'json');
  }
  var query = {};
  if (!$isNull(request.credentialConfigShrink)) {
    query['CredentialConfig'] = request.credentialConfigShrink;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.sourceURI)) {
    query['SourceURI'] = request.sourceURI;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'SearchImageFigureCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries face clusters that contain a specific face in an image. Each face cluster contains information such as bounding boxes and similarity.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/88317.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have created a face clustering task by calling the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 *
 * @param request SearchImageFigureClusterRequest
 * @return SearchImageFigureClusterResponse
 */
async function searchImageFigureCluster(request: SearchImageFigureClusterRequest): SearchImageFigureClusterResponse {
  var runtime = new $RuntimeOptions{};
  return searchImageFigureClusterWithOptions(request, runtime);
}

model SemanticQueryRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='immDatatest'),
  maxResults?: int32(name='MaxResults', description='The maximum number of entries to return. Valid values: 1 to 1000.', example='10'),
  mediaTypes?: [ string ](name='MediaTypes', description='The types of the media that you want to query. Default value:

["image"]'),
  nextToken?: string(name='NextToken', description='This parameter is no longer available.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  query?: string(name='Query', description='The content of the query that you input.

This parameter is required.'),
  withFields?: [ string ](name='WithFields', description='The fields that you want to include in the response. Including only necessary metadata fields can help reduce the size of the response.

If you do not specify this parameter or set the value to null, all existing metadata fields are returned.'),
}

model SemanticQueryShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='immDatatest'),
  maxResults?: int32(name='MaxResults', description='The maximum number of entries to return. Valid values: 1 to 1000.', example='10'),
  mediaTypesShrink?: string(name='MediaTypes', description='The types of the media that you want to query. Default value:

["image"]'),
  nextToken?: string(name='NextToken', description='This parameter is no longer available.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  query?: string(name='Query', description='The content of the query that you input.

This parameter is required.'),
  withFieldsShrink?: string(name='WithFields', description='The fields that you want to include in the response. Including only necessary metadata fields can help reduce the size of the response.

If you do not specify this parameter or set the value to null, all existing metadata fields are returned.'),
}

model SemanticQueryResponseBody = {
  files?: [
    File
  ](name='Files', description='The files.'),
  requestId?: string(name='RequestId', description='The request ID.', example='2C5C1E0F-D8B8-4DA0-8127-EC32C771****'),
}

model SemanticQueryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: SemanticQueryResponseBody(name='body'),
}

/**
 * @summary Queries metadata in a dataset by inputting natural language.
 *
 * @description ### [](#)Precautions
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**** Each time you call this operation, you are charged for semantic understanding and query fees.
 * *   Before you call this operation, make sure that the file that you want to use is indexed into the dataset that you use. To index a file into a dataset, you can call one of the following operations: [CreateBinding](https://help.aliyun.com/document_detail/478202.html), [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html), and [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html).
 * *   The response provided in this example is for reference only. The categories and content of metadata vary based on configurations of [workflow templates](https://help.aliyun.com/document_detail/466304.html). If you have questions, search for and join the DingTalk group numbered 21714099.
 * ### [](#)Usage limits
 * *   Each time you call this operation, up to 1,000 metadata files are returned.
 * *   Pagination is not supported.
 * *   The natural language processing capability may not always produce completely accurate results.
 * ### [](#)Usage methods
 * You can query files within a dataset by using natural language keywords. Key information supported for understanding includes labels (Labels.LabelName), time (ProduceTime), and location (Address.AddressLine). For example, if you use `2023 Hangzhou scenery` as the query criterion, the operation intelligently breaks the query criterion down into the following sub-criteria, and returns the files that meet all the sub-criteria:
 * *   ProduceTime: 00:00 on January 1, 2023 to 00:00 on December 31, 2023.
 * *   Address.AddressLine: `Hangzhou`
 * *   Labels.LabelName: `scenery`.
 * When you call this operation, you can configure a [workflow template](https://help.aliyun.com/document_detail/466304.html) that includes the `ImageEmbeddingExtraction` operator. This allows the operation to return image content when the query you input matches the image content, thereby achieving intelligent image retrieval.``
 *
 * @param tmpReq SemanticQueryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return SemanticQueryResponse
 */
async function semanticQueryWithOptions(tmpReq: SemanticQueryRequest, runtime: $RuntimeOptions): SemanticQueryResponse {
  tmpReq.validate();
  var request = new SemanticQueryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.mediaTypes)) {
    request.mediaTypesShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.mediaTypes, 'MediaTypes', 'json');
  }
  if (!$isNull(tmpReq.withFields)) {
    request.withFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.withFields, 'WithFields', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.mediaTypesShrink)) {
    query['MediaTypes'] = request.mediaTypesShrink;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.query)) {
    query['Query'] = request.query;
  }
  if (!$isNull(request.withFieldsShrink)) {
    query['WithFields'] = request.withFieldsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'SemanticQuery',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries metadata in a dataset by inputting natural language.
 *
 * @description ### [](#)Precautions
 * *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).**** Each time you call this operation, you are charged for semantic understanding and query fees.
 * *   Before you call this operation, make sure that the file that you want to use is indexed into the dataset that you use. To index a file into a dataset, you can call one of the following operations: [CreateBinding](https://help.aliyun.com/document_detail/478202.html), [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html), and [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html).
 * *   The response provided in this example is for reference only. The categories and content of metadata vary based on configurations of [workflow templates](https://help.aliyun.com/document_detail/466304.html). If you have questions, search for and join the DingTalk group numbered 21714099.
 * ### [](#)Usage limits
 * *   Each time you call this operation, up to 1,000 metadata files are returned.
 * *   Pagination is not supported.
 * *   The natural language processing capability may not always produce completely accurate results.
 * ### [](#)Usage methods
 * You can query files within a dataset by using natural language keywords. Key information supported for understanding includes labels (Labels.LabelName), time (ProduceTime), and location (Address.AddressLine). For example, if you use `2023 Hangzhou scenery` as the query criterion, the operation intelligently breaks the query criterion down into the following sub-criteria, and returns the files that meet all the sub-criteria:
 * *   ProduceTime: 00:00 on January 1, 2023 to 00:00 on December 31, 2023.
 * *   Address.AddressLine: `Hangzhou`
 * *   Labels.LabelName: `scenery`.
 * When you call this operation, you can configure a [workflow template](https://help.aliyun.com/document_detail/466304.html) that includes the `ImageEmbeddingExtraction` operator. This allows the operation to return image content when the query you input matches the image content, thereby achieving intelligent image retrieval.``
 *
 * @param request SemanticQueryRequest
 * @return SemanticQueryResponse
 */
async function semanticQuery(request: SemanticQueryRequest): SemanticQueryResponse {
  var runtime = new $RuntimeOptions{};
  return semanticQueryWithOptions(request, runtime);
}

model SimpleQueryRequest {
  aggregations?: [ 
    {
      field?: string(name='Field', description='The name of the field. For more information about supported fields, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).', example='Size'),
      operation?: string(name='Operation', description='The operator.

Enumerated values:

*   average: calculates the average number.
*   min: finds the minimum value.
*   max: finds the maximum value.
*   count: counts the number of results.
*   distinct: counts the number of distinct results.
*   sum: calculates the sum of all matching results..
*   group: counts the number of results by group. The results are sorted by the count number in descending order.', example='sum'),
    }
  ](name='Aggregations', description='The aggregations.

>  If you perform an aggregate query, the aggregation returned in the response contains only statistical results, not the actual metadata.'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  maxResults?: int32(name='MaxResults', description='*   If the Aggregations parameter is not specified, this parameter specifies the maximum number of files that can be returned. Valid values: 1 to 100.
*   If the Aggregations parameter is specified, this parameter specifies the maximum number of aggregation groups that can be returned. Valid values: 0 to 2000.
*   If you do not specify this parameter or set the parameter to 0, the default value of 100 is used.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter.

The next call to the operation returns results lexicographically after the NextToken parameter value.

You do not need to specify this parameter in your initial request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: sorts the results in ascending order.
*   desc: sorts the results in descending order. This is the default value.

*   You can specify multiple sort orders that are separated by commas. Example: asc,desc.

*   The number of elements in the Order parameter must be less than or equal to the number of elements in the Sort parameter. For example, if the value of the Sort parameter is Size,Filename, you can set the Order parameter to desc,asc.

*   If the number of sort orders is less than the number of sort fields, the sort fields for which no sorting orders are explicitly specified use the asc order by default. For example, if you set Sort to Size,Filename and Order to asc, the Filename field defaults to the value of asc.', example='asc,desc'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  query?: SimpleQuery(name='Query', description='The query conditions.'),
  sort?: string(name='Sort', description='The sort fields. For more information, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).

> 

*   If you specify multiple sort fields, separate them with commas (,), as in Size,Filename.

*   You can specify up to five sort fields.

*   The order of the sort fields determines their precedence in the sorting process.', example='Size,Filename'),
  withFields?: [ string ](name='WithFields', description='The fields that you want to include in the response. You can use this parameter to reduce the size of the response.

If you do not specify this parameter or leave this parameter empty, the operation returns all metadata fields.'),
  withoutTotalHits?: boolean(name='WithoutTotalHits', description='Specifies whether to return the total number of hits. Valid values:

*   true
*   false', nullable=true),
}

model SimpleQueryShrinkRequest {
  aggregationsShrink?: string(name='Aggregations', description='The aggregations.

>  If you perform an aggregate query, the aggregation returned in the response contains only statistical results, not the actual metadata.'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  maxResults?: int32(name='MaxResults', description='*   If the Aggregations parameter is not specified, this parameter specifies the maximum number of files that can be returned. Valid values: 1 to 100.
*   If the Aggregations parameter is specified, this parameter specifies the maximum number of aggregation groups that can be returned. Valid values: 0 to 2000.
*   If you do not specify this parameter or set the parameter to 0, the default value of 100 is used.', example='10'),
  nextToken?: string(name='NextToken', description='The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter.

The next call to the operation returns results lexicographically after the NextToken parameter value.

You do not need to specify this parameter in your initial request.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  order?: string(name='Order', description='The sort order. Valid values:

*   asc: sorts the results in ascending order.
*   desc: sorts the results in descending order. This is the default value.

*   You can specify multiple sort orders that are separated by commas. Example: asc,desc.

*   The number of elements in the Order parameter must be less than or equal to the number of elements in the Sort parameter. For example, if the value of the Sort parameter is Size,Filename, you can set the Order parameter to desc,asc.

*   If the number of sort orders is less than the number of sort fields, the sort fields for which no sorting orders are explicitly specified use the asc order by default. For example, if you set Sort to Size,Filename and Order to asc, the Filename field defaults to the value of asc.', example='asc,desc'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  queryShrink?: string(name='Query', description='The query conditions.'),
  sort?: string(name='Sort', description='The sort fields. For more information, see [Supported fields and operators](https://help.aliyun.com/document_detail/2743991.html).

> 

*   If you specify multiple sort fields, separate them with commas (,), as in Size,Filename.

*   You can specify up to five sort fields.

*   The order of the sort fields determines their precedence in the sorting process.', example='Size,Filename'),
  withFieldsShrink?: string(name='WithFields', description='The fields that you want to include in the response. You can use this parameter to reduce the size of the response.

If you do not specify this parameter or leave this parameter empty, the operation returns all metadata fields.'),
  withoutTotalHits?: boolean(name='WithoutTotalHits', description='Specifies whether to return the total number of hits. Valid values:

*   true
*   false', nullable=true),
}

model SimpleQueryResponseBody = {
  aggregations?: [ 
    {
      field?: string(name='Field', description='The name of the field.', example='Size'),
      groups?: [ 
        {
          count?: long(name='Count', description='The number of results in the grouped aggregation.', example='5'),
          value?: string(name='Value', description='The value for the grouped aggregation.', example='100'),
        }
      ](name='Groups', description='The grouped aggregations. This parameter is returned only when the group operator is specified in the Aggregations request parameter.'),
      operation?: string(name='Operation', description='The operator.', example='sum'),
      value?: double(name='Value', description='The statistical result.', example='200'),
    }
  ](name='Aggregations', description='The aggregations. This parameter is returned only when the value of the Aggregations request parameter is not empty.'),
  files?: [
    File
  ](name='Files', description='The files. This parameter is returned only when the value of the Aggregations request parameter is empty.'),
  nextToken?: string(name='NextToken', description='The pagination token is used in the next request to retrieve a new page of results if the total number of results exceeds the value of the MaxResults parameter.

It can be used in the next request to retrieve a new page of results.

If NextToken is empty, no next page exists.

This parameter is required.', example='MTIzNDU2Nzg6aW1tdGVzdDpleGFtcGxlYnVja2V0OmRhdGFzZXQwMDE6b3NzOi8vZXhhbXBsZWJ1Y2tldC9zYW1wbGVvYmplY3QxLmpwZw=='),
  requestId?: string(name='RequestId', description='The request ID.', example='2C5C1E0F-D8B8-4DA0-8127-EC32C771****'),
  totalHits?: long(name='TotalHits', description='The number of total hits.', example='10'),
}

model SimpleQueryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: SimpleQueryResponseBody(name='body'),
}

/**
 * @summary Queries files in a dataset by performing a simple query operation. The operation supports logical expressions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 * **Limits**
 * *   Each query returns information about up to 100 files.
 * *   Each query returns up to 2,000 aggregations.
 * *   A subquery supports up to 100 conditions.
 * *   A subquery can have a maximum nesting depth of 5 levels.
 * **Example query conditions**
 * *   Retrieve JPEG images larger than 1,000 pixels:
 * <!---->
 *     {
 *       "SubQueries":[
 *         {
 *           "Field":"ContentType",
 *           "Value": "image/jpeg",
 *           "Operation":"eq"
 *         },         
 *         {
 *           "Field":"ImageWidth",
 *           "Value":"1000",
 *           "Operation":"gt"
 *         }
 *       ],
 *       "Operation":"and"
 *     }
 * *   Search `oss://examplebucket/path/` for objects that have the `TV` or `Stereo` label and are larger than 10 MB in size:
 * >  This query requires matching files to have the `TV` or `Stereo` label. The two labels are specified as separate objects in the `Labels` fields.
 * ```
 * {
 *   "SubQueries": [
 *     {
 *       "Field": "URI",
 *       "Value": "oss://examplebucket/path/",
 *       "Operation": "prefix"
 *     },
 *     {
 *       "Field": "Size",
 *       "Value": "1048576",
 *       "Operation": "gt"
 *     },
 *     {
 *       "SubQueries": [
 *         {
 *           "Field": "Labels.LabelName",
 *           "Value": "TV",
 *           "Operation": "eq"
 *         },
 *         {
 *           "Field": "Labels.LabelName",
 *           "Value": "Stereo",
 *           "Operation": "eq"
 *         }
 *       ],
 *       "Operation": "or"
 *     }
 *   ],
 *   "Operation": "and"
 * }
 *         
 * ```
 * *   Exclude images that contain a face of a male over the age of 36:
 * >  In this example query, an image will be excluded from the query results if it contains a face of a male over the age of 36. This query is different from excluding an image that contains a male face or a face of a person over the age of 36. In this query, you need to use the `nested` operator to specify that the conditions are met on the same element.
 *     {
 *     	"Operation": "not",
 *     	"SubQueries": [{
 *     		"Operation": "nested",
 *     		"SubQueries": [{
 *     			"Operation": "and",
 *     			"SubQueries": [{
 *     				"Field": "Figures.Age",
 *     				"Operation": "gt",
 *     				"Value": "36"
 *     			}, {
 *     				"Field": "Figures.Gender",
 *     				"Operation": "eq",
 *     				"Value": "male"
 *     			}]
 *     		}]
 *     	}]
 *     }
 * *   Query JPEG images that have both custom labels and system labels:
 * <!---->
 *     {
 *       "SubQueries":[
 *         {
 *           "Field":"ContentType",
 *           "Value": "image/jpeg",
 *           "Operation":"eq"
 *         },         
 *         {
 *           "Field":"CustomLabels.test",
 *           "Operation":"exist"
 *         },         
 *         {
 *           "Field":"Labels.LabelName",
 *           "Operation":"exist"
 *         }
 *       ],
 *       "Operation":"and"
 *     }
 * You can also perform aggregate operations to collect and analyze different data based on the specified conditions. For example, you can calculate the sum, count, average value, or maximum value of all files that meet the query conditions. You can also calculate the size distribution of images that meet the query conditions.
 *
 * @param tmpReq SimpleQueryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return SimpleQueryResponse
 */
async function simpleQueryWithOptions(tmpReq: SimpleQueryRequest, runtime: $RuntimeOptions): SimpleQueryResponse {
  tmpReq.validate();
  var request = new SimpleQueryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.aggregations)) {
    request.aggregationsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.aggregations, 'Aggregations', 'json');
  }
  if (!$isNull(tmpReq.query)) {
    request.queryShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.query, 'Query', 'json');
  }
  if (!$isNull(tmpReq.withFields)) {
    request.withFieldsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.withFields, 'WithFields', 'json');
  }
  var query = {};
  if (!$isNull(request.aggregationsShrink)) {
    query['Aggregations'] = request.aggregationsShrink;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.maxResults)) {
    query['MaxResults'] = request.maxResults;
  }
  if (!$isNull(request.nextToken)) {
    query['NextToken'] = request.nextToken;
  }
  if (!$isNull(request.order)) {
    query['Order'] = request.order;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.queryShrink)) {
    query['Query'] = request.queryShrink;
  }
  if (!$isNull(request.sort)) {
    query['Sort'] = request.sort;
  }
  if (!$isNull(request.withFieldsShrink)) {
    query['WithFields'] = request.withFieldsShrink;
  }
  if (!$isNull(request.withoutTotalHits)) {
    query['WithoutTotalHits'] = request.withoutTotalHits;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'SimpleQuery',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Queries files in a dataset by performing a simple query operation. The operation supports logical expressions.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have indexed file metadata into the dataset automatically by calling the [CreateBinding](https://help.aliyun.com/document_detail/478202.html) operation or manually by calling the [IndexFileMeta](https://help.aliyun.com/document_detail/478166.html) or [BatchIndexFileMeta](https://help.aliyun.com/document_detail/478167.html) operation.
 * *   The sample response is provided for reference only. The metadata type and content in your response may differ based on factors such as the [workflow template configurations](https://help.aliyun.com/document_detail/466304.html). For any inquiries, join the DingTalk chat group (ID: 31690030817) and share your questions with us.
 * **Limits**
 * *   Each query returns information about up to 100 files.
 * *   Each query returns up to 2,000 aggregations.
 * *   A subquery supports up to 100 conditions.
 * *   A subquery can have a maximum nesting depth of 5 levels.
 * **Example query conditions**
 * *   Retrieve JPEG images larger than 1,000 pixels:
 * <!---->
 *     {
 *       "SubQueries":[
 *         {
 *           "Field":"ContentType",
 *           "Value": "image/jpeg",
 *           "Operation":"eq"
 *         },         
 *         {
 *           "Field":"ImageWidth",
 *           "Value":"1000",
 *           "Operation":"gt"
 *         }
 *       ],
 *       "Operation":"and"
 *     }
 * *   Search `oss://examplebucket/path/` for objects that have the `TV` or `Stereo` label and are larger than 10 MB in size:
 * >  This query requires matching files to have the `TV` or `Stereo` label. The two labels are specified as separate objects in the `Labels` fields.
 * ```
 * {
 *   "SubQueries": [
 *     {
 *       "Field": "URI",
 *       "Value": "oss://examplebucket/path/",
 *       "Operation": "prefix"
 *     },
 *     {
 *       "Field": "Size",
 *       "Value": "1048576",
 *       "Operation": "gt"
 *     },
 *     {
 *       "SubQueries": [
 *         {
 *           "Field": "Labels.LabelName",
 *           "Value": "TV",
 *           "Operation": "eq"
 *         },
 *         {
 *           "Field": "Labels.LabelName",
 *           "Value": "Stereo",
 *           "Operation": "eq"
 *         }
 *       ],
 *       "Operation": "or"
 *     }
 *   ],
 *   "Operation": "and"
 * }
 *         
 * ```
 * *   Exclude images that contain a face of a male over the age of 36:
 * >  In this example query, an image will be excluded from the query results if it contains a face of a male over the age of 36. This query is different from excluding an image that contains a male face or a face of a person over the age of 36. In this query, you need to use the `nested` operator to specify that the conditions are met on the same element.
 *     {
 *     	"Operation": "not",
 *     	"SubQueries": [{
 *     		"Operation": "nested",
 *     		"SubQueries": [{
 *     			"Operation": "and",
 *     			"SubQueries": [{
 *     				"Field": "Figures.Age",
 *     				"Operation": "gt",
 *     				"Value": "36"
 *     			}, {
 *     				"Field": "Figures.Gender",
 *     				"Operation": "eq",
 *     				"Value": "male"
 *     			}]
 *     		}]
 *     	}]
 *     }
 * *   Query JPEG images that have both custom labels and system labels:
 * <!---->
 *     {
 *       "SubQueries":[
 *         {
 *           "Field":"ContentType",
 *           "Value": "image/jpeg",
 *           "Operation":"eq"
 *         },         
 *         {
 *           "Field":"CustomLabels.test",
 *           "Operation":"exist"
 *         },         
 *         {
 *           "Field":"Labels.LabelName",
 *           "Operation":"exist"
 *         }
 *       ],
 *       "Operation":"and"
 *     }
 * You can also perform aggregate operations to collect and analyze different data based on the specified conditions. For example, you can calculate the sum, count, average value, or maximum value of all files that meet the query conditions. You can also calculate the size distribution of images that meet the query conditions.
 *
 * @param request SimpleQueryRequest
 * @return SimpleQueryResponse
 */
async function simpleQuery(request: SimpleQueryRequest): SimpleQueryResponse {
  var runtime = new $RuntimeOptions{};
  return simpleQueryWithOptions(request, runtime);
}

model SuspendBatchRequest {
  id?: string(name='Id', description='The ID of the batch processing task. You can obtain the ID of the batch processing task from the response of the [CreateBatch](https://help.aliyun.com/document_detail/606694.html) operation.

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model SuspendBatchResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='EC564A9A-BA5C-4499-A087-D9B9E76E*****'),
}

model SuspendBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: SuspendBatchResponseBody(name='body'),
}

/**
 * @summary Suspends a batch processing task.
 *
 * @description You can suspend a batch processing task that is in the Running state. You can call the [ResumeBatch](https://help.aliyun.com/document_detail/479914.html) operation to resume a batch processing task that is suspended.
 *
 * @param request SuspendBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return SuspendBatchResponse
 */
async function suspendBatchWithOptions(request: SuspendBatchRequest, runtime: $RuntimeOptions): SuspendBatchResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'SuspendBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Suspends a batch processing task.
 *
 * @description You can suspend a batch processing task that is in the Running state. You can call the [ResumeBatch](https://help.aliyun.com/document_detail/479914.html) operation to resume a batch processing task that is suspended.
 *
 * @param request SuspendBatchRequest
 * @return SuspendBatchResponse
 */
async function suspendBatch(request: SuspendBatchRequest): SuspendBatchResponse {
  var runtime = new $RuntimeOptions{};
  return suspendBatchWithOptions(request, runtime);
}

model SuspendTriggerRequest {
  id?: string(name='Id', description='The ID of the trigger.[](~~479912~~)

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model SuspendTriggerResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='0BC1F0C9-8E99-46C6-B502-10DED******'),
}

model SuspendTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: SuspendTriggerResponseBody(name='body'),
}

/**
 * @summary Suspends a running trigger.
 *
 * @description The operation can be used to suspend a trigger only in the Running state. If you want to resume a suspended trigger, call the [ResumeTrigger](https://help.aliyun.com/document_detail/479919.html) operation.
 *
 * @param request SuspendTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return SuspendTriggerResponse
 */
async function suspendTriggerWithOptions(request: SuspendTriggerRequest, runtime: $RuntimeOptions): SuspendTriggerResponse {
  request.validate();
  var body : map[string]any = {};
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'SuspendTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Suspends a running trigger.
 *
 * @description The operation can be used to suspend a trigger only in the Running state. If you want to resume a suspended trigger, call the [ResumeTrigger](https://help.aliyun.com/document_detail/479919.html) operation.
 *
 * @param request SuspendTriggerRequest
 * @return SuspendTriggerResponse
 */
async function suspendTrigger(request: SuspendTriggerRequest): SuspendTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return suspendTriggerWithOptions(request, runtime);
}

model UpdateBatchRequest {
  actions?: [ 
    {
      name?: string(name='Name', description='The name of the template.', example='doc/convert'),
      parameters?: [ string ](name='Parameters', description='The template parameters.'),
    }
  ](name='Actions', description='The processing templates.'),
  id?: string(name='Id', description='The ID of the batch processing task. You can obtain the ID of the batch processing task from the response of the [CreateBatch](https://help.aliyun.com/document_detail/606694.html) operation.

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  input?: Input(name='Input', description='The input data source.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key":"val"}'),
}

model UpdateBatchShrinkRequest {
  actionsShrink?: string(name='Actions', description='The processing templates.'),
  id?: string(name='Id', description='The ID of the batch processing task. You can obtain the ID of the batch processing task from the response of the [CreateBatch](https://help.aliyun.com/document_detail/606694.html) operation.

This parameter is required.', example='batch-4eb9223f-3e88-42d3-a578-3f2852******'),
  inputShrink?: string(name='Input', description='The input data source.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"key":"val"}'),
}

model UpdateBatchResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='CB4D73A3-BAF4-4A9D-A631-15F219AF****'),
}

model UpdateBatchResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateBatchResponseBody(name='body'),
}

/**
 * @summary Updates information about a batch processing task, including the input data source, data processing settings, and tags.
 *
 * @description *   You can update only a batch processing task that is in the Ready or Failed state. The update operation does not change the status of the batch processing task.
 * *   If you update a batch processing task that is in progress, the task is not automatically resumed after the update is complete. You must call the [ResumeBatch](https://help.aliyun.com/document_detail/479914.html) operation to resume the task.
 *
 * @param tmpReq UpdateBatchRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateBatchResponse
 */
async function updateBatchWithOptions(tmpReq: UpdateBatchRequest, runtime: $RuntimeOptions): UpdateBatchResponse {
  tmpReq.validate();
  var request = new UpdateBatchShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.actions)) {
    request.actionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.actions, 'Actions', 'json');
  }
  if (!$isNull(tmpReq.input)) {
    request.inputShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.input, 'Input', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.actionsShrink)) {
    body['Actions'] = request.actionsShrink;
  }
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.inputShrink)) {
    body['Input'] = request.inputShrink;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    body['Tags'] = request.tagsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateBatch',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates information about a batch processing task, including the input data source, data processing settings, and tags.
 *
 * @description *   You can update only a batch processing task that is in the Ready or Failed state. The update operation does not change the status of the batch processing task.
 * *   If you update a batch processing task that is in progress, the task is not automatically resumed after the update is complete. You must call the [ResumeBatch](https://help.aliyun.com/document_detail/479914.html) operation to resume the task.
 *
 * @param request UpdateBatchRequest
 * @return UpdateBatchResponse
 */
async function updateBatch(request: UpdateBatchRequest): UpdateBatchResponse {
  var runtime = new $RuntimeOptions{};
  return updateBatchWithOptions(request, runtime);
}

model UpdateDatasetRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', example='90000000000000000'),
  datasetName?: string(name='DatasetName', description='This parameter is required.', example='test-dataset'),
  description?: string(name='Description', example='immtest'),
  projectName?: string(name='ProjectName', description='This parameter is required.', example='test-project'),
  templateId?: string(name='TemplateId', example='Official:AllFunction'),
}

model UpdateDatasetResponseBody = {
  dataset?: Dataset(name='Dataset', description='The dataset.'),
  requestId?: string(name='RequestId', example='45234D4A-A3E3-4B23-AACA-8D897514****'),
}

model UpdateDatasetResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateDatasetResponseBody(name='body'),
}

/**
 * @summary Updates a dataset.
 *
 * @param request UpdateDatasetRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateDatasetResponse
 */
async function updateDatasetWithOptions(request: UpdateDatasetRequest, runtime: $RuntimeOptions): UpdateDatasetResponse {
  request.validate();
  var query = {};
  if (!$isNull(request.datasetMaxBindCount)) {
    query['DatasetMaxBindCount'] = request.datasetMaxBindCount;
  }
  if (!$isNull(request.datasetMaxEntityCount)) {
    query['DatasetMaxEntityCount'] = request.datasetMaxEntityCount;
  }
  if (!$isNull(request.datasetMaxFileCount)) {
    query['DatasetMaxFileCount'] = request.datasetMaxFileCount;
  }
  if (!$isNull(request.datasetMaxRelationCount)) {
    query['DatasetMaxRelationCount'] = request.datasetMaxRelationCount;
  }
  if (!$isNull(request.datasetMaxTotalFileSize)) {
    query['DatasetMaxTotalFileSize'] = request.datasetMaxTotalFileSize;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.description)) {
    query['Description'] = request.description;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.templateId)) {
    query['TemplateId'] = request.templateId;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateDataset',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates a dataset.
 *
 * @param request UpdateDatasetRequest
 * @return UpdateDatasetResponse
 */
async function updateDataset(request: UpdateDatasetRequest): UpdateDatasetResponse {
  var runtime = new $RuntimeOptions{};
  return updateDatasetWithOptions(request, runtime);
}

model UpdateFigureClusterRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  figureCluster?: FigureClusterForReq(name='FigureCluster', description='The information about the cluster.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model UpdateFigureClusterShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  figureClusterShrink?: string(name='FigureCluster', description='The information about the cluster.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
}

model UpdateFigureClusterResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='5F74C5C9-5AC0-49F9-914D-E01589D3****'),
}

model UpdateFigureClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateFigureClusterResponseBody(name='body'),
}

/**
 * @summary Updates information about a face cluster, such as the cluster name and labels.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 * *   The operation updates only the cover image, cluster name, and tags.
 * *   After the operation is successful, you can call the [GetFigureCluster](https://help.aliyun.com/document_detail/478182.html) or [BatchGetFigureCluster](https://help.aliyun.com/document_detail/2248450.html) operation to query the updated cluster.
 *
 * @param tmpReq UpdateFigureClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateFigureClusterResponse
 */
async function updateFigureClusterWithOptions(tmpReq: UpdateFigureClusterRequest, runtime: $RuntimeOptions): UpdateFigureClusterResponse {
  tmpReq.validate();
  var request = new UpdateFigureClusterShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.figureCluster)) {
    request.figureClusterShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.figureCluster, 'FigureCluster', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.figureClusterShrink)) {
    query['FigureCluster'] = request.figureClusterShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateFigureCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates information about a face cluster, such as the cluster name and labels.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateFigureClusteringTask](https://help.aliyun.com/document_detail/478180.html) operation to cluster all faces in the dataset.
 * *   The operation updates only the cover image, cluster name, and tags.
 * *   After the operation is successful, you can call the [GetFigureCluster](https://help.aliyun.com/document_detail/478182.html) or [BatchGetFigureCluster](https://help.aliyun.com/document_detail/2248450.html) operation to query the updated cluster.
 *
 * @param request UpdateFigureClusterRequest
 * @return UpdateFigureClusterResponse
 */
async function updateFigureCluster(request: UpdateFigureClusterRequest): UpdateFigureClusterResponse {
  var runtime = new $RuntimeOptions{};
  return updateFigureClusterWithOptions(request, runtime);
}

model UpdateFileMetaRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  file?: InputFile(name='File', description='The file and its metadata items to be updated. The value must be in the JSON format.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model UpdateFileMetaShrinkRequest {
  datasetName?: string(name='DatasetName', description='The name of the dataset. You can obtain the name of the dataset from the response of the [CreateDataset](https://help.aliyun.com/document_detail/478160.html) operation.

This parameter is required.', example='test-dataset'),
  fileShrink?: string(name='File', description='The file and its metadata items to be updated. The value must be in the JSON format.

This parameter is required.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
}

model UpdateFileMetaResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='6D53E6C9-5AC0-48F9-825F-D02678E3****'),
}

model UpdateFileMetaResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateFileMetaResponseBody(name='body'),
}

/**
 * @summary Updates the partial metadata of the indexed files in a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   You cannot call this operation to update all metadata. You can update only metadata specified by CustomLabels, CustomId, and Figures. For more information, see the "Request parameters" section of this topic.
 *
 * @param tmpReq UpdateFileMetaRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateFileMetaResponse
 */
async function updateFileMetaWithOptions(tmpReq: UpdateFileMetaRequest, runtime: $RuntimeOptions): UpdateFileMetaResponse {
  tmpReq.validate();
  var request = new UpdateFileMetaShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.file)) {
    request.fileShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.file, 'File', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.fileShrink)) {
    query['File'] = request.fileShrink;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateFileMeta',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates the partial metadata of the indexed files in a dataset.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   You cannot call this operation to update all metadata. You can update only metadata specified by CustomLabels, CustomId, and Figures. For more information, see the "Request parameters" section of this topic.
 *
 * @param request UpdateFileMetaRequest
 * @return UpdateFileMetaResponse
 */
async function updateFileMeta(request: UpdateFileMetaRequest): UpdateFileMetaResponse {
  var runtime = new $RuntimeOptions{};
  return updateFileMetaWithOptions(request, runtime);
}

model UpdateLocationDateClusterRequest {
  customId?: string(name='CustomId', description='The custom ID of the cluster. When the cluster is indexed into the dataset, the custom ID is stored as the data attribute. You can map the custom ID to other data in your business system. For example, you can pass the custom ID to map a URI to an ID. We recommend that you specify a globally unique value. The value can be up to 1,024 bytes in size.', example='member-id-0001'),
  customLabels?: map[string]any(name='CustomLabels', description='The custom labels. The parameter stores custom key-value labels, which can be used to filter data. You can specify up to 100 custom labels for a cluster.', example='{
      "UserScore": "5"
}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  objectId?: string(name='ObjectId', description='The ID of the cluster that you want to update.

This parameter is required.', example='location-date-cluster-71dd4f32-9597-4085-a2ab-3a7b0fd0aff9'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  title?: string(name='Title', description='The name of the cluster. The name can be used to search for the cluster. The value can be up to 1,024 bytes in size.'),
}

model UpdateLocationDateClusterShrinkRequest {
  customId?: string(name='CustomId', description='The custom ID of the cluster. When the cluster is indexed into the dataset, the custom ID is stored as the data attribute. You can map the custom ID to other data in your business system. For example, you can pass the custom ID to map a URI to an ID. We recommend that you specify a globally unique value. The value can be up to 1,024 bytes in size.', example='member-id-0001'),
  customLabelsShrink?: string(name='CustomLabels', description='The custom labels. The parameter stores custom key-value labels, which can be used to filter data. You can specify up to 100 custom labels for a cluster.', example='{
      "UserScore": "5"
}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.[](~~478160~~)

This parameter is required.', example='test-dataset'),
  objectId?: string(name='ObjectId', description='The ID of the cluster that you want to update.

This parameter is required.', example='location-date-cluster-71dd4f32-9597-4085-a2ab-3a7b0fd0aff9'),
  projectName?: string(name='ProjectName', description='The name of the project.[](~~478153~~)

This parameter is required.', example='test-project'),
  title?: string(name='Title', description='The name of the cluster. The name can be used to search for the cluster. The value can be up to 1,024 bytes in size.'),
}

model UpdateLocationDateClusterResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='52B017A8-FEF5-0A61-BAEE-234A8AD8****'),
}

model UpdateLocationDateClusterResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateLocationDateClusterResponseBody(name='body'),
}

/**
 * @summary Updates a spatiotemporal cluster.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to create spatiotemporal clusters in the project.
 *
 * @param tmpReq UpdateLocationDateClusterRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateLocationDateClusterResponse
 */
async function updateLocationDateClusterWithOptions(tmpReq: UpdateLocationDateClusterRequest, runtime: $RuntimeOptions): UpdateLocationDateClusterResponse {
  tmpReq.validate();
  var request = new UpdateLocationDateClusterShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.customLabels)) {
    request.customLabelsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.customLabels, 'CustomLabels', 'json');
  }
  var query = {};
  if (!$isNull(request.customId)) {
    query['CustomId'] = request.customId;
  }
  if (!$isNull(request.customLabelsShrink)) {
    query['CustomLabels'] = request.customLabelsShrink;
  }
  if (!$isNull(request.datasetName)) {
    query['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectId)) {
    query['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.title)) {
    query['Title'] = request.title;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateLocationDateCluster',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates a spatiotemporal cluster.
 *
 * @description *   Before you call this operation, make sure that you are familiar with the [billing](https://help.aliyun.com/document_detail/477042.html) of Intelligent Media Management (IMM).****
 * *   Before you call this operation, make sure that you have called the [CreateLocationDateClusteringTask](https://help.aliyun.com/document_detail/478188.html) operation to create spatiotemporal clusters in the project.
 *
 * @param request UpdateLocationDateClusterRequest
 * @return UpdateLocationDateClusterResponse
 */
async function updateLocationDateCluster(request: UpdateLocationDateClusterRequest): UpdateLocationDateClusterResponse {
  var runtime = new $RuntimeOptions{};
  return updateLocationDateClusterWithOptions(request, runtime);
}

model UpdateProjectRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', description='The maximum number of bindings for each dataset. Valid values: 1 to 10.', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', description='The maximum number of metadata entities in each dataset.

>  This is a precautionary setting that does not impose practical limitations.', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', description='The maximum number of files in each dataset. Valid values: 1 to 100000000.', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', description='The maximum number of metadata relationships in a dataset.

>  This is a precautionary setting that does not impose practical limitations.', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', description='The maximum size of files in each dataset. If the maximum size is exceeded, indexes can no longer be added. Unit: bytes.', example='90000000000000000'),
  description?: string(name='Description', description='The description of the project. The description must be 1 to 256 characters in length.', example='immtest'),
  projectMaxDatasetCount?: long(name='ProjectMaxDatasetCount', description='The maximum number of datasets in the project. Valid values: 1 to 1000000000.', example='1000000000'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The name of the Resource Access Management (RAM) role. You must grant the RAM role to Intelligent Media Management (IMM) before IMM can access other cloud resources such as Object Storage Service (OSS).

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).', example='AliyunIMMDefaultRole'),
  tag?: [ 
    {
      key?: string(name='Key', description='The tag key.', example='TestKey'),
      value?: string(name='Value', description='The tag value.', example='TestValue'),
    }
  ](name='Tag', description='The tags.'),
  templateId?: string(name='TemplateId', description='The ID of the workflow template. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).', example='AliyunIMMDefaultRole'),
}

model UpdateProjectShrinkRequest {
  datasetMaxBindCount?: long(name='DatasetMaxBindCount', description='The maximum number of bindings for each dataset. Valid values: 1 to 10.', example='10'),
  datasetMaxEntityCount?: long(name='DatasetMaxEntityCount', description='The maximum number of metadata entities in each dataset.

>  This is a precautionary setting that does not impose practical limitations.', example='10000000000'),
  datasetMaxFileCount?: long(name='DatasetMaxFileCount', description='The maximum number of files in each dataset. Valid values: 1 to 100000000.', example='100000000'),
  datasetMaxRelationCount?: long(name='DatasetMaxRelationCount', description='The maximum number of metadata relationships in a dataset.

>  This is a precautionary setting that does not impose practical limitations.', example='100000000000'),
  datasetMaxTotalFileSize?: long(name='DatasetMaxTotalFileSize', description='The maximum size of files in each dataset. If the maximum size is exceeded, indexes can no longer be added. Unit: bytes.', example='90000000000000000'),
  description?: string(name='Description', description='The description of the project. The description must be 1 to 256 characters in length.', example='immtest'),
  projectMaxDatasetCount?: long(name='ProjectMaxDatasetCount', description='The maximum number of datasets in the project. Valid values: 1 to 1000000000.', example='1000000000'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  serviceRole?: string(name='ServiceRole', description='The name of the Resource Access Management (RAM) role. You must grant the RAM role to Intelligent Media Management (IMM) before IMM can access other cloud resources such as Object Storage Service (OSS).

You can also create a custom service role in the RAM console and grant the required permissions to the role based on your business requirements. For more information, see [Create a regular service role](https://help.aliyun.com/document_detail/116800.html) and [Grant permissions to a role](https://help.aliyun.com/document_detail/116147.html).', example='AliyunIMMDefaultRole'),
  tagShrink?: string(name='Tag', description='The tags.'),
  templateId?: string(name='TemplateId', description='The ID of the workflow template. For more information, see [Workflow templates and operators](https://help.aliyun.com/document_detail/466304.html).', example='AliyunIMMDefaultRole'),
}

model UpdateProjectResponseBody = {
  project?: Project(name='Project', description='The project.'),
  requestId?: string(name='RequestId', description='The request ID.', example='D33C3574-4093-448E-86E7-15BE2BD3****'),
}

model UpdateProjectResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateProjectResponseBody(name='body'),
}

/**
 * @summary Updates information about a project.
 *
 * @description *   Before you call this operation, make sure that the project exists. For information about how to create a project, see "CreateProject".
 * *   When you call this operation, you need to specify only the parameters that you want to update. The parameters that you do not specify remain unchanged after you call this operation.
 * *   Wait for up to 5 minutes for the update to take effect.
 *
 * @param tmpReq UpdateProjectRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateProjectResponse
 */
async function updateProjectWithOptions(tmpReq: UpdateProjectRequest, runtime: $RuntimeOptions): UpdateProjectResponse {
  tmpReq.validate();
  var request = new UpdateProjectShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.tag)) {
    request.tagShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tag, 'Tag', 'json');
  }
  var query = {};
  if (!$isNull(request.datasetMaxBindCount)) {
    query['DatasetMaxBindCount'] = request.datasetMaxBindCount;
  }
  if (!$isNull(request.datasetMaxEntityCount)) {
    query['DatasetMaxEntityCount'] = request.datasetMaxEntityCount;
  }
  if (!$isNull(request.datasetMaxFileCount)) {
    query['DatasetMaxFileCount'] = request.datasetMaxFileCount;
  }
  if (!$isNull(request.datasetMaxRelationCount)) {
    query['DatasetMaxRelationCount'] = request.datasetMaxRelationCount;
  }
  if (!$isNull(request.datasetMaxTotalFileSize)) {
    query['DatasetMaxTotalFileSize'] = request.datasetMaxTotalFileSize;
  }
  if (!$isNull(request.description)) {
    query['Description'] = request.description;
  }
  if (!$isNull(request.projectMaxDatasetCount)) {
    query['ProjectMaxDatasetCount'] = request.projectMaxDatasetCount;
  }
  if (!$isNull(request.projectName)) {
    query['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.serviceRole)) {
    query['ServiceRole'] = request.serviceRole;
  }
  if (!$isNull(request.tagShrink)) {
    query['Tag'] = request.tagShrink;
  }
  if (!$isNull(request.templateId)) {
    query['TemplateId'] = request.templateId;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    query = OpenApiUtil.query(query),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateProject',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates information about a project.
 *
 * @description *   Before you call this operation, make sure that the project exists. For information about how to create a project, see "CreateProject".
 * *   When you call this operation, you need to specify only the parameters that you want to update. The parameters that you do not specify remain unchanged after you call this operation.
 * *   Wait for up to 5 minutes for the update to take effect.
 *
 * @param request UpdateProjectRequest
 * @return UpdateProjectResponse
 */
async function updateProject(request: UpdateProjectRequest): UpdateProjectResponse {
  var runtime = new $RuntimeOptions{};
  return updateProjectWithOptions(request, runtime);
}

model UpdateStoryRequest {
  cover?: {
    URI?: string(name='URI', description='The URI of the cover image.

Specify the OSS URI in the oss://${Bucket}/${Object} format, where `${Bucket}` is the name of the bucket in the same region as the current project and `${Object}` is the path of the object with the extension included.', example='oss://bucket1/object'),
  }(name='Cover', description='The cover image of the story.'),
  customId?: string(name='CustomId', description='The custom ID.', example='test'),
  customLabels?: map[string]any(name='CustomLabels', description='The custom tags. You can specify up to 100 custom tags.', example='{"key": "value"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='testdata'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  storyName?: string(name='StoryName', description='The name of the story.', example='newstory'),
}

model UpdateStoryShrinkRequest {
  coverShrink?: string(name='Cover', description='The cover image of the story.'),
  customId?: string(name='CustomId', description='The custom ID.', example='test'),
  customLabelsShrink?: string(name='CustomLabels', description='The custom tags. You can specify up to 100 custom tags.', example='{"key": "value"}'),
  datasetName?: string(name='DatasetName', description='The name of the dataset.

This parameter is required.', example='testdata'),
  objectId?: string(name='ObjectId', description='The ID of the story.

This parameter is required.', example='testid'),
  projectName?: string(name='ProjectName', description='The name of the project.

This parameter is required.', example='immtest'),
  storyName?: string(name='StoryName', description='The name of the story.', example='newstory'),
}

model UpdateStoryResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='6E93D6C9-5AC0-49F9-914D-E02678D3****'),
}

model UpdateStoryResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateStoryResponseBody(name='body'),
}

/**
 * @summary Updates the information about a story, such as the story name and cover image.
 *
 * @param tmpReq UpdateStoryRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateStoryResponse
 */
async function updateStoryWithOptions(tmpReq: UpdateStoryRequest, runtime: $RuntimeOptions): UpdateStoryResponse {
  tmpReq.validate();
  var request = new UpdateStoryShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.cover)) {
    request.coverShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.cover, 'Cover', 'json');
  }
  if (!$isNull(tmpReq.customLabels)) {
    request.customLabelsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.customLabels, 'CustomLabels', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.coverShrink)) {
    body['Cover'] = request.coverShrink;
  }
  if (!$isNull(request.customId)) {
    body['CustomId'] = request.customId;
  }
  if (!$isNull(request.customLabelsShrink)) {
    body['CustomLabels'] = request.customLabelsShrink;
  }
  if (!$isNull(request.datasetName)) {
    body['DatasetName'] = request.datasetName;
  }
  if (!$isNull(request.objectId)) {
    body['ObjectId'] = request.objectId;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.storyName)) {
    body['StoryName'] = request.storyName;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateStory',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates the information about a story, such as the story name and cover image.
 *
 * @param request UpdateStoryRequest
 * @return UpdateStoryResponse
 */
async function updateStory(request: UpdateStoryRequest): UpdateStoryResponse {
  var runtime = new $RuntimeOptions{};
  return updateStoryWithOptions(request, runtime);
}

model UpdateTriggerRequest {
  actions?: [ 
    {
      name?: string(name='Name', description='The template name.', example='doc/convert'),
      parameters?: [ string ](name='Parameters', description='The template parameters.'),
    }
  ](name='Actions', description='The processing templates.'),
  id?: string(name='Id', description='The ID of the trigger. You can obtain the ID of the trigger from the response of the [CreateTrigger](https://help.aliyun.com/document_detail/479912.html) operation.

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  input?: Input(name='Input', description='The data source configurations.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  tags?: map[string]any(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
}

model UpdateTriggerShrinkRequest {
  actionsShrink?: string(name='Actions', description='The processing templates.'),
  id?: string(name='Id', description='The ID of the trigger. You can obtain the ID of the trigger from the response of the [CreateTrigger](https://help.aliyun.com/document_detail/479912.html) operation.

This parameter is required.', example='trigger-9f72636a-0f0c-4baf-ae78-38b27b******'),
  inputShrink?: string(name='Input', description='The data source configurations.'),
  projectName?: string(name='ProjectName', description='The name of the project. You can obtain the name of the project from the response of the [CreateProject](https://help.aliyun.com/document_detail/478153.html) operation.

This parameter is required.', example='test-project'),
  tagsShrink?: string(name='Tags', description='The custom tags. You can search for or filter asynchronous tasks by custom tag.', example='{"test": "val1"}'),
}

model UpdateTriggerResponseBody = {
  requestId?: string(name='RequestId', description='The request ID.', example='5A022F78-B9A8-4ACC-BB6B-B35975******'),
}

model UpdateTriggerResponse = {
  headers?: map[string]string(name='headers'),
  statusCode?: int32(name='statusCode'),
  body?: UpdateTriggerResponseBody(name='body'),
}

/**
 * @summary Updates information about a trigger, such as the input data source, data processing settings, and tags.
 *
 * @description *   You can update only a trigger that is in the Ready or Failed state. The update operation does not change the trigger status.
 * *   After you update a trigger, the uncompleted tasks under the original trigger are no longer executed. You can call the [ResumeTrigger](https://help.aliyun.com/document_detail/479916.html) operation to resume the execution of the trigger.
 *
 * @param tmpReq UpdateTriggerRequest
 * @param runtime runtime options for this request RuntimeOptions
 * @return UpdateTriggerResponse
 */
async function updateTriggerWithOptions(tmpReq: UpdateTriggerRequest, runtime: $RuntimeOptions): UpdateTriggerResponse {
  tmpReq.validate();
  var request = new UpdateTriggerShrinkRequest{};
  OpenApiUtil.convert(tmpReq, request);
  if (!$isNull(tmpReq.actions)) {
    request.actionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.actions, 'Actions', 'json');
  }
  if (!$isNull(tmpReq.input)) {
    request.inputShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.input, 'Input', 'json');
  }
  if (!$isNull(tmpReq.tags)) {
    request.tagsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.tags, 'Tags', 'json');
  }
  var body : map[string]any = {};
  if (!$isNull(request.actionsShrink)) {
    body['Actions'] = request.actionsShrink;
  }
  if (!$isNull(request.id)) {
    body['Id'] = request.id;
  }
  if (!$isNull(request.inputShrink)) {
    body['Input'] = request.inputShrink;
  }
  if (!$isNull(request.projectName)) {
    body['ProjectName'] = request.projectName;
  }
  if (!$isNull(request.tagsShrink)) {
    body['Tags'] = request.tagsShrink;
  }
  var req = new OpenApiUtil.OpenApiRequest{ 
    body = OpenApiUtil.parseToMap(body),
  };
  var params = new OpenApiUtil.Params{
    action = 'UpdateTrigger',
    version = '2020-09-30',
    protocol = 'HTTPS',
    pathname = '/',
    method = 'POST',
    authType = 'AK',
    style = 'RPC',
    reqBodyType = 'formData',
    bodyType = 'json',
  };
  return callApi(params, req, runtime);
}

/**
 * @summary Updates information about a trigger, such as the input data source, data processing settings, and tags.
 *
 * @description *   You can update only a trigger that is in the Ready or Failed state. The update operation does not change the trigger status.
 * *   After you update a trigger, the uncompleted tasks under the original trigger are no longer executed. You can call the [ResumeTrigger](https://help.aliyun.com/document_detail/479916.html) operation to resume the execution of the trigger.
 *
 * @param request UpdateTriggerRequest
 * @return UpdateTriggerResponse
 */
async function updateTrigger(request: UpdateTriggerRequest): UpdateTriggerResponse {
  var runtime = new $RuntimeOptions{};
  return updateTriggerWithOptions(request, runtime);
}

