/* eslint-disable @typescript-eslint/no-explicit-any */
/*
 *  Copyright 2021 Collate
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *  http://www.apache.org/licenses/LICENSE-2.0
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

/**
 * Create Pipeline service entity request.
 */
export interface CreatePipelineService {
    connection?: PipelineConnection;
    /**
     * Directory for the Pipeline Service's data assets.
     * NOTE(review): the generated comment duplicated the `domain` description
     * ("Fully qualified name of the domain..."); verify against the source JSON schema.
     */
    dataAssetDir?: string;
    /**
     * List of fully qualified names of data products this entity is part of.
     */
    dataProducts?: string[];
    /**
     * Description of pipeline service entity.
     */
    description?: string;
    /**
     * Display Name that identifies this pipeline service.
     */
    displayName?: string;
    /**
     * Fully qualified name of the domain the Pipeline Service belongs to.
     */
    domain?: string;
    /**
     * Life Cycle of the entity.
     */
    lifeCycle?: LifeCycle;
    /**
     * Name that identifies this entity instance uniquely.
     */
    name: string;
    /**
     * Owner of this pipeline service.
     */
    owner?: EntityReference;
    /**
     * Scheduler Interval for the pipeline in cron format.
     */
    scheduleInterval?: string;
    serviceType:       PipelineServiceType;
    /**
     * Tags for this Pipeline Service.
     */
    tags?: TagLabel[];
}

/**
 * Pipeline Connection. Wraps the service-specific connection config
 * (see the `Connection` union-style interface below).
 */
export interface PipelineConnection {
    config?: Connection;
}

/**
 * Airflow Metadata Database Connection Config
 *
 * Glue Pipeline Connection Config
 *
 * Airbyte Metadata Database Connection Config
 *
 * Fivetran Metadata Database Connection Config
 *
 * Dagster Metadata Database Connection Config
 *
 * Nifi Metadata Pipeline Connection Config
 *
 * Domo Pipeline Connection Config
 *
 * Custom Pipeline Service connection to build a source that is not supported by
 * OpenMetadata yet.
 *
 * Databricks Connection Config
 *
 * Spline Metadata Database Connection Config
 *
 * Spark Metadata Pipeline Connection Config
 *
 * OpenLineage Connection Config
 *
 * KafkaConnect Connection Config
 *
 * NOTE(review): this interface is the code-generated flattening of all the pipeline
 * connection schemas listed above; which fields apply depends on `type`.
 */
export interface Connection {
    /**
     * Underlying database connection. See
     * https://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html for
     * supported backends.
     */
    connection?: MetadataDatabaseConnection;
    /**
     * Pipeline Service Management/UI URI.
     *
     * Pipeline Service Management/UI URL.
     *
     * Host and port of the Databricks service.
     *
     * Spline REST Server Host & Port.
     *
     * KafkaConnect Service Management/UI URI.
     */
    hostPort?: string;
    /**
     * Pipeline Service Number Of Status.
     */
    numberOfStatus?:             number;
    supportsMetadataExtraction?: boolean;
    /**
     * Service Type
     *
     * Custom pipeline service type
     */
    type?:      PipelineServiceType;
    awsConfig?: AWSCredentials;
    /**
     * Password to connect to Airbyte.
     */
    password?: string;
    /**
     * Username to connect to Airbyte.
     */
    username?: string;
    /**
     * Fivetran API Key.
     * NOTE(review): the generated comment said "Fivetran API Secret" for this field,
     * duplicating `apiSecret` below; the field name indicates the API key.
     */
    apiKey?: string;
    /**
     * Fivetran API Secret.
     */
    apiSecret?: string;
    /**
     * Fivetran API Limit For Pagination.
     */
    limit?: number;
    /**
     * URL to the Dagster instance.
     */
    host?: string;
    /**
     * Connection Time Limit Between OM and Dagster Graphql API in second.
     */
    timeout?: number;
    /**
     * To Connect to Dagster Cloud
     *
     * Generated Token to connect to Databricks.
     */
    token?: string;
    /**
     * We support username/password or client certificate authentication.
     */
    nifiConfig?: NifiCredentialsConfiguration;
    /**
     * Access token to connect to DOMO.
     */
    accessToken?: string;
    /**
     * API Host to connect to DOMO instance.
     */
    apiHost?: string;
    /**
     * Client ID for DOMO.
     */
    clientId?: string;
    /**
     * URL of your Domo instance, e.g., https://openmetadata.domo.com
     */
    instanceDomain?: string;
    /**
     * Secret token to connect to DOMO.
     */
    secretToken?:       string;
    connectionOptions?: { [key: string]: string };
    /**
     * Source Python Class Name to instantiated by the ingestion workflow.
     */
    sourcePythonClass?:   string;
    connectionArguments?: { [key: string]: any };
    /**
     * Databricks compute resources URL.
     */
    httpPath?: string;
    /**
     * Spline UI Host & Port.
     */
    uiHostPort?: string;
    /**
     * service type of the messaging source
     * NOTE(review): description copied from the source schema; presumably this is the
     * broker URL(s) of the messaging source — verify against the OpenLineage schema.
     */
    brokersUrl?: string;
    /**
     * consumer group name
     */
    consumerGroupName?: string;
    /**
     * initial Kafka consumer offset
     */
    consumerOffsets?: InitialConsumerOffsets;
    /**
     * max allowed wait time
     */
    poolTimeout?: number;
    /**
     * Kafka security protocol config
     */
    securityProtocol?: KafkaSecurityProtocol;
    /**
     * max allowed inactivity time
     */
    sessionTimeout?: number;
    /**
     * SSL Configuration details.
     */
    sslConfig?: Config;
    /**
     * topic from where Open lineage events will be pulled
     */
    topicName?: string;
    /**
     * We support username/password or No Authentication.
     */
    KafkaConnectConfig?: UsernamePasswordAuthentication;
    /**
     * Name of the Kafka Messaging Service associated with this KafkaConnect Pipeline Service.
     * e.g. local_kafka
     */
    messagingServiceName?: string;
    /**
     * Boolean marking if we need to verify the SSL certs for KafkaConnect REST API. True by
     * default.
     */
    verifySSL?: boolean;
}

/**
 * We support username/password or No Authentication
 *
 * username/password auth
 */
export interface UsernamePasswordAuthentication {
    /**
     * KafkaConnect password to authenticate to the API.
     */
    password?: string;
    /**
     * KafkaConnect user to authenticate to the API.
     */
    username?: string;
}

/**
 * AWS credentials configuration.
 */
export interface AWSCredentials {
    /**
     * The Amazon Resource Name (ARN) of the role to assume. Required field in case of
     * assume role.
     */
    assumeRoleArn?: string;
    /**
     * An identifier for the assumed role session. Use the role session name to uniquely
     * identify a session when the same role is assumed by different principals or for
     * different reasons. Required field in case of assume role.
     */
    assumeRoleSessionName?: string;
    /**
     * The Amazon Resource Name (ARN) of the role to assume. Optional field in case of
     * assume role.
     */
    assumeRoleSourceIdentity?: string;
    /**
     * AWS Access Key ID.
     */
    awsAccessKeyId?: string;
    /**
     * AWS Region.
     */
    awsRegion: string;
    /**
     * AWS Secret Access Key.
     */
    awsSecretAccessKey?: string;
    /**
     * AWS Session Token.
     */
    awsSessionToken?: string;
    /**
     * EndPoint URL for AWS.
     */
    endPointURL?: string;
    /**
     * The name of a profile to use with the boto session.
     */
    profileName?: string;
}

/**
 * Underlying database connection. See
 * https://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html for
 * supported backends.
 *
 * Lineage Backend Connection Config
 *
 * Mysql Database Connection Config
 *
 * Postgres Database Connection Config
 *
 * SQLite Database Connection Config
 */
export interface MetadataDatabaseConnection {
    /**
     * Service Type.
     */
    type?: Type;
    /**
     * Choose Auth Config Type.
     */
    authType?:            AuthConfigurationType;
    connectionArguments?: { [key: string]: any };
    connectionOptions?:   { [key: string]: string };
    /**
     * Optional name to give to the database in OpenMetadata. If left blank, we will use default
     * as the database name.
     */
    databaseName?: string;
    /**
     * Database Schema of the data source. This is optional parameter, if you would like to
     * restrict the metadata reading to a single schema. When left blank, OpenMetadata Ingestion
     * attempts to scan all the schemas.
     */
    databaseSchema?: string;
    /**
     * Host and port of the MySQL service.
     *
     * Host and port of the source service.
     *
     * Host and port of the SQLite service. Blank for in-memory database.
     */
    hostPort?:                string;
    sampleDataStorageConfig?: SampleDataStorageConfig;
    /**
     * SQLAlchemy driver scheme options.
     */
    scheme?: Scheme;
    /**
     * SSL Configuration details.
     */
    sslConfig?:                  Config;
    supportsDBTExtraction?:      boolean;
    supportsMetadataExtraction?: boolean;
    supportsProfiler?:           boolean;
    supportsQueryComment?:       boolean;
    /**
     * Username to connect to MySQL. This user should have privileges to read all the metadata
     * in Mysql.
     *
     * Username to connect to Postgres. This user should have privileges to read all the
     * metadata in Postgres.
     *
     * Username to connect to SQLite. Blank for in-memory database.
     */
    username?: string;
    /**
     * Custom OpenMetadata Classification name for Postgres policy tags.
     */
    classificationName?: string;
    /**
     * Database of the data source. This is optional parameter, if you would like to restrict
     * the metadata reading to a single database. When left blank, OpenMetadata Ingestion
     * attempts to scan all the databases.
     */
    database?: string;
    /**
     * Ingest data from all databases in Postgres. You can use databaseFilterPattern on top
     * of this.
     */
    ingestAllDatabases?:        boolean;
    sslMode?:                   SSLMode;
    supportsDatabase?:          boolean;
    supportsLineageExtraction?: boolean;
    supportsUsageExtraction?:   boolean;
    /**
     * How to run the SQLite database. :memory: by default.
     */
    databaseMode?: string;
    /**
     * Password to connect to SQLite. Blank for in-memory database.
     */
    password?: string;
}

/**
 * Choose Auth Config Type.
 *
 * Common Database Connection Config
 *
 * IAM Auth Database Connection Config
 *
 * Azure Database Connection Config
 */
export interface AuthConfigurationType {
    /**
     * Password to connect to source.
     */
    password?:    string;
    awsConfig?:   AWSCredentials;
    azureConfig?: AzureCredentials;
}

/**
 * Azure Datalake Credentials.
 */
export interface AzureCredentials {
    /**
     * Account Name of your storage account.
     */
    accountName?: string;
    /**
     * Your Service Principal App ID (Client ID).
     */
    clientId?: string;
    /**
     * Your Service Principal Password (Client Secret).
     */
    clientSecret?: string;
    /**
     * Scopes to get access token, for e.g. api://6dfX33ab-XXXX-49df-XXXX-3459eX817d3e/.default
     */
    scopes?: string;
    /**
     * Tenant ID of your Azure Subscription.
     */
    tenantId?: string;
    /**
     * Key Vault Name.
     */
    vaultName?: string;
}

/**
 * Storage config to store sample data. Wraps the concrete storage settings
 * (bucket, prefix, credentials) defined in `DataStorageConfig`.
 */
export interface SampleDataStorageConfig {
    config?: DataStorageConfig;
}

/**
 * Storage config to store sample data.
 */
export interface DataStorageConfig {
    /**
     * Bucket Name.
     */
    bucketName?: string;
    /**
     * Provide the pattern of the path where the generated sample data file needs to be stored.
     */
    filePathPattern?: string;
    /**
     * When this field enabled a single parquet file will be created to store sample data,
     * otherwise we will create a new file per day.
     */
    overwriteData?: boolean;
    /**
     * Prefix of the data source.
     */
    prefix?:        string;
    storageConfig?: AwsCredentials;
    // Open index signature: the source JSON schema permits additional arbitrary properties.
    [property: string]: any;
}

/**
 * AWS credentials configuration.
 *
 * NOTE(review): near-duplicate of `AWSCredentials` above — the only difference is that
 * `awsRegion` is optional here. Both variants are emitted by the code generator from
 * distinct schema definitions; do not merge without checking all references.
 */
export interface AwsCredentials {
    /**
     * The Amazon Resource Name (ARN) of the role to assume. Required field in case of
     * assume role.
     */
    assumeRoleArn?: string;
    /**
     * An identifier for the assumed role session. Use the role session name to uniquely
     * identify a session when the same role is assumed by different principals or for
     * different reasons. Required field in case of assume role.
     */
    assumeRoleSessionName?: string;
    /**
     * The Amazon Resource Name (ARN) of the role to assume. Optional field in case of
     * assume role.
     */
    assumeRoleSourceIdentity?: string;
    /**
     * AWS Access Key ID.
     */
    awsAccessKeyId?: string;
    /**
     * AWS Region.
     */
    awsRegion?: string;
    /**
     * AWS Secret Access Key.
     */
    awsSecretAccessKey?: string;
    /**
     * AWS Session Token.
     */
    awsSessionToken?: string;
    /**
     * EndPoint URL for AWS.
     */
    endPointURL?: string;
    /**
     * The name of a profile to use with the boto session.
     */
    profileName?: string;
}

/**
 * SQLAlchemy driver scheme options.
 */
export enum Scheme {
    MysqlPymysql = "mysql+pymysql",
    PgspiderPsycopg2 = "pgspider+psycopg2",
    PostgresqlPsycopg2 = "postgresql+psycopg2",
    SqlitePysqlite = "sqlite+pysqlite",
}

/**
 * SSL Configuration details.
 *
 * Client SSL configuration.
 *
 * OpenMetadata Client configured to validate SSL certificates.
 */
export interface Config {
    /**
     * The CA certificate used for SSL validation, e.g. /path/to/public.cert. Will be used
     * if Verify SSL is set to `validate`.
     */
    caCertificate?: string;
    /**
     * The SSL certificate used for client authentication.
     */
    sslCertificate?: string;
    /**
     * The private key associated with the SSL certificate.
     */
    sslKey?: string;
}

/**
 * SSL Mode to connect to database.
 */
export enum SSLMode {
    Allow = "allow",
    Disable = "disable",
    Prefer = "prefer",
    Require = "require",
    VerifyCA = "verify-ca",
    VerifyFull = "verify-full",
}

/**
 * Service Type
 *
 * Service type.
 */
export enum Type {
    Backend = "Backend",
    Mysql = "Mysql",
    Postgres = "Postgres",
    SQLite = "SQLite",
}

/**
 * Initial Kafka consumer offset.
 */
export enum InitialConsumerOffsets {
    Earliest = "earliest",
    Latest = "latest",
}

/**
 * We support username/password or client certificate authentication
 *
 * username/password auth
 *
 * client certificate auth
 */
export interface NifiCredentialsConfiguration {
    /**
     * Nifi password to authenticate to the API.
     */
    password?: string;
    /**
     * Nifi user to authenticate to the API.
     */
    username?: string;
    /**
     * Boolean marking if we need to verify the SSL certs for Nifi. False by default.
     */
    verifySSL?: boolean;
    /**
     * Path to the root CA certificate.
     */
    certificateAuthorityPath?: string;
    /**
     * Path to the client certificate.
     */
    clientCertificatePath?: string;
    /**
     * Path to the client key.
     * NOTE(review): lowercase "k" (`clientkeyPath`) comes from the source schema and is
     * inconsistent with `clientCertificatePath`; renaming would break the wire format.
     */
    clientkeyPath?: string;
}

/**
 * Kafka security protocol config.
 */
export enum KafkaSecurityProtocol {
    Plaintext = "PLAINTEXT",
    SSL = "SSL",
}

/**
 * Service Type
 *
 * Service type.
 *
 * Custom pipeline service type
 *
 * Type of pipeline service - Airflow or Prefect.
 */
export enum PipelineServiceType {
    Airbyte = "Airbyte",
    Airflow = "Airflow",
    CustomPipeline = "CustomPipeline",
    Dagster = "Dagster",
    DatabricksPipeline = "DatabricksPipeline",
    DomoPipeline = "DomoPipeline",
    Fivetran = "Fivetran",
    GluePipeline = "GluePipeline",
    KafkaConnect = "KafkaConnect",
    Nifi = "Nifi",
    OpenLineage = "OpenLineage",
    Spark = "Spark",
    Spline = "Spline",
}

/**
 * Life Cycle of the entity.
 *
 * This schema defines Life Cycle Properties.
 */
export interface LifeCycle {
    /**
     * Access details of the data asset.
     */
    accessed?: AccessDetails;
    /**
     * Creation details of the data asset.
     */
    created?: AccessDetails;
    /**
     * Update details of the data asset.
     */
    updated?: AccessDetails;
}

/**
 * Access details of the data asset.
 *
 * Access details of an entity.
 *
 * Creation details of the data asset.
 *
 * Update details of the data asset.
 */
export interface AccessDetails {
    /**
     * User, Pipeline, or Query that created, updated, or accessed the data asset.
     */
    accessedBy?: EntityReference;
    /**
     * Any process that accessed the data asset that is not captured in OpenMetadata.
     */
    accessedByAProcess?: string;
    /**
     * Timestamp of data asset creation, update, or read.
     */
    timestamp: number;
}

/**
 * User, Pipeline, or Query that created, updated, or accessed the data asset.
 *
 * This schema defines the EntityReference type used for referencing an entity.
 * EntityReference is used for capturing relationships from one entity to another. For
 * example, a table has an attribute called database of type EntityReference that captures
 * the relationship of a table `belongs to a` database.
 *
 * Owner of this pipeline service.
 */
export interface EntityReference {
    /**
     * If true the entity referred to has been soft-deleted.
     */
    deleted?: boolean;
    /**
     * Optional description of entity.
     */
    description?: string;
    /**
     * Display Name that identifies this entity.
     */
    displayName?: string;
    /**
     * Fully qualified name of the entity instance. For entities such as tables, databases
     * fullyQualifiedName is returned in this field. For entities that don't have name hierarchy
     * such as `user` and `team` this will be same as the `name` field.
     */
    fullyQualifiedName?: string;
    /**
     * Link to the entity resource.
     */
    href?: string;
    /**
     * Unique identifier that identifies an entity instance.
     */
    id: string;
    /**
     * If true the relationship indicated by this entity reference is inherited from the parent
     * entity.
     */
    inherited?: boolean;
    /**
     * Name of the entity instance.
     */
    name?: string;
    /**
     * Entity type/class name - Examples: `database`, `table`, `metrics`, `databaseService`,
     * `dashboardService`...
     */
    type: string;
}

/**
 * This schema defines the TagLabel type used for tagging entities with tags.
 */
export interface TagLabel {
    /**
     * Description for the tag label.
     */
    description?: string;
    /**
     * Display Name that identifies this tag.
     */
    displayName?: string;
    /**
     * Link to the tag resource.
     */
    href?: string;
    /**
     * Label type describes how a tag label was applied. 'Manual' indicates the tag label was
     * applied by a person. 'Derived' indicates a tag label was derived using the associated
     * tag relationship (see Classification.json for more details). 'Propagated' indicates a
     * tag label was propagated from upstream based on lineage. 'Automated' is used when a
     * tool was used to determine the tag label.
     */
    labelType: LabelType;
    /**
     * Name of the tag or glossary term.
     */
    name?: string;
    /**
     * Label is from Tags or Glossary.
     */
    source: TagSource;
    /**
     * 'Suggested' state is used when a tag label is suggested by users or tools. Owner of the
     * entity must confirm the suggested labels before it is marked as 'Confirmed'.
     */
    state:  State;
    style?: Style;
    tagFQN: string;
}

/**
 * Label type describes how a tag label was applied. 'Manual' indicates the tag label was
 * applied by a person. 'Derived' indicates a tag label was derived using the associated tag
 * relationship (see Classification.json for more details). 'Propagated' indicates a tag
 * label was propagated from upstream based on lineage. 'Automated' is used when a tool was
 * used to determine the tag label.
 */
export enum LabelType {
    Automated = "Automated",
    Derived = "Derived",
    Manual = "Manual",
    Propagated = "Propagated",
}

/**
 * Label is from Tags (Classification) or Glossary.
 */
export enum TagSource {
    Classification = "Classification",
    Glossary = "Glossary",
}

/**
 * 'Suggested' state is used when a tag label is suggested by users or tools. Owner of the
 * entity must confirm the suggested labels before it is marked as 'Confirmed'.
 */
export enum State {
    Confirmed = "Confirmed",
    Suggested = "Suggested",
}

/**
 * UI Style is used to associate a color code and/or icon to entity to customize the look of
 * that entity in UI.
 */
export interface Style {
    /**
     * Hex Color Code to mark an entity such as GlossaryTerm, Tag, Domain or Data Product.
     */
    color?: string;
    /**
     * An icon to associate with GlossaryTerm, Tag, Domain or Data Product.
     */
    iconURL?: string;
}
