import json
import time
from datetime import datetime, timedelta
import re
from sqlalchemy import Column, Integer, String, Index, JSON, Boolean, TEXT
from sqlalchemy.orm.session import Session

from airflow import DAG
from airflow.patsnap.service.data_factory import HiveArgsModel, JdbcArgsModel, KafkaArgsModel, DorisModel, EsModel
from airflow.patsnap.util import cmd
from airflow.patsnap.util.etl_util import EtlUtil
from airflow.exceptions import AirflowException
from airflow.models.base import Base
from airflow.models.idata_datasource import Datasource
from airflow.models.idata_env import Env
from airflow.models.idata_project import Project
from airflow.operators import spark_submit, flink_submit_bak, logstash, dag_run, flink_submit
from airflow.operators.bash import BashOperator
from airflow.operators.doris_load import DorisLoadOperator
from airflow.utils.session import provide_session
from airflow.configuration import conf

# --- PySpark runtime locations (airflow.cfg [spark] section) ---
PYTHON2_ARCHIVES = conf.get('spark', 'python2_archives')
PYTHON2_BINARY = conf.get('spark', 'python2_binary')
PYTHON3_ARCHIVES = conf.get('spark', 'python3_archives')
PYTHON3_BINARY = conf.get('spark', 'python3_binary')
PYTHON3_DRIVER_BINARY = conf.get('spark', 'python3_driver_binary')

PYTHON310_ARCHIVES = conf.get('spark', 'python310_archives')
PYTHON310_BINARY = conf.get('spark', 'python310_binary')
PYTHON310_DRIVER_BINARY = conf.get('spark', 'python310_driver_binary')

# spark-submit binaries for the two supported Spark major versions
SPARK3_BINARY = conf.get('spark', 'spark3_binary')
SPARK2_BINARY = conf.get('spark', 'spark2_binary')
# --- Kubernetes cluster + S3 credentials for spark-on-k8s ([kubernetes] section) ---
K8S_MASTER = conf.get('kubernetes', 'master')
K8S_SPARK_BASE_IMAGE = conf.get('kubernetes', 'image')
K8S_SERVICEACCOUNTNAME = conf.get('kubernetes', 'service_account_name')
K8S_FILE_UPLOAD_PATH = conf.get('kubernetes', 'upload_path')
K8S_S3_ENDPOINT = conf.get('kubernetes', 's3_endpoint')
K8S_S3_ACCESS_KEY = conf.get('kubernetes', 's3_access_key')
K8S_S3_SECRET_KEY = conf.get('kubernetes', 's3_secret_key')
K8S_S3_IMPL = conf.get('kubernetes', 's3_impl')

# Spark event-log directory (enables eventLog when non-empty)
K8S_S3_SPARKEVENTLOGDIR = conf.get('kubernetes', 'spark_eventlog_dir')

# --- Data-factory metrics/alerting endpoints ([datafactory] section) ---
METRICS_KAFKA_SERVERS = conf.get('datafactory', 'kafka_servers')
METRICS_KAFKA_TOPIC = conf.get('datafactory', 'kafka_topic')
WARN_CALLBACK_URL = conf.get('datafactory', 'callback_url')
NAMESPACE = conf.get('kubernetes', 'work_namespace')


# --- Tencent COS / OFS filesystem settings ([kubernetes] section) ---
K8S_COSN_SECRETID = conf.get('kubernetes', 'cosn_secret_id')
K8S_COSN_SECRETKEY = conf.get('kubernetes', 'cosn_secret_key')
K8S_COSN_IMPL = conf.get('kubernetes', 'cosn_impl')
K8S_COSN_CREDENTIALS_PROVIDER = conf.get('kubernetes', 'cosn_credentials_provider')
K8S_COSN_BUCKET_ENDPOINT_SUFFIX = conf.get('kubernetes', 'cosn_bucket_endpoint_suffix')
K8S_COSN_BUCKET_REGION = conf.get('kubernetes', 'cosn_bucket_region')
K8S_OFS_USER_APPID = conf.get('kubernetes', 'ofs_user_appid')
K8S_OFS_TMP_CACHE_DIR = conf.get('kubernetes', 'ofs_tmp_cache_dir')

class OperatorType:
    """String constants naming the operator kinds a Task may be executed as.

    These values are persisted in ``idata_task.operator`` and dispatched on
    in ``Task.create_operator``.
    """

    SHELL = "SHELL"              # plain bash command
    SPARK = "SPARK"              # spark-submit job
    FLINK = "FLINK"              # flink job
    FLINK_SQL = "FLINK_SQL"      # flink SQL job
    LOGSTASH = "LOGSTASH"        # logstash pipeline
    DATAFACTORY = "DATAFACTORY"  # generated ETL spark job
    DAGRUN = "DAGRUN"            # triggers another workflow/DAG
    DORIS_LOAD = "DORIS_LOAD"    # Doris bulk load


class TaskType:
    """Integer constants for the ``idata_task.type`` column."""

    STREAM = 1  # long-running streaming task
    BATCH = 0   # one-shot batch task (column default)


class Task(Base):
    """ORM model for the ``idata_task`` table.

    A Task row describes a single schedulable unit of work (shell, Spark,
    Flink, Logstash, data-factory ETL, Doris load or DAG trigger) and knows
    how to turn itself into a concrete Airflow operator via
    :meth:`create_operator`.
    """

    __tablename__ = "idata_task"

    id = Column(Integer, primary_key=True)
    project_id = Column(Integer)            # owning project (idata_project)
    folder_id = Column(Integer)             # folder grouping within the project
    name = Column(String(255))
    description = Column(String(255))
    operator = Column(String(32))           # one of the OperatorType values
    content = Column(TEXT)                  # raw command / submit-argument text
    param = Column(JSON)                    # operator-specific parameters
    env = Column(JSON)                      # environment variables for the task
    files = Column(String)                  # comma-separated remote file list
    pool = Column(String(64))               # Airflow pool (also reused as queue)
    owner = Column(String(64))
    email_alert = Column(Boolean, default=True)
    email = Column(String(256))
    version = Column(String(32))
    retries = Column(Integer, default=0)
    task_concurrency = Column(Integer, default=8)
    execution_timeout = Column(Integer)     # seconds; converted to timedelta
    type = Column(Integer, default=0)       # TaskType.BATCH / TaskType.STREAM
    blood = Column(JSON)                    # lineage ("blood relation") info

    __table_args__ = (
        Index('project_id_name', project_id, name),
        Index('project_id', project_id),
        Index('folder_id', folder_id),
    )

    def error_content(self):
        '''
        Validate the task definition.

        Checks that ``param['date_format']`` is a usable strftime pattern and
        that the task can actually be converted into an Airflow operator.

        :return: an error message string, or None when the task is valid
        '''

        if self.param and self.param.get('date_format', ''):
            fmt = self.param['date_format']
            # User-facing (Chinese) message: "invalid date format".
            msg = '日期格式不合法: {} example %Y-%m-%d %H:%M:%S'.format(fmt)
            if fmt.find('%') < 0:
                return msg
            try:
                datetime.now().strftime(fmt)
            except ValueError as e:  # e unused; any strftime failure maps to msg
                return msg

        # Building the operator exercises argument parsing; any exception
        # text becomes the validation error.
        try:
            self.create_operator()
        except Exception as e:
            return str(e)

    def parse_task_args(self, content=None):
        """Parse SPARK/FLINK submit arguments out of ``content``.

        :param content: argument string; defaults to ``self.content``
        :return: parsed argument dict, or None for other operator types
        """
        if content is None:
            content = self.content
        if self.operator == OperatorType.SPARK:
            content = content.replace('\n', ' ')
            rs = cmd.split_cmd(content)
            data = spark_submit.parse_args(rs)
            return data
        elif self.operator == OperatorType.FLINK:
            content = content.replace('\n', ' ')
            rs = cmd.split_cmd(content)
            data = flink_submit.parse_args(rs)
            return data

    def get_task_command(self):
        """Return the submit command for SPARK/FLINK, or the raw content for SHELL.

        Returns None for any other operator type.
        """
        if self.operator == OperatorType.SPARK:
            task = self.create_operator()
            return task.get_spark_submit_command()
        elif self.operator == OperatorType.SHELL:
            return self.content
        elif self.operator == OperatorType.FLINK:
            task = self.create_operator()
            return task.get_flink_submit_command()

    @provide_session
    def insert(self, session: Session = None):
        """Persist this task; parsing the content first acts as validation."""
        if self.content:
            self.parse_task_args()
        session.add(self)

    @provide_session
    def update(self, session: Session = None):
        """Merge this (possibly detached) task into the session."""
        session.merge(self)

    @staticmethod
    @provide_session
    def update_blood(task_id, blood, session: Session = None):
        """Overwrite the lineage (``blood``) JSON of the given task."""
        p = session.query(Task).filter(Task.id == task_id)
        # NOTE(review): p is a Query object, which is always truthy, so this
        # guard never filters anything — confirm whether an existence check
        # (e.g. .first()) was intended.
        if p:
            p.update({"blood": blood})

    @staticmethod
    @provide_session
    def get_task(task_id, session: Session = None):
        """
        Get the Task with specific task id.

        :param task_id: The task id of the Task to get.
        :param session: SQLAlchemy ORM Session
        :return: the Task object
        """
        return session.query(Task).filter(Task.id == task_id).first()

    @staticmethod
    @provide_session
    def query_task(project_id=None, folder_id=None, name=None, type=None, session: Session = None):
        '''
        Query tasks with optional filters; ``name`` is a substring match.

        :param project_id: filter by project id
        :param folder_id: filter by folder id
        :param name: substring to match against the task name
        :param type: TaskType value (parameter shadows the builtin ``type``)
        :param session: SQLAlchemy ORM Session
        :return: list of matching Task rows
        '''
        qry = session.query(Task)
        if project_id:
            qry = qry.filter(Task.project_id == project_id)
        if folder_id:
            qry = qry.filter(Task.folder_id == folder_id)
        if type:
            qry = qry.filter(Task.type == type)
        if name:
            qry = qry.filter(Task.name.like('%' + name + '%'))
        return qry.all()

    @staticmethod  # static method
    @provide_session
    def get_stream(project_id=None, name=None, page=None, operator=None, session: Session = None):
        """Query a project's streaming tasks, optionally paginated (10 per page)."""
        q = session.query(Task) \
            .filter(Task.project_id == project_id).filter(Task.type == TaskType.STREAM)
        if name and name != "":
            q = q.filter(Task.name.like('%' + name + '%'))
        if operator and operator != "":
            q = q.filter(Task.operator == operator)
        if page:
            # NOTE(review): .paginate() is Flask-SQLAlchemy API, not plain
            # SQLAlchemy Query — confirm the configured query class provides it.
            q = q.paginate(page, 10, error_out=False)
            return q
        return q.all()

    def to_json(self, task_instance=None):
        """
        Get the Task in a json structure.

        :param task_instance: optional task-instance payload embedded verbatim
        :return: the task object in json format
        """
        data = {
            'id': self.id,
            'project_id': self.project_id,
            'folder_id': self.folder_id,
            'name': self.name,
            'description': self.description,
            'operator': self.operator,
            'content': self.content,
            'param': self.param,
            'env': self.env,
            'files': self.files,
            'pool': self.pool,
            'owner': self.owner,
            'email_alert': self.email_alert,
            'retries': self.retries,
            'task_concurrency': self.task_concurrency,
            'execution_timeout': self.execution_timeout,
            'type': self.type,
            'blood': self.blood,
            'task_instance': task_instance,
            'email': self.email
        }
        return data

    def create_operator(self, dag=None):
        '''
        Convert this task to a concrete Airflow Operator.

        :param dag: DAG to attach the operator to; when None a throw-away
            DAG named ``dag-task-<id>`` is created (used during validation)
        :return: the operator instance, or None for unknown operator types
        '''

        if not self.id:
            raise AirflowException('invalid task Id')

        if dag is None:
            dag = DAG(
                'dag-task-{}'.format(self.id),
                default_args={
                    'owner': 'airflow',
                    'depends_on_past': False,
                },
                start_date=datetime.now(),
                description='A random tmp DAG for airflow task',
            )

        converted_content = self.content

        # Rewrite every "{{ ts }}" template occurrence into a macro that
        # re-formats the execution timestamp with the user-supplied date_format.
        if self.content and self.param and self.param.get('date_format', ''):
            pat = r'\{\{\s*ts\s*\}\}'
            ts = re.findall(pat, self.content)
            if len(ts) > 0:
                ns = "{{macros.datetime.fromisoformat(ts).strftime('" + \
                     self.param['date_format'] + "')}}"
                converted_content = re.sub(pat, ns, self.content)

        if self.operator == OperatorType.SHELL:
            task = BashOperator(task_id='task-{}'.format(self.id),
                                task_name=self.name,
                                bash_command=converted_content,
                                owner=self.owner,
                                dag=dag,
                                retries=self.retries,
                                pool=self.pool,
                                queue=self.pool)
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    # NOTE(review): '@.com' looks like a truncated mail domain —
                    # confirm the intended address template.
                    task.email = self.owner + '@.com'
            if self.files:
                task.files = self.files.split(',')
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)

            task.params = self.param
            task.env = self.env
            return task

        if self.operator == OperatorType.DORIS_LOAD:
            task = DorisLoadOperator(task_id='task-{}'.format(self.id),
                                     task_name=self.name,
                                     content=self.content,
                                     owner=self.owner,
                                     dag=dag,
                                     retries=self.retries,
                                     pool=self.pool,
                                     queue=self.pool)
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    task.email = self.owner + '@.com'
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            task.params = self.param
            task.env = self.env
            return task

        elif self.operator == OperatorType.SPARK:
            # Template substitution is applied before parsing the submit args.
            params = self.parse_task_args(content=converted_content)
            fs = []
            if self.files:
                fs = self.files.split(',')

            if not 'name' in params:
                params['name'] = self.name

            params['task_id'] = 'task-{}'.format(self.id)
            params['task_name'] = self.name
            params['dag'] = dag
            params['remote_files'] = fs

            # 'class' is a Python keyword; the operator expects 'java_class'.
            if 'class' in params:
                params['java_class'] = params['class']
                params.pop('class')

            # NOTE(review): both branches assign SPARK3_BINARY, so the version
            # check is currently a no-op and SPARK2_BINARY is unused — confirm
            # whether the else-branch was meant to use SPARK2_BINARY.
            if self.param and self.param.get('version', '2.3').startswith('3'):
                params['spark_binary'] = SPARK3_BINARY
            else:
                params['spark_binary'] = SPARK3_BINARY

            params['master'] = K8S_MASTER

            if self.param:
                if 'conf' not in params:
                    params['conf'] = dict()
                # Select the pyspark runtime archives/binaries by version tag.
                if self.param.get('pyspark', ''):
                    if self.param['pyspark'] == 'python2':
                        params['archives'] = PYTHON2_ARCHIVES
                        params['conf']['spark.yarn.appMasterEnv.PYSPARK_PYTHON'] = PYTHON2_BINARY
                    if self.param['pyspark'] == 'python3.9':
                        params['archives'] = PYTHON3_ARCHIVES
                        params['conf']['spark.pyspark.python'] = PYTHON3_BINARY
                        params['conf']['spark.pyspark.driver.python'] = PYTHON3_DRIVER_BINARY
                    if self.param['pyspark'] == 'python3.10':
                        params['archives'] = PYTHON310_ARCHIVES
                        params['conf']['spark.pyspark.python'] = PYTHON310_BINARY
                        params['conf']['spark.pyspark.driver.python'] = PYTHON310_DRIVER_BINARY
                # Fill in cluster-wide k8s/S3/COS defaults without overriding
                # any conf key the user supplied explicitly.
                if 'spark.kubernetes.container.image' not in params['conf']:
                    params['conf']['spark.kubernetes.container.image'] = K8S_SPARK_BASE_IMAGE
                if 'spark.kubernetes.authenticate.driver.serviceAccountName' not in params['conf']:
                    params['conf']['spark.kubernetes.authenticate.driver.serviceAccountName'] = K8S_SERVICEACCOUNTNAME
                if 'spark.kubernetes.file.upload.path' not in params['conf']:
                    params['conf']['spark.kubernetes.file.upload.path'] = K8S_FILE_UPLOAD_PATH
                if 'spark.hadoop.fs.s3a.endpoint' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3a.endpoint'] = K8S_S3_ENDPOINT
                if 'spark.hadoop.fs.s3a.impl' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3a.impl'] = K8S_S3_IMPL
                if 'spark.hadoop.fs.s3.impl' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3.impl'] = K8S_S3_IMPL
                if 'spark.hadoop.fs.s3a.fast.upload' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3a.fast.upload'] = 'true'
                if 'spark.hadoop.fs.s3a.access.key' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3a.access.key'] = K8S_S3_ACCESS_KEY
                if 'spark.hadoop.fs.s3a.secret.key' not in params['conf']:
                    params['conf']['spark.hadoop.fs.s3a.secret.key'] = K8S_S3_SECRET_KEY

                if 'spark.hadoop.fs.ofs.user.appid' not in params['conf']:
                    params['conf']['spark.hadoop.fs.ofs.user.appid'] = K8S_OFS_USER_APPID
                if 'spark.hadoop.fs.ofs.tmp.cache.dir' not in params['conf']:
                    params['conf']['spark.hadoop.fs.ofs.tmp.cache.dir'] = K8S_OFS_TMP_CACHE_DIR
                if 'spark.hadoop.fs.cosn.credentials.provider' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.credentials.provider'] = K8S_COSN_CREDENTIALS_PROVIDER
                if 'spark.hadoop.fs.cosn.impl' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.impl'] = K8S_COSN_IMPL
                if 'spark.hadoop.fs.cosn.bucket.region' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.bucket.region'] = K8S_COSN_BUCKET_REGION
                if 'spark.hadoop.fs.cosn.bucket.endpoint_suffix' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.bucket.endpoint_suffix'] = K8S_COSN_BUCKET_ENDPOINT_SUFFIX
                if 'spark.hadoop.fs.cosn.userinfo.secretId' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.userinfo.secretId'] = K8S_COSN_SECRETID
                if 'spark.hadoop.fs.cosn.userinfo.secretKey' not in params['conf']:
                    params['conf']['spark.hadoop.fs.cosn.userinfo.secretKey'] = K8S_COSN_SECRETKEY
                if 'spark.hadoop.fs.AbstractFileSystem.cosn.impl' not in params['conf']:
                    params['conf']['spark.hadoop.fs.AbstractFileSystem.cosn.impl'] = 'org.apache.hadoop.fs.CosN'

                # Enable event logging only when a log dir is configured.
                if 'spark.eventLog.dir' not in params['conf'] and K8S_S3_SPARKEVENTLOGDIR:
                    params['conf']['spark.eventLog.dir'] = K8S_S3_SPARKEVENTLOGDIR
                    params['conf']['spark.eventLog.enabled'] = 'true'
                    params['conf']['spark.eventLog.compress'] = 'true'

                # Pod templates and labels are always forced (not user-overridable).
                params['conf']['spark.kubernetes.driver.podTemplateFile'] = '/usr/hdp/3.1.5.0-152/spark3/kubernetes/template/driver-template.yml'
                params['conf']['spark.kubernetes.executor.podTemplateFile'] = '/usr/hdp/3.1.5.0-152/spark3/kubernetes/template/executor-template.yml'
                params['conf']['spark.kubernetes.executor.label.job_name'] = self.name

                # Executor scratch space: on-demand NFS PVC mounted at /data;
                # only the size limit may be overridden by the user.
                params['conf']['spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.options.claimName'] = 'OnDemand'
                params['conf']['spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.options.storageClass'] = 'nfs-client'
                if 'spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.options.sizeLimit' not in params['conf']:
                    params['conf']['spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.options.sizeLimit'] = '100G'
                params['conf']['spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.mount.path'] = '/data'
                params['conf']['spark.kubernetes.executor.volumes.persistentVolumeClaim.spark-local-dir-1.mount.readOnly'] = 'false'

                # params['conf']['spark.ui.custom.executor.log.url'] = 'executor_log/download?spark_exec_id={{EXECUTOR_ID}}&spark_app={{APP_ID}}&start=-1000'
                # params['conf']['spark.history.fs.logDirectory'] = 'cosn://testpatsnapus-1251949819/patent2/spark3/share_log/spark_history_server/'
                # params['conf']['spark.eventLog.dir'] = 'cosn://testpatsnapus-1251949819/patent2/spark3/share_log/spark_history_server/'
                # params['conf']['spark.eventLog.enabled'] = True

            params['queue'] = self.pool
            params['pool'] = self.pool
            if self.owner:
                params['owner'] = self.owner
                params['proxy_user'] = self.owner
            params['namespace'] = NAMESPACE

            task = spark_submit.SparkSubmitOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    task.email = self.owner + '@.com'

            return task

        elif self.operator == OperatorType.FLINK:
            # Unlike SPARK, the raw (un-templated) content is parsed here.
            params = self.parse_task_args()
            fs = []
            if self.files:
                fs = self.files.split(',')

            params['task_id'] = 'task-{}'.format(self.id)
            params['dag'] = dag
            params['remote_files'] = fs
            params['flink_binary'] = '/usr/hdp/3.1.5.0-152/flink-1.13.6/bin/flink'

            if 'class' in params:
                params['java_class'] = params['class']
                params.pop('class')
            params['pool'] = self.pool
            params['queue'] = self.pool
            if self.owner:
                params['proxy_user'] = self.owner
            params['namespace'] = NAMESPACE

            task = flink_submit.FlinkSubmitOperator(**params)
            if self.retries:
                task._flink_retries = self.retries
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    # NOTE(review): '@4.com' differs from the '@.com' used in
                    # the other branches — confirm which domain is correct.
                    task.email = self.owner + '@4.com'

            return task

        elif self.operator == OperatorType.LOGSTASH:
            params = dict()
            params['task_id'] = 'task-{}'.format(self.id)
            params['dag'] = dag
            params['logstash_binary'] = '/opt/logstash/bin/logstash'
            params['content'] = self.content
            task = logstash.LogstashOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    task.email = self.owner + '@4.com'
            return task

        elif self.operator == OperatorType.DATAFACTORY:
            # Data-factory tasks are materialized as generated Spark ETL jobs.
            params = self.foramt_args(dag)
            task = spark_submit.SparkSubmitOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    task.email = self.owner + '@.com'
            return task

        elif self.operator == OperatorType.DAGRUN:
            params = dict()
            task_param = self.param
            workflow_id = task_param.get("workflow_id", None)
            params['task_id'] = 'task-{}'.format(self.id)
            params['task_name'] = self.name
            params['dag'] = dag
            params['pool'] = self.pool
            params['queue'] = self.pool
            params['workflow_id'] = workflow_id
            task = dag_run.DagRunOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            if self.email_alert:
                if self.email:
                    task.email_on_failure = True
                    task.email = self.email
                elif self.owner:
                    task.email_on_failure = True
                    task.email = self.owner + '@.com'
            return task

    def foramt_args(self, dag):
        """Build SparkSubmitOperator kwargs for a DATAFACTORY (ETL) task.

        Translates the task's reader/writer/filter models from ``self.param``
        into the generated ETL job's command-line arguments and jar list.

        NOTE(review): the method name is misspelled ("foramt" -> "format");
        kept as-is because callers depend on it.

        :param dag: DAG the resulting operator will be attached to
        :return: kwargs dict for spark_submit.SparkSubmitOperator
        """
        task_param = self.param
        env_id = task_param['envId']
        path_env = Env.get_env(env_id)
        is_monitor = task_param['isMetricsMonitor']
        is_streaming = task_param['isStreaming']
        reader_model = task_param['readerModel']
        writer_model = task_param['writerModel']
        filter_model = task_param['filterModel']
        reader_model_type = reader_model.get('type', None)
        writer_model_type = writer_model.get('type', None)
        filter_model_type = filter_model.get('type', None)
        sql = filter_model.get("sql", None)
        field_types = filter_model.get("fieldTypes", None)
        decoder_class = filter_model.get("decoderClass", None)
        # Default decoder: ES sinks use the ES-specific decoder/fields.
        if filter_model_type and filter_model_type == EtlUtil.DECODE_DEFAULT_TYPE:
            if writer_model_type:
                if writer_model_type == EtlUtil.SINK_TYPE_ES:
                    field_types = EtlUtil.DECODE_DEFAULT_ES_FILEDS
                    decoder_class = EtlUtil.DECODE_DEFAULT_ES_DECODERCLASS
                else:
                    field_types = EtlUtil.DECODE_DEFAULT_FILEDS
                    decoder_class = EtlUtil.DECODE_DEFAULT_DECODERCLASS

        spark_args = task_param['sparkArgs']
        args = ["-isMetricsMonitor=" + str(is_monitor)]
        params = {}
        jars = set()
        jars.add(path_env.etl_path)
        jars.add(path_env.common_jar_path)

        if reader_model_type:
            if reader_model['type'] == EtlUtil.SOURCE_TYPE_HIVE:
                hive_model = reader_model['hiveModel']
                datasource_id = hive_model['datasourceId']
                # NOTE(review): datasource is looked up but unused in this
                # branch — confirm whether it is needed.
                datasource = Datasource.get_datasource(datasource_id)
                # "db:table," pairs extracted from the filter SQL.
                args_sql = EtlUtil.sql_cluster_decoder(sql)
                db_table = ''
                for k in args_sql.keys():
                    for t in args_sql[k]:
                        db_table += k + ":" + t + ","

                hive_args_model = HiveArgsModel(streaming=False, reader_type=EtlUtil.HIVE_READER_TYPE, reader_sql=sql,
                                                reader_db_tables=db_table,
                                                reader_udfs=hive_model['udf'] if 'udf' in hive_model else [],
                                                reader_udafs=hive_model['udaf'] if 'udaf' in hive_model else [])
                args = args + hive_args_model.to_list_reader()
                format_udf_udaf_jars(hive_args_model.reader_udfs, hive_args_model.reader_udafs, jars, path_env, self.id,
                                     self.name)
                jars.add(path_env.hive_jar_path)
            elif reader_model['type'] == EtlUtil.SOURCE_TYPE_JDBC:
                jdbc_model = reader_model['jdbcModel']
                datasource_id = jdbc_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                args_sql = EtlUtil.sql_cluster_decoder(sql)
                db_table = ''
                for k in args_sql.keys():
                    for t in args_sql[k]:
                        db_table += k + ":" + t + ","
                jdbc_args_model = JdbcArgsModel(streaming=False, reader_type=EtlUtil.JDBC_READER_TYPE, reader_sql=sql,
                                                reader_db_tables=db_table, reader_ip=datasource.ip,
                                                reader_port=datasource.port,
                                                reader_user=datasource.user_name,
                                                reader_password=datasource.password,
                                                reader_udfs=jdbc_model['udf'] if 'udf' in jdbc_model else [],
                                                reader_udafs=jdbc_model['udaf'] if 'udaf' in jdbc_model else [])

                args = args + jdbc_args_model.to_list_reader()
                format_udf_udaf_jars(jdbc_args_model.reader_udfs, jdbc_args_model.reader_udafs, jars, path_env, self.id,
                                     self.name)
                jars.add(path_env.jdbc_jar_path)
            elif reader_model['type'] == EtlUtil.SOURCE_TYPE_KAFKA:
                kafka_model = reader_model['kafkaModel']
                datasource_id = kafka_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                servers = format_server(datasource)
                # Kafka reading always implies a streaming job.
                kafka_args_model = KafkaArgsModel(streaming=True, reader_type=EtlUtil.KAFKA_READER_TYPE,
                                                reader_servers=servers,
                                                reader_topics=kafka_model['topics'],
                                                reader_field_types=field_types,
                                                reader_decoder_class=decoder_class,
                                                reader_max_bytes=kafka_model['maxBytes'],
                                                reader_max_offset_pertrigger=kafka_model['maxOffsetPertrigger'])

                args = args + kafka_args_model.to_list_reader()
                jars.add(path_env.kafka_jar_path)
        if writer_model_type:
            if writer_model['type'] == EtlUtil.SINK_TYPE_HIVE:
                hive_model = writer_model['hiveModel']
                writer_path = path_env.hive_writer_path + hive_model['database'] + ".db/" + hive_model['table']
                hive_args_model = HiveArgsModel(writer_type=EtlUtil.HIVE_WRITER_TYPE, writer_path=writer_path,
                                                writer_partition_by=hive_model['partitionBy'],
                                                writer_repartitions=hive_model['repartitions'],
                                                writer_format=hive_model['format'])
                # TODO: truncate the table or overwrite it
                args = args + hive_args_model.to_list_writer()
                jars.add(path_env.hive_jar_path)
            elif writer_model['type'] == EtlUtil.SINK_TYPE_JDBC:
                jdbc_model = writer_model['jdbcModel']
                datasource_id = jdbc_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                jdbc_args_model = JdbcArgsModel(writer_type=EtlUtil.JDBC_WRITER_TYPE,
                                                writer_clean_table=jdbc_model['cleanTable'], writer_ip=datasource.ip,
                                                writer_port=datasource.port, writer_db=jdbc_model['database'],
                                                writer_table=jdbc_model['table'], writer_user=datasource.user_name,
                                                writer_password=datasource.password)
                args = args + jdbc_args_model.to_list_writer()
                jars.add(path_env.jdbc_jar_path)
            elif writer_model['type'] == EtlUtil.SINK_TYPE_KAFKA:
                kafka_model = writer_model['kafkaModel']
                datasource_id = kafka_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                servers = format_server(datasource)
                kafka_args_model = KafkaArgsModel(streaming=False,
                                                  writer_type=EtlUtil.KAFKA_WRITER_TYPE,
                                                  writer_servers=servers,
                                                  writer_topic=kafka_model['topic'],
                                                  writer_acks=kafka_model['acks']
                                                  )

                args = args + kafka_args_model.to_list_writer()
                jars.add(path_env.kafka_jar_path)
            elif writer_model['type'] == EtlUtil.SINK_TYPE_DORIS:
                doris_model = writer_model['dorisModel']
                datasource_id = doris_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                fenodes = format_server(datasource)
                doris_args_model = DorisModel(streaming=False,
                                                  writer_type=EtlUtil.DORIS_WRITER_TYPE,
                                                  writer_fenodes=fenodes,
                                                  writer_table=doris_model['table'],
                                                  writer_user=datasource.user_name,
                                                  writer_password=datasource.password
                                                  )

                args = args + doris_args_model.to_list_writer()
                jars.add(path_env.doris_jar_path)
            elif writer_model['type'] == EtlUtil.SINK_TYPE_ES:
                es_model = writer_model['esModel']
                datasource_id = es_model['datasourceId']
                datasource = Datasource.get_datasource(datasource_id)
                es_args_model = EsModel(streaming=False,
                                              writer_type=EtlUtil.ES_WRITER_TYPE,
                                              nodes=datasource.ip,
                                              port=datasource.port,
                                              user=datasource.user_name,
                                              password=datasource.password,
                                              writer_index=es_model['index'],
                                              writer_index_type=es_model['index_type'],
                                              )
                args = args + es_args_model.to_list_writer()
                jars.add(path_env.es_jar_path)

        # Streaming jobs get a per-task checkpoint dir and a 60s trigger.
        if is_streaming:
            ck_path = path_env.checkpoint_path + "/" + str(self.id)
            args.append("-writer.checkpointLocation=" + ck_path)
            args.append("-writer.triggerPeriod=60")
        # User-uploaded files contribute their basenames to the jar list.
        local_jars = set()
        fs = []
        if self.files:
            fs = self.files.split(',')
            for f in fs:
                if "/" in f:
                    local_jars.add(f.split("/")[-1])
                else:
                    local_jars.add(f)
        jars = set.union(jars, local_jars)
        jar_conf = ','.join(jars)
        conf_param = {'spark.executor.extraJavaOptions': '-Dfile.encoding=utf-8',
                      'spark.driver.extraJavaOptions': '-Dfile.encoding=utf-8',
                      'spark.jars': jar_conf}
        # Merge user-supplied key/value spark conf entries (blank-stripped).
        spark_conf = spark_args.get("sparkConf", None)
        if spark_conf:
            for s_conf in spark_conf:
                key = str(s_conf.get("key", None))
                value = str(s_conf.get("value", None))
                if key and value and key.strip() != "" and value.strip() != "":
                    conf_param[key] = value
        task_name = self.name
        # defensive: name occasionally arrives as a tuple — take its first
        # element (TODO confirm which caller passes a tuple)
        if isinstance(self.name, tuple):
            task_name = str(self.name[0])

        # Metrics / alerting arguments consumed by the generated ETL job.
        args.append("-metrics.kafka.bootstrap.servers=" + str(METRICS_KAFKA_SERVERS))
        args.append("-metrics.topics=" + str(METRICS_KAFKA_TOPIC))
        args.append("-metrics.metricsName=" + task_name + "_" + str(self.id))
        args.append("-warn.taskId=" + str(self.id))
        args.append("-warn.taskName=" + task_name)
        project = Project.get_project(self.project_id)
        args.append("-warn.projectName=" + project.project_name)
        args.append("-warn.callBackUrl=" + str(WARN_CALLBACK_URL))

        params['remote_files'] = fs
        params['application_args'] = args
        params['application'] = path_env.temporary_file_path
        params['executor_cores'] = spark_args['executorCores']
        params['executor_memory'] = spark_args['executorMemory']
        params['driver_memory'] = spark_args['driverMemory']
        # params['driver_cores'] = spark_args['driverCores']
        params['num_executors'] = spark_args['numExecutors']
        params['conf'] = conf_param
        params['name'] = self.name
        params['task_id'] = 'task-{}'.format(self.id)
        params['dag'] = dag
        params['spark_binary'] = path_env.binary_path
        params['java_class'] = EtlUtil.CLASS_NAME
        params['queue'] = self.pool
        params['pool'] = self.pool
        if self.owner:
            params['proxy_user'] = self.owner

        return params


def format_udf_udaf_jars(udfs, udafs, jars, path_env, task_id, name):
    """Add the uploaded UDF/UDAF jar locations for a task to ``jars``.

    Paths are built as ``<decoder_path><task_id>_<name>/udf/<fileName>`` (or
    ``/udaf/`` for UDAFs). ``jars`` is mutated in place.

    :param udfs: iterable of dicts with a ``fileName`` key (may be None/empty)
    :param udafs: iterable of dicts with a ``fileName`` key (may be None/empty)
    :param jars: set collecting jar paths
    :param path_env: Env row providing ``decoder_path``
    :param task_id: task id embedded in the directory name
    :param name: task name embedded in the directory name
    """
    base = path_env.decoder_path + str(task_id) + "_" + name
    for udf in (udfs or []):
        jars.add(base + "/udf/" + udf['fileName'])
    for udaf in (udafs or []):
        jars.add(base + "/udaf/" + udaf['fileName'])


def format_server(datasource: "Datasource") -> str:
    """Render a datasource's hosts as a ``host:port`` list.

    The datasource stores a comma-separated list of IPs and a single shared
    port; the result is ``ip1:port,ip2:port,...`` in the original IP order.

    Fixes: replaces the manual index loop with trailing-comma bookkeeping by
    the idiomatic ``str.join``; the annotation is stringified so the function
    is importable without the project's Datasource class in scope.

    :param datasource: object exposing ``ip`` (comma-separated) and ``port``
    :return: comma-joined ``ip:port`` string
    """
    port = str(datasource.port)
    return ",".join(ip + ":" + port for ip in datasource.ip.split(","))




if __name__ == '__main__':
    # Ad-hoc manual test: build a FLINK task from a raw argument string,
    # construct the operator and print the resulting flink submit command.
    # datasource = Datasource(name="das", type=1, ip="10.12.6.10,10.12.6.13", port=9092, user_name="user_name", password="password", oa_user_name="oa_user_name", create_time=datetime.now())
    # format_server(datasource)
    # s = ".2/dsadsa.d"
    # print(s.split("/")[-1])
    task = Task()
    task.operator = OperatorType.FLINK
    task.id = 1
    task.content = "-jc 1\n-tc 1\n-jm 1G\n-tm 1G\n-ts 2\n-p 2\n-kd faf=1\n-kd sss=2\n-c com.patsnap.data.patent.dw.etl.job.legal.LegalDwsAppealJob\ndata-patent-dw-etl-stream-1.0.0-SNAPSHOT-with-dependencies.jar\n--apollo.app.cluster na-ashburn\n--apollo.app.profile data.legal_ods_trial_appeal\n--apollo.app.id s-data-patent-dw-pipeline\n--source_count_per_sec 500\n--checkpoint_timeout_sec 60"
    params = task.parse_task_args()
    fs = []
    if task.files:
        fs = task.files.split(',')

    # Mirrors the FLINK branch of Task.create_operator, but without a DAG.
    params['task_id'] = 'task-{}'.format(task.id)
    params['remote_files'] = fs
    params['flink_binary'] = '/usr/hdp/3.1.5.0-152/flink-1.13.6/bin/flink'

    if 'class' in params:
        params['java_class'] = params['class']
        params.pop('class')
    params['namespace'] = 'NAMESPACE'

    # Note: `task` is rebound from the Task row to the operator here.
    task = flink_submit.FlinkSubmitOperator(**params)
    task.get_flink_submit_command()
    print(task)

