import random
import re
from datetime import datetime, timedelta

from sqlalchemy import Column, Integer, String, Index, JSON, Boolean, TEXT
from sqlalchemy.orm.session import Session

from airflow import DAG
from airflow.patsnap.service.data_factory import HiveArgsModel, JdbcArgsModel
from airflow.patsnap.util.etl_util import EtlUtil
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models.base import Base
from airflow.models.idata_datasource import Datasource
from airflow.models.idata_env import Env
from airflow.operators import spark_submit, flink_submit_bak, logstash, dag_run, flink_submit
from airflow.operators.bash import BashOperator
from airflow.operators.doris_load import DorisLoadOperator
from airflow.utils import dates
from airflow.utils.session import provide_session

# Kubernetes / object-storage settings read from the [kubernetes] section of
# airflow.cfg; used as spark-submit conf defaults in Task.create_operator.
K8S_MASTER = conf.get('kubernetes', 'master')
K8S_SPARK_BASE_IMAGE = conf.get('kubernetes', 'image')
K8S_SERVICEACCOUNTNAME = conf.get('kubernetes', 'service_account_name')
K8S_FILE_UPLOAD_PATH = conf.get('kubernetes', 'upload_path')
# S3-compatible storage (fs.s3a.*) endpoint and credentials.
K8S_S3_ENDPOINT = conf.get('kubernetes', 's3_endpoint')
K8S_S3_ACCESS_KEY = conf.get('kubernetes', 's3_access_key')
K8S_S3_SECRET_KEY = conf.get('kubernetes', 's3_secret_key')
K8S_S3_IMPL = conf.get('kubernetes', 's3_impl')
# cosn:// filesystem (fs.cosn.*) credentials and bucket settings.
K8S_COSN_SECRETID = conf.get('kubernetes', 'cosn_secret_id')
K8S_COSN_SECRETKEY = conf.get('kubernetes', 'cosn_secret_key')
K8S_COSN_IMPL = conf.get('kubernetes', 'cosn_impl')
K8S_COSN_CREDENTIALS_PROVIDER = conf.get('kubernetes', 'cosn_credentials_provider')
K8S_COSN_BUCKET_ENDPOINT_SUFFIX = conf.get('kubernetes', 'cosn_bucket_endpoint_suffix')
K8S_COSN_BUCKET_REGION = conf.get('kubernetes', 'cosn_bucket_region')
# ofs filesystem (fs.ofs.*) settings.
K8S_OFS_USER_APPID = conf.get('kubernetes', 'ofs_user_appid')
K8S_OFS_TMP_CACHE_DIR = conf.get('kubernetes', 'ofs_tmp_cache_dir')



# Kubernetes namespace that submitted spark/flink workloads run in.
NAMESPACE = conf.get('kubernetes', 'work_namespace')

class OperatorType:
    """String constants naming the operator kinds a Task row may declare."""

    SHELL = 'SHELL'
    SPARK = 'SPARK'
    FLINK = 'FLINK'
    LOGSTASH = 'LOGSTASH'
    DATAFACTORY = 'DATAFACTORY'
    DAGRUN = 'DAGRUN'
    DORIS_LOAD = 'DORIS_LOAD'


class Task(Base):
    """ORM model for the ``idata_task`` table.

    One row describes a single schedulable unit of work; ``create_operator``
    converts it into the matching Airflow operator (shell, spark, flink,
    logstash, data-factory, dag-run or doris-load).
    """

    __tablename__ = "idata_task"

    id = Column(Integer, primary_key=True)
    project_id = Column(Integer)
    folder_id = Column(Integer)
    name = Column(String(64))
    operator = Column(String(32))  # one of OperatorType.*
    content = Column(TEXT)  # raw command text / submit arguments
    param = Column(JSON)
    env = Column(JSON)
    files = Column(String)  # comma-separated list of extra files
    pool = Column(String(64))
    owner = Column(String(64))
    email_alert = Column(Boolean, default=True)
    email = Column(String(256))
    version = Column(String(32))  # spark version selector: '3' -> spark3 binary
    retries = Column(Integer, default=0)
    task_concurrency = Column(Integer, default=8)
    execution_timeout = Column(Integer)  # seconds
    type = Column(Integer, default=0)
    blood = Column(JSON)  # data-lineage information

    __table_args__ = (
        Index('project_id_name', project_id, name),
        Index('project_id', project_id),
        Index('folder_id', folder_id),
    )

    def parse_task_args(self):
        """Parse ``content`` into submit arguments for SPARK/FLINK tasks.

        Tokens starting with ``#`` are treated as comments and dropped.

        :return: parsed argument dict, or None for other operator types.
        """
        if self.operator in (OperatorType.SPARK, OperatorType.FLINK):
            # BUGFIX: str.replace returns a new string; the original call
            # discarded its result.  Work on a local copy instead.
            content = self.content.replace('\n', ' ')
            rs = filter(lambda x: not x.startswith('#'), re.split(r'\s+', content))
            if self.operator == OperatorType.SPARK:
                return spark_submit.parse_args(rs)
            return flink_submit.parse_args(rs)

    def get_task_command(self):
        """Return the command line this task would run (None for other types)."""
        if self.operator == OperatorType.SPARK:
            task = self.create_operator()
            return task.get_spark_submit_command()
        elif self.operator == OperatorType.SHELL:
            return self.content
        elif self.operator == OperatorType.FLINK:
            task = self.create_operator()
            return task.get_flink_submit_command()

    @provide_session
    def insert(self, session: Session = None):
        """Add this task to the session (commit handled by provide_session)."""
        session.add(self)

    @provide_session
    def update(self, session: Session = None):
        """Merge this (possibly detached) task into the session."""
        session.merge(self)

    @staticmethod
    @provide_session
    def update_blood(task_id, blood, session: Session = None):
        """Persist lineage ("blood") info for the task with ``task_id``.

        Note: the original ``if p:`` guard was vacuous (a Query object is
        always truthy), so the UPDATE is issued unconditionally; it simply
        affects zero rows when the id does not exist.
        """
        session.query(Task).filter(Task.id == task_id).update({"blood": blood})

    @staticmethod
    @provide_session
    def get_task(task_id, session: Session = None):
        """
        Get the Task with specific task id.

        :param task_id: The task id of the Task to get.
        :param session: SQLAlchemy ORM Session
        :return: the Task object, or None if not found
        """
        return session.query(Task).filter(Task.id == task_id).first()

    @staticmethod
    @provide_session
    def query_task(project_id=None, folder_id=None, name=None, session: Session = None):
        '''
        Query tasks, optionally filtered by project, folder and fuzzy name.

        :param project_id: exact project id filter (skipped when falsy)
        :param folder_id: exact folder id filter (skipped when falsy)
        :param name: substring match on the task name (skipped when falsy)
        :param session: SQLAlchemy ORM Session
        :return: list of matching Task objects
        '''
        qry = session.query(Task)
        if project_id:
            qry = qry.filter(Task.project_id == project_id)
        if folder_id:
            qry = qry.filter(Task.folder_id == folder_id)
        if name:
            qry = qry.filter(Task.name.like('%' + name + '%'))
        return qry.all()

    def to_json(self):
        """
        Get the Task in a json structure

        :return: the task object in json format
        """
        data = {
            'id': self.id,
            'project_id': self.project_id,
            'name': self.name,
            'operator': self.operator,
            'content': self.content,
            'param': self.param,
            'env': self.env,
            'files': self.files,
            'pool': self.pool,
            'owner': self.owner,
            'email_alert': self.email_alert,
            'retries': self.retries,
            'task_concurrency': self.task_concurrency,
            'execution_timeout': self.execution_timeout,
            'type': self.type,
            'blood': self.blood,
            'email': self.email
        }
        return data

    def _apply_email_alert(self, task, domain):
        """Enable failure e-mails on ``task`` when alerting is on.

        Uses the explicit ``email`` address if set; otherwise falls back to
        ``owner + domain``.
        NOTE(review): call sites historically used '@7.com' for some operator
        types and '@4.com' for others; preserved as-is -- confirm which
        domain is actually correct.
        """
        if self.email_alert:
            if self.email:
                task.email_on_failure = True
                task.email = self.email
            elif self.owner:
                task.email_on_failure = True
                task.email = self.owner + domain

    @staticmethod
    def _format_db_tables(args_sql):
        """Flatten a {cluster: [tables]} mapping into 'cluster:table,' pairs."""
        return ''.join(k + ":" + t + "," for k in args_sql for t in args_sql[k])

    def create_operator(self, dag=None):
        '''
        Convert this task into a concrete Airflow Operator.

        :param dag: DAG to attach the operator to; a throwaway DAG is
            created when omitted.
        :return: the operator instance, or None for unknown operator types.
        :raises AirflowException: when the task has no id.
        '''

        if not self.id:
            raise AirflowException('invalid task Id')

        if dag is None:
            dag = DAG(
                'dag-task-{}'.format(self.id),
                default_args={
                    'owner': 'airflow',
                    'depends_on_past': False,
                },
                start_date=datetime.now(),
                description='A random tmp DAG for airflow task',
            )

        if self.operator == OperatorType.SHELL:
            task = BashOperator(task_id='task-{}'.format(self.id),
                                bash_command=self.content,
                                owner=self.owner,
                                dag=dag,
                                retries=self.retries)
            self._apply_email_alert(task, '@7.com')
            if self.files:
                task.files = self.files.split(',')
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)

            task.params = self.param
            task.env = self.env
            return task

        if self.operator == OperatorType.DORIS_LOAD:
            task = DorisLoadOperator(task_id='task-{}'.format(self.id),
                                     task_name=self.name,
                                     content=self.content,
                                     owner=self.owner,
                                     dag=dag,
                                     retries=self.retries,
                                     pool=self.pool,
                                     queue=self.pool)
            self._apply_email_alert(task, '@7.com')
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            task.params = self.param
            task.env = self.env
            return task

        elif self.operator == OperatorType.SPARK:
            params = self.parse_task_args()
            fs = self.files.split(',') if self.files else []

            if 'name' not in params:
                params['name'] = self.name

            params['task_id'] = 'task-{}'.format(self.id)
            params['dag'] = dag
            params['remote_files'] = fs
            params['spark_binary'] = '/usr/hdp/3.1.5.0-152/spark3/bin/spark-submit' if self.version == '3' \
                else '/usr/hdp/3.1.5.0-152/spark2/bin/spark-submit'

            if self.param:
                spark_conf = params.setdefault('conf', dict())
                # Cluster-wide defaults; never override a value the user
                # already supplied in the submit arguments.
                defaults = {
                    'spark.kubernetes.container.image': K8S_SPARK_BASE_IMAGE,
                    'spark.kubernetes.authenticate.driver.serviceAccountName': K8S_SERVICEACCOUNTNAME,
                    'spark.kubernetes.file.upload.path': K8S_FILE_UPLOAD_PATH,
                    'spark.hadoop.fs.s3a.endpoint': K8S_S3_ENDPOINT,
                    'spark.hadoop.fs.s3a.impl': K8S_S3_IMPL,
                    'spark.hadoop.fs.s3a.fast.upload': 'true',
                    'spark.hadoop.fs.s3a.access.key': K8S_S3_ACCESS_KEY,
                    'spark.hadoop.fs.s3a.secret.key': K8S_S3_SECRET_KEY,
                    'spark.hadoop.fs.ofs.user.appid': K8S_OFS_USER_APPID,
                    'spark.hadoop.fs.ofs.tmp.cache.dir': K8S_OFS_TMP_CACHE_DIR,
                    'spark.hadoop.fs.cosn.credentials.provider': K8S_COSN_CREDENTIALS_PROVIDER,
                    'spark.hadoop.fs.cosn.impl': K8S_COSN_IMPL,
                    'spark.hadoop.fs.cosn.bucket.region': K8S_COSN_BUCKET_REGION,
                    'spark.hadoop.fs.cosn.bucket.endpoint_suffix': K8S_COSN_BUCKET_ENDPOINT_SUFFIX,
                    'spark.hadoop.fs.cosn.userinfo.secretId': K8S_COSN_SECRETID,
                    'spark.hadoop.fs.cosn.userinfo.secretKey': K8S_COSN_SECRETKEY,
                    'spark.hadoop.fs.AbstractFileSystem.cosn.impl': 'org.apache.hadoop.fs.CosN',
                }
                for key, value in defaults.items():
                    spark_conf.setdefault(key, value)

                # History-server settings are always forced on.
                spark_conf['spark.eventLog.enabled'] = True
                spark_conf['spark.history.fs.logDirectory'] = 'file:///data/share_log/spark_history_server/'
                spark_conf['spark.eventLog.dir'] = 'file:///data/share_log/spark_history_server/'

            if 'class' in params:
                params['java_class'] = params.pop('class')

            params['queue'] = self.pool
            params['pool'] = self.pool
            if self.owner:
                params['proxy_user'] = self.owner
            params['namespace'] = NAMESPACE

            task = spark_submit.SparkSubmitOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            self._apply_email_alert(task, '@7.com')

            return task

        elif self.operator == OperatorType.FLINK:
            params = self.parse_task_args()
            fs = self.files.split(',') if self.files else []

            params['task_id'] = 'task-{}'.format(self.id)
            params['dag'] = dag
            params['remote_files'] = fs
            params['flink_binary'] = '/usr/hdp/3.1.5.0-152/flink-1.13.6/bin/flink'

            if 'class' in params:
                params['java_class'] = params.pop('class')
            params['pool'] = self.pool
            params['queue'] = self.pool
            if self.owner:
                params['proxy_user'] = self.owner
            params['namespace'] = NAMESPACE

            task = flink_submit.FlinkSubmitOperator(**params)
            if self.retries:
                # Flink retries are tracked on a dedicated private attribute.
                task._flink_retries = self.retries
            self._apply_email_alert(task, '@4.com')

            return task

        elif self.operator == OperatorType.LOGSTASH:
            params = {
                'task_id': 'task-{}'.format(self.id),
                'dag': dag,
                'logstash_binary': '/opt/logstash/bin/logstash',
                'content': self.content,
            }
            task = logstash.LogstashOperator(**params)
            if self.retries:
                task.retries = self.retries
            self._apply_email_alert(task, '@4.com')
            return task

        elif self.operator == OperatorType.DATAFACTORY:
            params = self.foramt_args(dag)
            task = spark_submit.SparkSubmitOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            self._apply_email_alert(task, '@4.com')
            return task

        elif self.operator == OperatorType.DAGRUN:
            params = {
                'task_id': 'task-{}'.format(self.id),
                'dag': dag,
                'pool': self.pool,
                'queue': self.pool,
                # Robustness: tolerate a missing/None param payload.
                'workflow_id': (self.param or {}).get('workflow_id'),
            }

            task = dag_run.DagRunOperator(**params)
            if self.retries:
                task.retries = self.retries
            if self.execution_timeout:
                task.execution_timeout = timedelta(seconds=self.execution_timeout)
            self._apply_email_alert(task, '@4.com')
            return task

    def foramt_args(self, dag):
        """Build SparkSubmitOperator kwargs for a DATAFACTORY task.

        (The historical 'foramt' typo is kept for caller compatibility.)

        :param dag: DAG the resulting operator will belong to.
        :return: kwargs dict for ``spark_submit.SparkSubmitOperator``.
        """
        task_param = self.param
        path_env = Env.get_env(task_param['envId'])
        is_monitor = task_param['isMetricsMonitor']
        is_streaming = task_param['isStreaming']
        source_model = task_param['sourceModel']
        sink_model = task_param['sinkModel']
        spark_args = task_param['sparkArgs']
        args = ["-isMetricsMonitor=" + str(is_monitor)]
        params = {}
        jars = {path_env.etl_path, path_env.common_jar_path}

        # ---- reader side -------------------------------------------------
        if source_model['type'] == EtlUtil.SOURCE_TYPE_HIVE:
            hive_model = source_model['hiveModel']
            # NOTE(review): fetched but never used for the hive reader; kept
            # to preserve any lookup side effects -- confirm and drop.
            datasource = Datasource.get_datasource(hive_model['datasourceId'])
            sql = hive_model['sql']
            db_table = self._format_db_tables(EtlUtil.sql_cluster_decoder(sql))

            hive_args_model = HiveArgsModel(streaming=False, reader_type=EtlUtil.HIVE_READER_TYPE, reader_sql=sql,
                                            reader_db_tables=db_table,
                                            reader_udfs=hive_model.get('udf', []),
                                            reader_udafs=hive_model.get('udaf', []))
            args = args + hive_args_model.to_list_reader()
            format_udf_udaf_jars(hive_args_model.reader_udfs, hive_args_model.reader_udafs, jars, path_env, self.id, self.name)
            jars.add(path_env.hive_jar_path)
        elif source_model['type'] == EtlUtil.SOURCE_TYPE_JDBC:
            jdbc_model = source_model['jdbcModel']
            datasource = Datasource.get_datasource(jdbc_model['datasourceId'])
            sql = jdbc_model['sql']
            db_table = self._format_db_tables(EtlUtil.sql_cluster_decoder(sql))
            jdbc_args_model = JdbcArgsModel(streaming=False, reader_type=EtlUtil.JDBC_READER_TYPE, reader_sql=sql,
                                            reader_db_tables=db_table, reader_ip=datasource.ip, reader_port=datasource.port,
                                            reader_user=datasource.user_name,
                                            reader_password=datasource.password,
                                            reader_udfs=jdbc_model.get('udf', []),
                                            reader_udafs=jdbc_model.get('udaf', []))

            args = args + jdbc_args_model.to_list_reader()
            format_udf_udaf_jars(jdbc_args_model.reader_udfs, jdbc_args_model.reader_udafs, jars, path_env, self.id, self.name)
            jars.add(path_env.jdbc_jar_path)

        # ---- writer side -------------------------------------------------
        if sink_model['type'] == EtlUtil.SINK_TYPE_HIVE:
            hive_model = sink_model['hiveModel']
            writer_path = EtlUtil.HIVE_WRITER_PATH + hive_model['database'] + ".db/" + hive_model['table']
            hive_args_model = HiveArgsModel(writer_type=EtlUtil.HIVE_WRITER_TYPE, writer_path=writer_path,
                                            writer_partition_by=hive_model['partitionBy'],
                                            writer_repartitions=hive_model['repartitions'],
                                            writer_format=hive_model['format'])
            args = args + hive_args_model.to_list_writer()
            jars.add(path_env.hive_jar_path)
        elif sink_model['type'] == EtlUtil.SINK_TYPE_JDBC:
            jdbc_model = sink_model['jdbcModel']
            datasource = Datasource.get_datasource(jdbc_model['datasourceId'])
            jdbc_args_model = JdbcArgsModel(writer_type=EtlUtil.JDBC_WRITER_TYPE,
                                            writer_clean_table=jdbc_model['cleanTable'], writer_ip=datasource.ip,
                                            writer_port=datasource.port, writer_db=jdbc_model['database'],
                                            writer_table=jdbc_model['table'], writer_user=datasource.user_name,
                                            writer_password=datasource.password)
            args = args + jdbc_args_model.to_list_writer()
            jars.add(path_env.jdbc_jar_path)

        if is_streaming:
            # BUGFIX: self.id is an int; the original str+int concatenation
            # raised TypeError for streaming tasks.
            ck_path = path_env.checkpoint_path + "/" + str(self.id)
            args.append("-writer.checkpointLocation=" + ck_path)
            args.append("-writer.triggerPeriod=60")

        conf_param = {'spark.executor.extraJavaOptions': '-Dfile.encoding=utf-8',
                      'spark.driver.extraJavaOptions': '-Dfile.encoding=utf-8',
                      'spark.jars': ','.join(jars)}

        params['application_args'] = args
        params['application'] = path_env.temporary_file_path
        params['executor_cores'] = spark_args['executorCores']
        params['executor_memory'] = spark_args['executorMemory']
        params['driver_memory'] = spark_args['driverMemory']
        params['num_executors'] = spark_args['numExecutors']
        params['conf'] = conf_param
        params['name'] = self.name
        params['task_id'] = 'task-{}'.format(self.id)
        params['dag'] = dag
        params['spark_binary'] = path_env.binary_path
        params['java_class'] = EtlUtil.CLASS_NAME
        params['queue'] = self.pool
        params['pool'] = self.pool
        if self.owner:
            params['proxy_user'] = self.owner

        return params


def format_udf_udaf_jars(udfs, udafs, jars, path_env, task_id, name):
    """Collect UDF/UDAF jar paths into ``jars`` (mutated in place).

    Each entry is built as ``<decoder_path><task_id>_<name>/udf|udaf/<fileName>``.
    Falsy ``udfs``/``udafs`` add nothing.
    """
    prefix = path_env.decoder_path + str(task_id) + "_" + name
    jars.update(prefix + "/udf/" + entry['fileName'] for entry in (udfs or []))
    jars.update(prefix + "/udaf/" + entry['fileName'] for entry in (udafs or []))
