# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import re

import pymysql
import pytz
import subprocess
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Variable
from datetime import datetime, timedelta
import jinja2
from datetime import datetime, timedelta
from  utils.macros import *

def sql_parse(data1, execution_date):
    """Render a Jinja2 SQL template with a timezone-shifted execution date.

    :param data1: SQL text containing Jinja2 placeholders (e.g. ``{{ execution_date }}``)
    :type data1: str
    :param execution_date: ISO-8601 timestamp string, possibly with a ``+HH:MM``
        offset suffix (the offset is dropped before parsing)
    :type execution_date: str
    :return: the rendered SQL string
    :rtype: str
    """
    # Drop any "+HH:MM" timezone suffix, then shift the naive timestamp by
    # +8 hours (UTC -> CST, since Airflow stores execution dates in UTC).
    execution_date = execution_date.split('+')[0]
    original_time = datetime.strptime(execution_date, '%Y-%m-%dT%H:%M:%S')
    execution_date = original_time + timedelta(hours=8)
    env = jinja2.Environment()
    # Expose the project's custom macros as Jinja2 filters.
    env.filters = macros_dict
    # The previous version branched on len(data1) > 10 but both branches were
    # identical, so the condition was dead weight and has been removed.
    return env.from_string(data1).render(execution_date=execution_date)

#taskid=['jms_dwd__dwd_barscan_arrival_dt']
import re
import subprocess
dir='taskid/task.txt'
def read_hdfs_file(file_path):
    """Read a file from HDFS via ``hadoop fs -cat`` and return its text.

    :param file_path: absolute HDFS path of the file to read
    :type file_path: str
    :return: the file content decoded as UTF-8, or ``None`` if the hadoop
        command exits non-zero (the error is printed, not raised)
    :rtype: str or None
    """
    command = ["hadoop", "fs", "-cat", file_path]
    try:
        output = subprocess.check_output(command)
        content = output.decode("utf-8")
    except subprocess.CalledProcessError as e:
        print("Error executing Hadoop command: {}".format(e))
        # Explicit None so the best-effort contract is visible to callers;
        # the previous bare "return" returned None implicitly.
        return None
    return content
# Resolve the task-id list file under /user/hive and read it from HDFS at
# import time (module-level side effect).
filedir="/user/hive/"+dir
sql=read_hdfs_file(filedir)
# NOTE(review): read_hdfs_file returns None when the hadoop command fails,
# in which case splitlines() raises AttributeError — confirm the file is
# guaranteed to exist, or guard this call.
taskid=sql.splitlines()
# NOTE(review): the list just read from HDFS is immediately overwritten by
# this hard-coded value — looks like leftover debug code; confirm which
# source of task ids is intended before removing either line.
taskid=['jms_dwd__dwd_barscan_arrival_dt']
class SparkSqlHook(BaseHook):
    """
    This hook is a wrapper around the spark-sql binary. It requires that the
    "spark-sql" binary is in the PATH.
    Derived from airflow.contrib.operators.spark_sql_operator.SparkSqlHook with:
    - ``conf`` changed from string to dict, consistent with SparkSubmitOperator
    - added ``hiveconf`` parameter
    - added ``driver_cores`` parameter
    - added ``driver_memory`` parameter
    - fixed the upstream operator printing large numbers of blank lines in logs

    :param sql: The SQL query to execute
    :type sql: str
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param hiveconf: Arbitrary Hive configuration properties
    :type hiveconf: dict
    :param conn_id: connection_id string
    :type conn_id: str
    :param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
        (Default: all the available cores on the worker)
    :type total_executor_cores: int
    :param executor_cores: (Standalone & YARN only) Number of cores per
        executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param driver_cores: CPU cores Number of driver
    :type driver_cores: int
    :param driver_memory: Memory of driver
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param master: spark://host:port, mesos://host:port, yarn, or local
    :type master: str
    :param name: Name of the job.
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param verbose: Whether to pass the verbose flag to spark-sql
    :type verbose: bool
    :param yarn_queue: The YARN queue to submit to (Default: "default")
    :type yarn_queue: str
    :param exec_date: execution date string forwarded to the submitted job
    :type exec_date: str
    :param task_instance_key_str: Airflow task-instance key
        ("<dag_id>__<task_id>__<execution_ts>"), used to tag the Spark job
    :type task_instance_key_str: str
    """

    def __init__(self,
                 sql,
                 conf=None,
                 hiveconf=None,
                 conn_id='spark_default',
                 total_executor_cores=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_cores=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 master='yarn',
                 name='default-name',
                 num_executors=None,
                 verbose=True,
                 yarn_queue='default',
                 exec_date=None,
                 task_instance_key_str=None
                 ):
        self._sql = sql
        self._conf = conf or {}
        self._hiveconf = hiveconf or {}
        self._conn = self.get_connection(conn_id)
        self._total_executor_cores = total_executor_cores
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_cores = driver_cores
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._master = master
        self._name = name
        self._exec_date = exec_date
        self._task_instance_key_str = task_instance_key_str
        self._num_executors = num_executors
        self._verbose = verbose
        self._yarn_queue = yarn_queue
        self._sp = None
        # YARN application id captured from the spark-submit output; kept on
        # the instance (not a module global) so kill() cannot hit a NameError
        # when run_query() was never called.
        self._yarn_application_id = None
        # Lazy %s formatting: the previous "+"-concatenation raised TypeError
        # whenever exec_date / task_instance_key_str was None.
        self.log.info("log_##_exec_date %s", self._exec_date)
        self.log.info("log_##task_instance_key_str %s", self._task_instance_key_str)

    def get_conn(self):
        # No persistent connection object: spark-submit is invoked per query.
        pass

    def collectSqlDetails(self, sql_parse_name, sql_parse_detail):
        """Upsert this task's rendered SQL into ``bgdm.metadata_sql_parser``.

        :param sql_parse_name: job name; the last 11 characters (a date
            suffix) are stripped to form the stored ``sql_name``
        :type sql_parse_name: str
        :param sql_parse_detail: the SQL text to store
        :type sql_parse_detail: str
        """
        airflow_customize_config = json.loads(Variable.get(key='airflow_tianxiang_config_uat', default_var=''))
        db = pymysql.connect(host=airflow_customize_config.get("host"),
                             user=airflow_customize_config.get("username"),
                             password=airflow_customize_config.get("password"),
                             port=3306,
                             database=airflow_customize_config.get("db"),
                             charset='utf8')
        try:
            cursor = db.cursor()
            # Parameterized statement: the previous version interpolated the
            # SQL text directly with str.format, which broke on embedded
            # quotes and was open to SQL injection.
            sql = ("insert into bgdm.metadata_sql_parser (sql_name, sql_detail, is_delete) "
                   "values (%s, %s, %s) on duplicate key update "
                   "sql_name = values(sql_name), sql_detail = values(sql_detail), "
                   "is_delete = values(is_delete)")
            # Keep the historical double->single quote normalisation so newly
            # stored rows stay consistent with previously collected data.
            params = (sql_parse_name[:-11], sql_parse_detail.replace('"', "'"), "1")
            self.log.info("%s | params=%s", sql, params)
            try:
                cursor.execute(sql, params)
                self.log.info("collect sql successfully ~ ")
                db.commit()
            except Exception as err:
                # Metadata collection is best-effort; log and roll back.
                self.log.error("collect sql failed ,err: {}".format(err))
                db.rollback()
            cursor.close()
        finally:
            # Always release the connection, even if cursor creation fails.
            db.close()

    def _prepare_command(self, cmd):
        """
        Construct the spark-submit command to execute. Verbose output is enabled
        as default.

        :param cmd: command to append to the spark-submit command
        :type cmd: str or list[str]
        :return: full command to be executed
        :rtype: list[str]
        :raises AirflowException: if ``cmd`` is neither a str nor a list
        """
        connection_cmd = ["spark-submit"]
        sql_parse_name = ''
        # NOTE(review): sql_parse_detail is never assigned in this method, so
        # collectSqlDetails always receives an empty detail — confirm intent.
        sql_parse_detail = ''
        if self._conf:
            for key in self._conf:
                connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
            # Pin the Python interpreter used by the driver and executors.
            connection_cmd += ["--conf", "spark.yarn.appMasterEnv.PYSPARK_PYTHON=/usr/local/python3.7.6/bin/python3"]
            connection_cmd += ["--conf", "spark.pyspark.driver.python=/usr/local/python3.7.6/bin/python3"]
            # NOTE(review): the "python3env" prefix below looks accidental
            # (compare the two lines above) — confirm before relying on it.
            connection_cmd += ["--conf", "spark.pyspark.python=python3env/usr/local/python3.7.6/bin/python3"]
            connection_cmd += ["--py-files", "hdfs://yl-bg-hdp:8020/user/hive/jinja2.zip,hdfs://yl-bg-hdp:8020/user/hive/markupsafe.zip"]
            # task_instance_key_str is "<dag_id>__<task_id>__<execution_ts>";
            # strip the first and last segments to recover the bare task id.
            result = str(self._task_instance_key_str).split("__")
            begin = result[0]
            end = result[-1]
            s11 = str(self._task_instance_key_str).replace(begin + '__', '')
            s12 = s11.replace('__' + end, '')
            connection_cmd += ["--conf", "spark.taskid=" + begin + "+" + s12]
        if self._hiveconf:
            for key in self._hiveconf:
                connection_cmd += ["--conf", "spark.hadoop.{}={}".format(key, str(self._hiveconf[key]))]

        if self._total_executor_cores:
            connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
        if self._executor_cores:
            connection_cmd += ["--executor-cores", str(self._executor_cores)]
        if self._executor_memory:
            connection_cmd += ["--executor-memory", self._executor_memory]
        if self._driver_cores:
            connection_cmd += ["--driver-cores", str(self._driver_cores)]
        if self._driver_memory:
            connection_cmd += ["--driver-memory", self._driver_memory]
        if self._keytab:
            connection_cmd += ["--keytab", self._keytab]
        if self._principal:
            connection_cmd += ["--principal", self._principal]
        if self._num_executors:
            connection_cmd += ["--num-executors", str(self._num_executors)]
        if self._master:
            connection_cmd += ["--master", self._master]
        if self._name:
            connection_cmd += ["--name", self._name]
            sql_parse_name = self._name
            print("nameNNNNNN:" + sql_parse_name)
            self.log.info("nameNNNNNN:" + sql_parse_name)
        if self._verbose:
            connection_cmd += ["--verbose"]
        if self._yarn_queue:
            connection_cmd += ["--queue", self._yarn_queue]
            # Tasks listed in the module-level taskid list get larger shuffle
            # buffers (heavy-shuffle jobs identified operationally).
            for i in taskid:
                if i in str(self._task_instance_key_str):
                    connection_cmd += ["--conf", "spark.reducer.maxSizeInFlight=96m"]
                    connection_cmd += ["--conf", "spark.shuffle.file.buffer=64k"]
        if self._sql:
            # The SQL file is executed by a generic driver script on the
            # cluster; pass the file path, job name and execution date as args.
            connection_cmd += ["--deploy-mode", "cluster"]
            connection_cmd += ["hdfs://yl-bg-hdp:8020/user/hive/sparklib/SparkSubmitYarnClusterForSqlFilePro.py", str(self._sql)]
            connection_cmd += [str(self._name)]
            connection_cmd += [str(self._exec_date)]

            # Fetch the raw SQL from HDFS and log the Jinja2-rendered text so
            # the exact query is visible in the Airflow task log.
            filedir = "/user/hive/" + str(self._sql)
            command = ["hadoop", "fs", "-cat", filedir]

            sql_string = subprocess.check_output(command, encoding='utf-8', universal_newlines=True)
            sql_query_str = sql_parse(str(sql_string), str(self._exec_date))
            self.log.info("Your sql is : %s", "\n" + sql_query_str + "\n")

        if isinstance(cmd, str):
            connection_cmd += cmd.split()
        elif isinstance(cmd, list):
            connection_cmd += cmd
        else:
            raise AirflowException("Invalid additional command: {}".format(cmd))

        self.log.info("Spark-Submit cmd: %s", connection_cmd)
        # Metadata collection is best-effort; never fail the job because of it.
        try:
            self.collectSqlDetails(sql_parse_name, sql_parse_detail)
        except Exception as err:
            self.log.error("collectSqlDetails failed , {}".format(err))
        return connection_cmd

    def run_query(self, cmd="", **kwargs):
        """
        Remote Popen (actually execute the Spark-sql query)

        :param cmd: command to append to the spark-sql command
        :type cmd: str or list[str]
        :param kwargs: extra arguments to Popen (see subprocess.Popen)
        :type kwargs: dict
        :raises AirflowException: if spark-submit exits with a non-zero code
        """
        spark_sql_cmd = self._prepare_command(cmd)
        print("cmd:" + " ".join(spark_sql_cmd))
        self._sp = subprocess.Popen(spark_sql_cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    bufsize=-1,
                                    universal_newlines=True,
                                    **kwargs)

        # Capture the YARN application id from the submit output so kill()
        # can terminate the remote application. Reset first: a previous run's
        # id must not leak into this one.
        yarn_app_pattern = re.compile(r"application_\d{13}_\d+")
        self._yarn_application_id = None
        for line in iter(self._sp.stdout.readline, ''):
            self.log.info(line.strip())
            match = yarn_app_pattern.search(line)
            if match:
                self._yarn_application_id = match.group(0)

        returncode = self._sp.wait()

        if returncode:
            raise AirflowException(
                "Cannot execute {} on {}. Process exit code: {}.".format(
                    cmd, self._conn.host, returncode
                )
            )

    def kill(self):
        """Kill the local spark-submit process and its YARN application."""
        if self._sp and self._sp.poll() is None:
            self.log.info("Killing the Spark-submit job")
            self._sp.kill()
            # Instance attribute (initialized to None in __init__): unlike the
            # previous module global, this cannot raise NameError when
            # run_query() was never called or never matched an application id.
            if self._yarn_application_id:
                self.log.info("Killing the YARN application: %s", self._yarn_application_id)
                yarn_kill_cmd = ["yarn", "application", "-kill", self._yarn_application_id]
                yarn_kill_sp = subprocess.Popen(yarn_kill_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                yarn_kill_sp.wait()
            else:
                self.log.warning("YARN ApplicationID not found. Unable to kill the YARN application.")

