import datetime
import json
import os
import socket
import time
import traceback
from datetime import datetime, timedelta
from multiprocessing import Process
from threading import Thread

# import schedule
import pandas as pd
from dateutil.relativedelta import relativedelta

from air_web.config.config import config
from air_web.data_platform import mysql_con
from air_web.dw import gen_task_log as gtl
from air_web.dw import sql_form
from air_web.dw.data_mapping import TimeDimension
from air_web.dw.logger import init_log
from air_web.dw.parse_config import parse_config, proc_task

"""
修改了任务，直接加就行，is_remove代码来维护
建立主键，维护分布式冲突
0失败，1成功，2等待开始，3执行中，4重写
1生成成功，2等待开始,3增量任务
增加维度，应该让dws来决定跳过，避免所有维度重新计算
不然数据源怎么加载利用都是问题
alter_type应该只有一个，先增加维度，补齐历史数据，然后增量计算，写任务依赖
配置里可以支持一些参数传递
分区建表不用任务化，增加分区自动化就行
config时间双闭合
"""
# Best-effort creation of the log directory; scheduling must proceed even
# if the directory already exists or permissions cannot be changed.
try:
    # makedirs creates missing parents too (plain mkdir would raise if
    # /home/zshield/logs did not exist yet) and tolerates an existing dir.
    os.makedirs("/home/zshield/logs/dw/", exist_ok=True)
    os.system("chmod -R 777 /home/zshield/logs/dw/")
except OSError:
    # Narrowed from a bare except: only filesystem errors are expected here.
    pass
log = init_log("/home/zshield/logs/dw/")


class TaskSchedule:
    """MySQL-backed distributed scheduler for data-warehouse tasks.

    ``gen_main`` slices each active ``task_config`` row into time-windowed
    jobs (rows in ``task_log``); ``attribute_main`` claims pending jobs for
    this node (via an UPDATE-based lock) and executes them.

    Status codes (from the module notes at the top of the file):
      task_log.task_status    -- 0 failed, 1 success, 2 waiting, 3 running, 4 rewrite
      task_config.task_status -- 1 generated, 2 waiting, 3 incremental
    """

    def __init__(self):
        # task names whose config changes were already reconciled this run
        self.compared_task = []
        # log_ids of blocking dependency jobs found by the last judge_depend()
        self.depend_log = []
        # execution mode ("loop" / "finish" / "once") -- controls loop exit
        self.mode = config["dw_execute_mode"]
        # True once gen_main() has completed a full generation pass
        self.gen_once = False
        # non-empty list restricts generation/execution to these task_ids
        self.only_execute_task = config["dw_only_execute_task"]
        # ensures the incremental rewrite pass runs at most once
        self.gen_increment = False

    def get_tasks(self, is_remove=None, task_name=None, task_status=None):
        """Fetch task_config rows ordered by rank then recency.

        Each argument that is not None becomes an AND-ed, parameterized
        WHERE condition; passing nothing returns every configured task.
        """
        where_sql = ""
        where_sql_list = []
        value = []
        if is_remove is not None:
            where_sql_list.append(" is_remove = %s ")
            value.append(is_remove)
        if task_name is not None:
            where_sql_list.append(" task_name = %s ")
            value.append(task_name)
        if task_status is not None:
            where_sql_list.append(" task_status = %s ")
            value.append(task_status)

        if len(where_sql_list) >= 1:
            where_sql = " where " + " and ".join(where_sql_list)

        sql = (
            """select task_id, task_name, timedelta, config_text, depend_task, task_rank, alter_type, task_status, 
        is_remove, start_time, end_time, create_time, generated_time from task_config """
            + where_sql
            + " order by task_rank, create_time desc"
        )
        result = mysql_con.get(sql, value)
        return result

    def get_task_logs(
        self,
        task_status: list = None,
        start_time=None,
        end_time=None,
        task_id=None,
        task_name=None,
        node_name=None,
        now_time=None,
        alter_type=None,
    ):
        """Fetch live task_log rows joined with their task_config.

        Rows with is_remove = 1 on either table are always excluded.
        ``now_time`` is currently unused; it is kept for interface
        compatibility with existing callers.
        """
        where_sql = ""
        where_sql_list = []
        value = []
        if task_status is not None:
            where_sql_list.append(" t1.task_status in %s ")
            value.append(tuple(task_status))
        if task_id is not None:
            where_sql_list.append(" t1.task_id = %s ")
            value.append(task_id)
        if task_name is not None:
            where_sql_list.append(" t1.task_name = %s ")
            value.append(task_name)
        if start_time is not None:
            where_sql_list.append(" t1.start_time >= %s ")
            value.append(start_time)
        if end_time is not None:
            where_sql_list.append(" t1.end_time <= %s ")
            value.append(end_time)
        if node_name is not None:
            where_sql_list.append(" t1.node_name = %s ")
            value.append(node_name)
        if alter_type is not None:
            where_sql_list.append(" t2.alter_type = %s ")
            value.append(alter_type)

        if len(where_sql_list) >= 1:
            where_sql = " and " + " and ".join(where_sql_list)

        # distributed nodes may race on job execution
        sql = (
            """select t1.log_id, t1.task_id, t1.task_name, t1.task_status, t1.create_time, t1.pid, t1.node_name, 
        t1.start_time, t1.end_time, t1.is_remove, t2.config_text, t2.depend_task, t2.timedelta 
        from task_log t1 
        left join task_config t2 on t1.task_id = t2.task_id 
        where t1.is_remove = 0 and t2.is_remove = 0 """
            + where_sql
            + " order by t1.create_time, t2.task_rank"
        )
        result = mysql_con.get(sql, value)
        return result

    def judge_depend(self, start_time, end_time, depend_task: str):
        """Return True when this job is blocked by an unfinished dependency.

        ``depend_task`` is a comma-separated list of task names; all
        dependencies are checked against the same [start_time, end_time]
        window.  Any dependency log still in status 0/2/3/4 blocks the job;
        the blocking log_ids are stored in ``self.depend_log``.
        """
        if depend_task is None:
            # no dependencies configured for this task
            return False
        # multiple dependencies must all cover the same time window
        gtl_res = []
        # check every dependency task name
        for dtn in depend_task.split(","):
            tmp_gtl_res = self.get_task_logs(
                [0, 2, 3, 4],
                start_time=start_time,
                end_time=end_time,
                task_name=dtn,
            )
            gtl_res.extend(tmp_gtl_res)
        # NOTE(review): an empty result may also mean the dependency job was
        # never created for this window (misconfiguration), in which case
        # this misjudges the job as runnable.  The window padding also means
        # end_time may not match exactly.
        self.depend_log = [i["log_id"] for i in gtl_res]

        return False if gtl_res == [] else True

    def judge_task(self, task_id, start_time, end_time):
        """Return True if a log row already exists for this task and window."""
        result = self.get_task_logs(
            task_id=task_id, start_time=start_time, end_time=end_time
        )
        if result != []:
            return True
        return False

    def gen_end_time(self, start_time, task_timedelta):
        """Compute the (exclusive) end of the window starting at start_time.

        ``task_timedelta`` is "<num> <tag>", e.g. "1 d".  Returns str.
        Raises PermissionError for an unknown tag.
        """
        tag = task_timedelta.split(" ")[1]
        num = int(task_timedelta.split(" ")[0])
        start_time = pd.to_datetime(start_time)
        if tag == TimeDimension.MIN15:
            end = start_time + pd.Timedelta(minutes=num)
        elif tag == TimeDimension.HOUR:
            end = start_time + pd.Timedelta(hours=num)
        elif tag == TimeDimension.DAY:
            end = start_time + pd.Timedelta(days=num)
        elif tag == TimeDimension.WEEK:
            end = start_time + pd.Timedelta(weeks=num)
        elif tag == TimeDimension.MONTH:
            end = start_time + relativedelta(months=num)
        elif tag == TimeDimension.QUARTER:
            # NOTE(review): the quarter branch ignores ``num`` and always
            # advances one quarter -- confirm this is intended.
            month = start_time.month + 3
            # may cross a year boundary, but sliced windows cross at most one
            if month > 12:
                end = start_time + relativedelta(years=1, month=1, day=1)
            else:
                end = start_time.replace(month=month)
        elif tag == TimeDimension.YEAR:
            end = start_time + relativedelta(years=num)
        else:
            raise PermissionError
        return str(end)

    def handle_start_time(self, start_time, task_timedelta):
        """Floor start_time to the boundary of its period (minute, hour,
        day, week, month, quarter or year) so windows align on natural
        boundaries.  Raises PermissionError for an unknown tag."""
        tag = task_timedelta.split(" ")[1]
        num = int(task_timedelta.split(" ")[0])
        start_time = pd.to_datetime(start_time)
        if tag == TimeDimension.MIN15:
            start_time = start_time.replace(second=0)
        elif tag == TimeDimension.HOUR:
            start_time = start_time.replace(second=0, minute=0)
        elif tag == TimeDimension.DAY:
            start_time = start_time.replace(second=0, minute=0, hour=0)
        elif tag == TimeDimension.WEEK:
            # step back a week, then forward to the following Monday --
            # i.e. the Monday of the current week
            start_time = (
                start_time.replace(second=0, minute=0, hour=0)
                - relativedelta(weeks=1)
                + relativedelta(weekday=0)
            )
        elif tag == TimeDimension.MONTH:
            start_time = start_time.replace(second=0, minute=0, hour=0, day=1)
        elif tag == TimeDimension.QUARTER:
            month = ((start_time.month - 1) // 3) + 1
            start_time = start_time.replace(
                second=0, minute=0, hour=0, day=1, month=month
            )
        elif tag == TimeDimension.YEAR:
            start_time = start_time.replace(
                second=0, minute=0, hour=0, day=1, month=1
            )
        else:
            raise PermissionError
        return start_time

    def remove_other_task(self, task_id, task_name):
        """Soft-delete every other config/log row sharing ``task_name`` but
        not ``task_id`` (supersede older versions of the same task)."""
        sql = "update task_config set is_remove = 1 where is_remove = 0 and task_id != %s and task_name = %s"
        up_sql = "update task_log set is_remove = 1 where is_remove = 0 and task_id != %s and task_name = %s"
        sql_list = sql_form(sql, [task_id, task_name])
        sql_list.extend(sql_form(up_sql, [task_id, task_name]))
        result = mysql_con.execute(sql_list)
        if sum(result) > 0:
            log.info(f"移除旧任务。task_id:{task_id},task_name:{task_name}")

    def remove_task(self, task_id=None):
        """Soft-delete a task's config and logs.

        With ``task_id``: remove exactly that task.  Without it: remove
        every task whose alter_type is 'delete_task'.
        """
        if task_id is not None:
            sql = "update task_config set is_remove = 1 where is_remove = 0 and task_id = %s"
            up_sql = "update task_log set is_remove = 1 where is_remove = 0 and task_id = %s"
            sql_list = sql_form(sql, [task_id])
            sql_list.extend(sql_form(up_sql, [task_id]))
            result = mysql_con.execute(sql_list)
        else:
            sql = """update task_log set is_remove = 1 where is_remove = 0 and task_id in 
            (select task_id from task_config where alter_type = 'delete_task') """
            sql_cf = """update task_config set is_remove = 1 where is_remove = 0 and alter_type = 'delete_task' """
            sql_list = sql_form(sql)
            sql_list.extend(sql_form(sql_cf))
            result = mysql_con.execute(sql_list)

        if sum(result) > 0:
            log.info(f"删除任务和作业")

    def gen_rewrite_task_log(self, task_id, last_task_id, task_status=4):
        """Clone the live logs of ``last_task_id`` under ``task_id`` with
        rewrite status (default 4), then soft-delete the originals."""
        select_sql = """select log_id from task_log where task_id = %s and is_remove = 0"""
        select_value = [last_task_id]
        result = mysql_con.get(select_sql, select_value)
        if result == []:
            log.info(f"没有重写作业")
            return
        sql = """insert into task_log(task_id, task_name, task_status, start_time, end_time)
        select distinct %s task_id, task_name, %s task_status, start_time, end_time from task_log 
        where task_id = %s and is_remove = 0"""
        value = [task_id, task_status, last_task_id]
        update_sql = (
            """update task_log set is_remove = 1 where log_id in %s """
        )
        update_value = [tuple([i["log_id"] for i in result])]

        sql_list = sql_form(sql, value)
        sql_list.extend(sql_form(update_sql, update_value))
        mysql_con.execute(sql_list)
        log.info(
            f"重写旧task_id:{last_task_id}作业生成。新task_id:{task_id},log_id:{update_value}"
        )

    def unlock_error_log(self):
        """Release node locks on waiting/rewrite jobs (status 2 or 4) that
        were claimed more than one minute ago but never started."""
        sql = """update task_log set node_name = null where node_name is not null and task_status in (2, 4) 
        and date_add(create_time, interval 1 minute) <= now() and is_remove = 0"""
        mysql_con.execute(sql_form(sql))

    def gen_increment_rewrite_task_log(self, now_time=None, task_status=4):
        """Regenerate (as rewrite jobs) logs whose window is still within the
        configured incremental delay of ``now_time``.

        Running or finished jobs were already claimed once, so they must be
        re-issued; the window test is start <= now < end + delay (strict on
        the end side).
        """
        select_sql = """select log_id from task_log where is_remove = 0 and task_status in (1,3) 
        and start_time <= %s and date_add(end_time, interval %s minute) > %s """
        select_value = [now_time, config["dw_increase_delay"], now_time]
        result = mysql_con.get(select_sql, select_value)
        if result == []:
            log.info(f"没有增量重写作业")
            return
        sql = """insert into task_log(task_id, task_name, task_status, start_time, end_time)
        select distinct task_id, task_name, %s task_status, start_time, end_time from task_log 
        where task_status in (1,3) and start_time <= %s and date_add(end_time, interval %s minute) > %s and is_remove = 0 """
        value = [task_status, now_time, config["dw_increase_delay"], now_time]
        update_sql = (
            """update task_log set is_remove = 1 where log_id in %s """
        )
        update_value = [tuple([i["log_id"] for i in result])]

        sql_list = sql_form(sql, value)
        sql_list.extend(sql_form(update_sql, update_value))
        mysql_con.execute(sql_list)
        self.gen_increment = True
        log.info(f"增量重写作业生成。log_id:{update_value}")

    def compare_task(
        self, task_name, tasks, alter_type, task_config, tid, task_id
    ):
        """Reconcile a task's config against its older versions (later in
        ``tasks``): for 'add_indicators' add the new columns and schedule a
        rewrite of historic data; then supersede the old versions.  Each
        task_name is processed at most once per run."""
        # add_indicators: append aggregate columns, rewrite data (status 4),
        # and only create tables that are new
        if task_name in self.compared_task:
            return

        # TODO: a single change may carry multiple modifications at once
        if alter_type == "add_indicators":
            for tmp_t in tasks[tid + 1 :]:
                if task_name == tmp_t["task_name"]:
                    ttc = set(json.dumps(i) for i in task_config.values())
                    otc = set(
                        json.dumps(i)
                        for i in json.loads(tmp_t["config_text"]).values()
                    )
                    addc = ttc - otc
                    for c in addc:
                        c = json.loads(c)
                        if c.get("rename") is None:
                            continue
                        cs = c["rename"].values()
                        table = c["save_table"]
                        self.add_column(cs, table)
                    # reuse the previous task's info to regenerate its
                    # task_log rows with rewrite status 4
                    self.gen_rewrite_task_log(task_id, tmp_t["task_id"])
        elif alter_type is None:
            # only a data-source swap, a must-function addition, or a file
            # operation -- nothing to reconcile.  Removals are harmless: mark
            # the old row is_remove = 1 (or edit the config in place) and
            # discard the task's historic logs.
            pass
        self.remove_other_task(task_id, task_name)
        self.compared_task.append(task_name)

    def add_column(self, column_list, table):
        """ALTER each column in one at a time so a pre-existing column does
        not abort the remaining additions."""
        for c in column_list:
            try:
                sql = f"alter table {table} add column {c} decimal(20, 6)"
                mysql_con.execute(sql_form(sql))
                log.info(f"增加字段。表:{table},字段:{c}")
            except:
                # best effort: the column probably exists already
                pass

    def up_task_config_status(self, task_status, task_id, generated_time=None):
        """Persist a task_config's status and (optionally) the timestamp up
        to which its jobs have been generated."""
        sql = "update task_config set task_status = %s, generated_time = %s where task_id = %s"
        mysql_con.execute(
            sql_form(sql, [task_status, generated_time, task_id])
        )
        log.info(
            f"更新任务配置状态。task_id:{task_id},task_status:{task_status},generated_time:{generated_time}"
        )

    def gen_main(self):
        """Generation loop: slice every active task into time windows and
        create one task_log job per window, skipping windows already
        generated.  Exits after one pass unless mode == 'loop'."""
        while True:
            tasks = self.get_tasks(0)
            # avoid re-checking everything after each individual generation
            for tid, task in enumerate(tasks):
                # only tasks waiting (2) or incremental (3) need generation
                task_status = task["task_status"]
                if task_status not in [2, 3]:
                    continue

                task_id = task["task_id"]
                task_name = task["task_name"]
                task_config = json.loads(task["config_text"])
                alter_type = task["alter_type"]
                start_time = task["start_time"]
                org_end_time = task["end_time"]
                current_time = datetime.now()
                # open-ended tasks run up to "now"
                end_time = (
                    str(current_time) if org_end_time is None else org_end_time
                )
                generated_time = task["generated_time"]
                task_timedelta = task["timedelta"]

                if (
                    self.only_execute_task != []
                    and task_id not in self.only_execute_task
                ):
                    # must-tasks may also be pinned -- one of them may have
                    # failed.  With an explicit allow-list, skip generation
                    # for every other task_id.
                    continue

                if (
                    TimeDimension.MIN15 in task_timedelta
                    or TimeDimension.HOUR in task_timedelta
                ):
                    # sub-daily tasks are generated at day granularity
                    task_timedelta = "1 d"
                if end_time <= start_time:
                    continue

                self.compare_task(
                    task_name, tasks, alter_type, task_config, tid, task_id
                )

                # slice the range into windows; start_time is floored to a
                # period boundary (padding backwards)
                start_time = self.handle_start_time(start_time, task_timedelta)
                time_list = pd.date_range(
                    start=start_time, end=end_time, freq=task_timedelta
                ).strftime("%Y-%m-%d %H:%M:%S")

                if (
                    generated_time is not None
                    and generated_time
                    >= self.gen_end_time(time_list[-1], task_timedelta)
                ):
                    # everything up to the last window was generated already
                    continue

                for tmp_start_time in time_list:
                    # windows are half-open: [start, end).  Extending
                    # end_time is fine -- e.g. a month of data still belongs
                    # to its quarter's indicator.
                    tmp_end_time = self.gen_end_time(
                        tmp_start_time, task_timedelta
                    )
                    # track how far generation has advanced
                    generated_time = tmp_end_time
                    judge_res = self.judge_task(
                        task_id, tmp_start_time, tmp_end_time
                    )
                    if judge_res:
                        log.info(
                            f"task_name:{task_name}作业生成过，跳过。"
                            f"task_id:{task_id},start_time:{tmp_start_time},end_time:{tmp_end_time}"
                        )
                        continue

                    # create the job record (status 2 = waiting)
                    self.gen_task_log(
                        task_id, task_name, 2, tmp_start_time, tmp_end_time
                    )
                    log.info(
                        f"task_name:{task_name}作业生成。task_id:{task_id},start_time:{tmp_start_time},end_time:{tmp_end_time}"
                    )

                # long-running incremental tasks stay in status 3 until the
                # upstream算法 output is complete; bounded tasks are done (1)
                if org_end_time is not None:
                    self.up_task_config_status(1, task_id)
                else:
                    self.up_task_config_status(3, task_id, generated_time)

            self.gen_once = True
            if self.mode == "loop":
                pass
            elif self.mode == "finish":
                break
            else:
                break

    def gen_task_log(self, *args, **kwargs):
        """Thin pass-through to gen_task_log (imported as ``gtl``)."""
        # BUGFIX: was gtl(*args, *kwargs), which unpacks the kwargs dict's
        # KEYS as extra positional arguments instead of forwarding them.
        gtl(*args, **kwargs)

    def lock_task_log(self, node_name, task_id, start_time, end_time):
        """Atomically claim a waiting/rewrite job for this node by writing
        node_name where it is still NULL; True if the row was won."""
        sql = "update task_log set node_name = %s where task_id = %s and start_time = %s and end_time = %s and node_name is null and task_status in (2, 4)"
        result = mysql_con.execute(
            sql_form(sql, [node_name, task_id, start_time, end_time])
        )
        if sum(result) > 0:
            return True
        return False

    def commit_job(self, task, must=False):
        """Try to execute one task_log job on this node.

        Handles timeout demotion (running -> failed), failed -> rewrite
        promotion, dependency gating, per-node capacity and distributed
        locking; ``must`` jobs run serially via parse_config, others via
        proc_task.
        """
        log_id = task["log_id"]
        task_id = task["task_id"]
        task_name = task["task_name"]
        task_timedelta = task["timedelta"]
        task_status = task["task_status"]
        start_time = task["start_time"]
        end_time = task["end_time"]
        depend_task = task["depend_task"]
        create_time = pd.to_datetime(task["create_time"])

        if (
            self.only_execute_task != []
            and task_id not in self.only_execute_task
        ):
            # must-tasks may also be pinned -- one of them may have failed.
            # With an explicit allow-list, skip jobs of every other task_id.
            return

        # a running job past its timeout is demoted to failed
        if (
            task_status == 3
            and not must
            and datetime.now()
            < create_time + relativedelta(minutes=config["dw_task_timeout"])
        ):
            return
        # must-jobs have unpredictable duration, so their timeout is longer
        elif (
            task_status == 3
            and must
            and datetime.now()
            < create_time
            + relativedelta(minutes=config["dw_must_task_timeout"])
        ):
            return
        elif task_status == 3:
            self.gen_task_log(task_id, task_name, 0, start_time, end_time)
            task_status = 0
            log.info(
                f"task_name:{task_name}作业执行过久，强制结束，变为失败状态。"
                f"log_id:{log_id},task_id:{task_id},start_time:{start_time},end_time:{end_time}"
            )

        if task_status == 0:
            # failed jobs get a fresh record in rewrite status 4
            self.gen_task_log(task_id, task_name, 4, start_time, end_time)
            task_status = 4

        # alter_type jobs had their status reconciled in compare_task above
        rewrite = True if task_status in [0, 4] else False

        # check whether unfinished dependencies block this job
        jd_res = self.judge_depend(start_time, end_time, depend_task)
        if jd_res:
            log.info(
                f"task_name:{task_name}作业存在依赖{depend_task}，无法分配。log_id:{log_id},"
                f"task_id:{task_id},start_time:{start_time},end_time:{end_time},"
                f"依赖log_id:{self.depend_log}"
            )
            return

        # job assignment may fail when cluster resources run out
        hostname = socket.gethostname()
        ip_address = socket.gethostbyname(hostname)

        # first check remaining capacity on this node
        nn_gtl_res = self.get_task_logs(
            task_status=[2, 3, 4], node_name=ip_address
        )
        log.info(f"当前节点node_name:{ip_address}已占用作业数{len(nn_gtl_res)}")
        if len(nn_gtl_res) >= config["dw_task_max_process"]:
            log.info(
                f"node_name:{ip_address}已经计算满作业，跳过分配。"
                f"task_id:{task_id},start_time:{start_time},end_time:{end_time}"
            )
            time.sleep(int(max(config["dw_busy_wait"], 1)))
            return
        # race other nodes for the job
        ltl_res = self.lock_task_log(ip_address, task_id, start_time, end_time)
        if not ltl_res:
            log.info(
                f"node_name:{ip_address}没有抢到作业。"
                f"task_id:{task_id},start_time:{start_time},end_time:{end_time}"
            )
            return

        if must:
            # must-jobs run serially to guarantee success
            parse_config(
                log,
                task,
                start_time,
                end_time,
                task_timedelta,
                rewrite=rewrite,
            )
        else:
            proc_task(
                log,
                task,
                start_time,
                end_time,
                task_timedelta,
                rewrite=rewrite,
            )
        log.info(
            f"task_name:{task_name}作业分配。task_id:{task_id},start_time:{start_time},end_time:{end_time}"
        )

    def attribute_main(self):
        """Execution loop: run must-jobs first (blocking everything else
        until they finish), then claim and run ordinary jobs.  Exit rules
        depend on ``self.mode`` ('loop' never exits, 'finish' exits when the
        backlog drains, anything else exits after one pass)."""
        while True:
            if not self.gen_once:
                if config["dw_execute_mode"] != "loop":
                    break
                # BUGFIX: was a bare `continue`, busy-spinning a CPU core
                # while waiting for the first generation pass to finish
                time.sleep(1)
                continue
            # For same-day incremental jobs a status refresh first is safe:
            # half-finished jobs are simply rewritten.  Only day-granularity
            # matters; minute-level would just be wasted work.
            # must-job (init) check first:
            self.unlock_error_log()

            init_tasks = self.get_task_logs([0, 2, 3, 4], alter_type="must")
            for task in init_tasks:
                self.commit_job(task, must=True)
            if init_tasks != []:
                log.info(f"必须执行的作业。log_id:{[i['log_id'] for i in init_tasks]}")
            init_tasks = self.get_task_logs([0, 2, 3, 4], alter_type="must")
            if init_tasks != []:
                log.info(
                    f"必须执行的作业存在未完成情况，跳过其他全部作业执行。log_id:{[i['log_id'] for i in init_tasks]}"
                )
                continue

            self.remove_task()

            if self.mode == "once":
                tasks = self.get_task_logs([0, 2, 3, 4])
                for task in tasks:
                    self.commit_job(task)

            if self.mode in ["finish", "once"] and not self.gen_increment:
                self.gen_increment_rewrite_task_log(
                    str(
                        datetime.now()
                    )
                )

            tasks = self.get_task_logs([0, 2, 3, 4])
            for task in tasks:
                self.commit_job(task)

            if self.mode == "loop":
                time.sleep(300)
            elif self.mode == "finish":
                time.sleep(30)
                if self.get_task_logs([0, 2, 3, 4]) == []:
                    break
            else:
                break


def main():
    """Start the job-generation thread, then run job execution in the main
    thread.  In non-loop modes the generation pass must complete before
    execution starts, so the generator thread is joined first."""
    ts = TaskSchedule()
    tgm = Thread(target=ts.gen_main)
    tgm.start()
    if config["dw_execute_mode"] != "loop":
        tgm.join()
    ts.attribute_main()


if __name__ == "__main__":
    # Guard added so importing this module no longer starts the scheduler
    # as a side effect; run the file directly to launch it.
    main()
