"""15min的后处理脚本"""
import time
from datetime import datetime, timedelta

import numpy as np
import pandas as pd

from air_web.dw.dws_common import AggCommon


class AggCityReport(AggCommon):
    """Finest-granularity (15-minute) aggregation for the city report.

    Point-for-point comparison of the current day's 15-minute load curve
    against the baseline day (the previous day), per organisation and
    industry type; the max/avg adjustment values are saved to the report
    table configured in ``step_base_dict``.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_base_dict,
        timedel,
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_base_dict,
            timedel,
            rewrite,
        )
        # Baseline window = the aggregation window shifted back one day.
        self.baseline_date = self.start_time - timedelta(days=1)
        self.baseline_date_end = self.end_time - timedelta(days=1)

    def convert_struct(self, df):
        """Pivot a long (org_no, type_id, p_kt_diff) frame into one row
        per org_no with one column per type_id.

        NOTE(review): assumes every org_no carries the same set of
        type_ids so the per-org lists line up with ``type_id_list`` —
        confirm with callers.
        """
        df.sort_values("type_id", inplace=True)
        # BUGFIX: was ``drop_fuplicates`` (typo -> AttributeError).
        type_id_list = df["type_id"].drop_duplicates().tolist()
        # BUGFIX: the grouped result was computed but discarded; it must
        # be assigned so the next line can expand the per-org lists.
        df = df.groupby(["org_no"])["p_kt_diff"].apply(list).reset_index()
        # One column per type_id, in sorted type_id order.
        df[type_id_list] = pd.DataFrame(df['p_kt_diff'].values.tolist())
        return df

    def query_diff_df(self, type_id):
        """Query the point-for-point load diff (baseline day minus current
        day) per org_no/type_id/quarter-hour.

        ``type_id`` may be a literal id (e.g. 0) or a sub-select returning
        ids; it is interpolated into the SQL ``in (...)`` clause.
        """
        s_sql = f"""   
                   select a.org_no, a.type_id,a.quarter, b.p_kt_sum-a.p_kt_sum as p_kt_diff
                   from
                   ((select
                       org_no,type_id,
                       date_format( data_time, '%%H:%%i' ) quarter,
                       p_kt_sum
                   from
                       orgno_typeid_15min 
                   where
                        data_time >= '{self.start_time}' and data_time < '{self.end_time}'
                       and type_id in ({type_id})
                       and org_no in (select org_no from real_org_no where org_level < 3)
                   ) a
                   left join (	select
                       org_no,type_id,
                       date_format( data_time, '%%H:%%i' ) quarter,
                       p_kt_sum
                   from
                       orgno_typeid_15min 
                   where
                       data_time >= '{self.baseline_date}' and data_time<'{self.baseline_date_end}'
                       and type_id in ({type_id})
                       and org_no in (select org_no from real_org_no where org_level < 3)
                   ) b on a.org_no=b.org_no and a.type_id=b.type_id and a.quarter = b.quarter) 
                 """
        diff_df = self.sql_engine.query(s_sql)
        return diff_df

    def get_diff_data(self):
        """Build ``self.res_df`` with the per-org/per-type adjustment load.

        Current-day average adjustment: mean of the 96 point-for-point
        (baseline-day minus current-day) diffs.
        Current-day max adjustment: max of the same 96 diffs.
        """
        # Province/city/county, all industries combined (type_id = 0).
        org_diff_df = self.query_diff_df(0)
        # Sort descending then drop duplicates: keeps the max-diff row
        # (and its quarter) per (org_no, type_id).
        org_diff_df = org_diff_df.sort_values("p_kt_diff", ascending=False)
        max_org_diff_df = org_diff_df.drop_duplicates(["org_no", "type_id"])
        avg_org_diff_df = org_diff_df.groupby(["org_no", "type_id"])["p_kt_diff"].mean().reset_index()

        # Province/city/county, per industry.
        type_sql = "select trade_id from trade_code_id_rela"
        type_diff_df = self.query_diff_df(type_sql)
        # Per-industry values are taken at the quarter where the all-industry
        # diff peaked for that org, not at each industry's own peak.
        max_type_diff_df = pd.merge(max_org_diff_df[["org_no", "quarter"]],
                                    type_diff_df, on=["org_no", "quarter"], how="left")
        avg_type_diff_df = type_diff_df.groupby(["org_no", "type_id"])[
            "p_kt_diff"].mean().reset_index()

        max_df = pd.concat([max_org_diff_df[["org_no", "type_id", "p_kt_diff"]],
                            max_type_diff_df[["org_no", "type_id", "p_kt_diff"]]])
        avg_df = pd.concat([avg_org_diff_df, avg_type_diff_df])

        self.res_df = pd.merge(max_df, avg_df, on=["org_no", "type_id"], suffixes=('_max', '_avg'))
        self.res_df.rename(columns={"p_kt_diff_max": "max_p_kt_diff",
                                    "p_kt_diff_avg": "avg_p_kt_diff"}, inplace=True)
        self.res_df["data_time"] = self.start_time

    def get_last_time_data(self):
        """Return True when the dependent task succeeded for this window
        and its 15-minute output rows actually exist; False otherwise.
        """
        task = self.task
        depend_task = task["depend_task"]
        # A log row exists whatever the outcome (it may record a failure);
        # no row at all means the dependency never ran for this window.
        sql = f"""select task_status from task_log
        where is_remove = 0 and task_name = '{depend_task}' and start_time = '{self.start_time}' and end_time = '{self.end_time}' """
        result_df = self.sql_engine.query(sql)
        # .iloc[0]: int(Series) is deprecated and raises when len != 1.
        if not result_df.empty and int(result_df["task_status"].iloc[0]) == 1:
            sql = f"""select count(1) as count from orgno_typeid_15min
                      where data_time >= '{self.start_time}' and data_time < '{self.end_time}' """
            result_df = self.sql_engine.query(sql)
            if int(result_df["count"].iloc[0]) > 0:
                return True
        return False

    def gen_max_kt_df(self):
        """Query the max 15-minute load per (org_no, type_id) within the
        current window, restricted to org_level < 3 organisations.
        """
        sql = f"""select org_no, type_id,  max(p_kt_sum) max_p_kt 
                  from {self.source_table} 
                  where data_time >= '{self.start_time}' and data_time < '{self.end_time}'
                    and org_no in (select org_no from real_org_no where org_level < 3)
                  group by org_no, type_id
                """
        max_kt_df = self.sql_engine.query(sql)
        return max_kt_df

    def main(self):
        """Entry point: compute diffs and max load, merge, and persist."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        # NOTE(review): the dependency check is intentionally disabled;
        # re-enable once the depend_task logging is reliable.
        # if not self.get_last_time_data():
        #     self.logger.warning(
        #         "今日依赖结果未生成,task_id:{},step:{},star_time:{} ...".format(
        #             self.task_id, self.step, self.start_time
        #         )
        #     )
        #     return

        max_kt_df = self.gen_max_kt_df()
        self.get_diff_data()
        self.res_df = self.res_df.merge(max_kt_df, on=["org_no", "type_id"])
        # NaN -> None so the DB driver writes NULL instead of 'nan'.
        self.res_df = self.res_df.replace({np.nan: None})
        self.save_data_to_table()
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time
            )
        )


if __name__ == "__main__":
    import os

    from air_web.dw.logger import init_log

    if not os.path.exists("./logs/"):
        os.mkdir("./logs/")
    log = init_log("./logs/")

    step_dict = {"source_table":"orgno_typeid_15min","save_table": "city_report"}
    
    start_date = "2024-07-24"
    end_date = "2024-07-25"
    date_list = pd.date_range(start=start_date, end=end_date, freq="1 D").strftime("%Y-%m-%d %H:%M:%S")
    for i in range(len(date_list) - 1):
        start_time = date_list[i]
        end_time = date_list[i + 1]
        AggCityReport(
            log,
            {"task_id": 4},
            start_time,
            end_time,
            "step",
            step_dict,
            "1 D",
            rewrite=False,
        ).main()
