"""15min的后处理脚本"""
import time
from collections import defaultdict

import numpy as np
import pandas as pd

import ruleng.es
from air_web.config.config import config, dws_agg_conf
from air_web.dw.dws_common import AggCommon


class AggMain(AggCommon):
    """Aggregation for config entries of type "agg" (基于配置中type为agg的聚合).

    Aggregates the pre-aggregated 15-minute base DataFrame along the
    configured dimensions, saves the result to the database table and,
    for non-baseline runs, also writes 96-point daily curves to ES.
    """

    # Indicator columns summed in a normal (measured-data) run.
    INDICATORS_COLS = [
        "p_total_sum",
        "p_kt_sum",
        "p_base_sum",
        "p_total_count",
        "p_kt_count",
        "p_base_count",
    ]
    # Indicator columns summed in a baseline run (no p_base_* columns).
    BASELINE_INDICATORS_COLS = [
        "p_total_sum",
        "p_kt_sum",
        "p_total_count",
        "p_kt_count",
    ]

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        rewrite,
        base_df,
        is_baseline,
    ):
        """
        Args:
            logger: shared logger instance.
            task: task descriptor handed through to AggCommon.
            start_time: start of the aggregation window.
            end_time: end of the aggregation window.
            step: aggregation step identifier.
            step_dict: per-step configuration dict.
            rewrite: whether existing results may be overwritten.
            base_df: pre-aggregated base DataFrame to aggregate further.
            is_baseline: True when aggregating baseline (基线) data.
        """
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        self.base_df = base_df
        self.is_baseline = is_baseline
        # Baseline data has no p_base_* columns, so fewer indicators.
        cols = (
            self.BASELINE_INDICATORS_COLS
            if self.is_baseline
            else self.INDICATORS_COLS
        )
        self.indicators = {col: "sum" for col in cols}

    def gen_bulk_actions(self):
        """Build ES bulk "index" actions from ``self.res_df``.

        Each action stores one 96-slot daily curve per virtual consumer
        id ``{type_id}_{org_no}``. Missing 15-minute slots are
        forward-filled from the previous slot; leading missing slots
        fall back to "0".

        Returns:
            list[dict]: bulk actions consumable by ``do_bulk``.
        """
        actions = []
        # .copy() so the astype() assignments below write into an
        # independent frame instead of a view of res_df
        # (avoids pandas SettingWithCopyWarning / silent no-op).
        tdf = self.res_df[
            [
                "org_no",
                "type_id",
                "data_time",
                "p_total_sum",
                "p_kt_sum",
                "p_base_sum",
            ]
        ].copy()
        tdf["org_no"] = tdf["org_no"].astype(int)
        tdf["type_id"] = tdf["type_id"].astype(int)
        # {vcons_id: {day: [96 entries, each "" or (total, kt, base)]}}
        trans_format_d = defaultdict(dict)
        for value in tdf.values:
            exact_time = value[2]
            day = exact_time.strftime("%Y-%m-%dT00:00:00")
            vcons_id = f"{value[1]}_{value[0]}"
            p_total_sum = "%.4f" % value[3]
            p_kt_sum = "%.4f" % value[4]
            p_base_sum = "%.4f" % value[5]
            if day not in trans_format_d[vcons_id]:
                trans_format_d[vcons_id][day] = [""] * 96
            # Slot index within the day: 4 slots per hour, 15 min each.
            offset = (exact_time.hour * 4) + (exact_time.minute // 15)
            trans_format_d[vcons_id][day][offset] = (
                p_total_sum,
                p_kt_sum,
                p_base_sum,
            )

        for vcons_id, vcons_v_set in trans_format_d.items():
            for data_date, value in vcons_v_set.items():
                p_total_l = []
                p_kt_l = []
                p_base_l = []
                for v in value:
                    if isinstance(v, tuple):
                        p_total_l.append(v[0])
                        p_kt_l.append(v[1])
                        p_base_l.append(v[2])
                    elif p_total_l:
                        # Empty slot: carry the previous value forward.
                        # (The three lists grow in lockstep, so checking
                        # p_total_l covers all of them.)
                        p_total_l.append(p_total_l[-1])
                        p_kt_l.append(p_kt_l[-1])
                        p_base_l.append(p_base_l[-1])
                    else:
                        # Leading empty slot: nothing to carry yet.
                        p_total_l.append("0")
                        p_kt_l.append("0")
                        p_base_l.append("0")
                        self.logger.warning(
                            "vcons_id:%s,########,%s", vcons_id, v
                        )
                source = {
                    "data_date": data_date,
                    "id": vcons_id,
                    "value": ",".join(p_total_l),
                    "p_base_l": ",".join(p_base_l),
                    "p_kt_l": ",".join(p_kt_l),
                    "ct": 1,
                    "pt": 1,
                }
                action = {
                    "_op_type": "index",
                    "_index": config["POWER96BYDAY_VIRTUAL"],
                    "_source": source,
                    # Deterministic _id so re-runs overwrite instead of
                    # duplicating documents.
                    "_id": f"{vcons_id}_{data_date}",
                }
                actions.append(action)
        return actions

    def save_data_to_es(self):
        """Filter/reshape ``self.res_df`` per config and bulk-write to ES."""
        if (
            "on7" in self.dimensions and "type_id" in self.dimensions
        ):  # Keep only districts / industries listed in the config.
            virtual_on7 = dws_agg_conf["virtual_on7"]
            virtual_type = dws_agg_conf["virtual_type"]
            if virtual_on7 and virtual_type:
                self.res_df = self.res_df.loc[
                    (self.res_df["org_no"].isin(virtual_on7))
                    | (self.res_df["type_id"].isin(virtual_type))
                ]
            elif virtual_on7:
                self.res_df = self.res_df.loc[
                    self.res_df["org_no"].isin(virtual_on7)
                ]
            elif virtual_type:
                self.res_df = self.res_df.loc[
                    self.res_df["type_id"].isin(virtual_type)
                ]
            # Neither filter configured: keep res_df unchanged.
        elif (
            "type_code_sort" in self.dimensions
            and "pare_type_id" not in self.dimensions
        ):  # Merge public/dedicated transformer rows into one
            # province-wide total stored under type_id "99999".
            # (A type_id=0 all-industry aggregate was added later and
            # could replace this — TODO confirm before switching.)
            self.res_df = (
                self.res_df.groupby(["org_no", "data_time"])
                .agg(
                    {
                        "p_total_sum": "sum",
                        "p_kt_sum": "sum",
                        "p_base_sum": "sum",
                    }
                )
                .reset_index()
            )
            self.res_df["type_id"] = "99999"
        # Drop rows with no total; they would serialize as "nan".
        self.res_df = self.res_df.loc[~self.res_df["p_total_sum"].isna()]
        actions = self.gen_bulk_actions()
        t1 = time.time()
        self.do_bulk(actions)
        t2 = time.time()
        self.logger.info(
            "虚拟用户写入es,time:{},task_id:{},step:{},star_time:{},数据条数:{}".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(actions)
            )
        )

    def main(self):
        """Main entry: run the full aggregation pipeline for this step."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        if self.base_df.empty:
            # Nothing to aggregate; abort this step.
            self.logger.error(
                "初始聚合数据为空，不能继续聚合task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        self.add_agg_dimensions(dws_agg_conf)
        self.agg_data()
        self.rename_add_dimensions(dws_agg_conf)
        self.rename_agg_indicators()
        self.add_max_time_flag()
        self.res_df["time_display_name"] = self.start_time

        # Columns that must not be updated for this step, if configured.
        if self.step in config['dw_task_base']:
            no_update_col_list = config['dw_task_base'][self.step]
        else:
            no_update_col_list = []
        self.save_data_to_table(no_update_col_list=no_update_col_list)

        # Baseline results are table-only; only measured data goes to ES.
        if dws_agg_conf["save_es"] and not self.is_baseline:
            self.logger.info(
                "正在保存es..task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            self.save_data_to_es()


class AggBase(AggCommon):
    """Smallest-granularity aggregation over HV and LV master meters
    only (最小颗粒度的聚合:只统计高压和低压总表, i.e. type_code_sort != 3)."""

    def __init__(
        self, logger, task, start_time, end_time, step, step_base_dict
    ):
        """
        Args:
            logger: shared logger instance.
            task: task descriptor handed through to AggCommon.
            start_time: start of the aggregation window.
            end_time: end of the aggregation window.
            step: aggregation step identifier.
            step_base_dict: per-step config; key "is_baseline" switches
                between measured-data and baseline aggregation.
        """
        super().__init__(
            logger, task, start_time, end_time, step, step_base_dict
        )
        self.is_baseline = step_base_dict.get("is_baseline")
        # Indicators / group-by columns / agg functions used to build
        # the ES aggregation request in AggCommon helpers.
        self.indicators = ["p_total", "p_kt", "p_base"]
        self.agg_columns = ["type_id", "data_time"]
        self.agg_types = ["sum", "count"]

    def agg_base_df(self, merge_df):
        """Aggregate at the smallest granularity: leaf org (on7), leaf
        industry (type_id), 15-minute time — one ES query per on7 org.

        Args:
            merge_df: DataFrame with at least on5/on7/type_id columns;
                its remaining columns are merged onto the result.

        Returns:
            pd.DataFrame: aggregated rows; empty frame if nothing matched.
        """
        merge_df = merge_df.sort_values(["on5", "on7"])
        # Map each leaf org (on7) to its parent org (on5).
        on7_dict = (
            merge_df[["on7", "on5"]]
            .drop_duplicates()
            .set_index("on7")["on5"]
            .to_dict()
        )
        self.logger.debug("on7_dict:%s", on7_dict)
        aggs_dict = self.get_aggs_dict()
        group_source = self.get_group_source()

        base_df = pd.DataFrame()
        for on7, on5 in on7_dict.items():
            self.logger.info(
                "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )

            # Exclude LV household meters (type_code_sort == 3) — those
            # are aggregated separately by AggLow.
            must_not_query = {"must_not": [{"term": {"type_code_sort": 3}}]}
            add_query = [{"term": {"on7": on7}}]

            if not self.is_baseline:
                # Measured-data aggregation.
                index_name = self.get_power_96_idx_name(on5, on7)
                query = self.get_es_query(
                    on7,
                    group_source,
                    aggs_dict,
                    add_query=add_query,
                    must_not_query=must_not_query,
                )
            else:
                # Baseline aggregation (separate index, fixed end time).
                index_name = self.get_baseline_idx_name()
                query = self.get_es_query(
                    on7,
                    group_source,
                    aggs_dict,
                    end_time=self.baseline_end_time,
                    add_query=add_query,
                    must_not_query=must_not_query,
                )
            # Query ES for the load data and flatten the agg response.
            agg_res = self.search_es(index_name, query, on7)
            org_df = self.proc_agg_data(agg_res)

            if org_df.empty:
                continue

            org_df["on7"] = on7
            base_df = pd.concat([base_df, org_df])
        if base_df.empty:
            return pd.DataFrame()

        base_df = base_df.astype({"data_time": "datetime64[ns]"})
        # Attach the extra dimension columns.
        base_df = base_df.merge(merge_df, on=["on7", "type_id"], how="inner")
        return base_df

    def get_merge_df(self):
        """Load the dimension rows to merge onto the aggregate.

        The base query already contains a WHERE clause, so the optional
        extra filter from config is appended with " and " — the previous
        code appended a second "where" (with no leading space), which
        produced invalid SQL whenever merge_cons_filter was configured.
        """
        s_sql = "select * from merge_cons where type_code_sort !=3"
        f_sql = config.get("merge_cons_filter")
        if f_sql:
            s_sql += " and "
            s_sql += f_sql
        merge_df = self.sql_engine.query(s_sql)
        return merge_df

    def main(self):
        """Run the base aggregation and return the resulting DataFrame."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        merge_df = self.get_merge_df()

        base_df = self.agg_base_df(merge_df)
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{},数据条数:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(base_df)
            )
        )
        return base_df


class AggLow(AggCommon):
    """Smallest-granularity aggregation over LV household meters only
    (最小颗粒度的聚合:只统计低压户表, i.e. type_code_sort == 3)."""

    def __init__(
        self, logger, task, start_time, end_time, step, step_base_dict
    ):
        """
        Args:
            logger: shared logger instance.
            task: task descriptor handed through to AggCommon.
            start_time: start of the aggregation window.
            end_time: end of the aggregation window.
            step: aggregation step identifier.
            step_base_dict: per-step config; key "is_baseline" switches
                between measured-data and baseline aggregation.
        """
        super().__init__(
            logger, task, start_time, end_time, step, step_base_dict
        )
        self.is_baseline = step_base_dict.get("is_baseline")
        # Indicators / group-by columns / agg functions used to build
        # the ES aggregation request in AggCommon helpers.
        self.indicators = ["p_total", "p_kt", "p_base"]
        self.agg_columns = ["type_id", "data_time"]
        self.agg_types = ["sum", "count"]

    def agg_base_df(self, merge_df):
        """Aggregate at the smallest granularity: leaf org (on7), leaf
        industry (type_id), 15-minute time — one ES query per on7 org.

        Args:
            merge_df: DataFrame with at least on5/on7/type_id columns;
                its remaining columns are merged onto the result.

        Returns:
            pd.DataFrame: aggregated rows; empty frame if nothing matched.
        """
        merge_df = merge_df.sort_values(["on5", "on7"])
        # Map each leaf org (on7) to its parent org (on5).
        on7_dict = (
            merge_df[["on7", "on5"]]
            .drop_duplicates()
            .set_index("on7")["on5"]
            .to_dict()
        )
        self.logger.debug("on7_dict:%s", on7_dict)
        aggs_dict = self.get_aggs_dict()
        group_source = self.get_group_source()

        base_df = pd.DataFrame()
        for on7, on5 in on7_dict.items():
            self.logger.info(
                "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )

            # Restrict to LV household meters (type_code_sort == 3);
            # master meters are aggregated separately by AggBase.
            add_query = [{"term": {"on7": on7}}, {"term": {"type_code_sort": 3}}]

            if not self.is_baseline:
                # Measured-data aggregation.
                index_name = self.get_power_96_idx_name(on5, on7)
                query = self.get_es_query(
                    on7, group_source, aggs_dict, add_query=add_query
                )
            else:
                # Baseline aggregation (separate index, fixed end time).
                index_name = self.get_baseline_idx_name()
                query = self.get_es_query(
                    on7,
                    group_source,
                    aggs_dict,
                    end_time=self.baseline_end_time,
                    add_query=add_query
                )

            # Query ES for the load data and flatten the agg response.
            agg_res = self.search_es(index_name, query, on7)
            org_df = self.proc_agg_data(agg_res)

            if org_df.empty:
                continue

            org_df["on7"] = on7
            base_df = pd.concat([base_df, org_df])
        if base_df.empty:
            return pd.DataFrame()

        base_df = base_df.astype({"data_time": "datetime64[ns]"})
        # Attach the extra dimension columns.
        base_df = base_df.merge(merge_df, on=["on7", "type_id"], how="inner")
        return base_df

    def get_merge_df(self):
        """Load the dimension rows to merge onto the aggregate.

        The base query already contains a WHERE clause, so the optional
        extra filter from config is appended with " and " — the previous
        code appended a second "where" (with no leading space), which
        produced invalid SQL whenever merge_cons_filter was configured.
        """
        s_sql = "select * from merge_cons where type_code_sort = 3"
        f_sql = config.get("merge_cons_filter")
        if f_sql:
            s_sql += " and "
            s_sql += f_sql
        merge_df = self.sql_engine.query(s_sql)
        return merge_df

    def main(self):
        """Run the low-voltage aggregation and return the DataFrame."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        merge_df = self.get_merge_df()
        base_df = self.agg_base_df(merge_df)
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{},数据条数:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(base_df)
            )
        )
        return base_df