"""15min的后处理脚本"""
import time

import pandas as pd

import ruleng.es
from air_web.config.config import config
from air_web.dw.data_mapping import DataType, DateType
from air_web.dw.dws_common import AggCommon

ConnectionTimeout = ruleng.es.es_mod.exceptions.ConnectionTimeout


class AggConsDay(AggCommon):
    """Per-consumer daily aggregation at the finest granularity.

    For every (on5, on7) organisation pair found in ``merge_cons``, queries
    the per-org Elasticsearch load index and extracts, for each consumer and
    day: the day-max air-conditioning (KT) load record, the day-max total
    load record, and the day's average KT/total load.  The combined result
    is ranked by ``max_p_total`` and saved via the base-class machinery.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_base_dict,
        timedelta,
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_base_dict,
            timedelta,
            rewrite,
        )

    def proc_agg_data(self, agg_res):
        """Flatten nested ES aggregation buckets into a flat DataFrame.

        :param agg_res: ``group_by_cid_date`` terms buckets, one per
            consumer, each holding a ``group_by_date`` date-histogram with
            the two day-max filter sub-aggregations.
        :return: DataFrame with one row per (cons_no, day); may be empty.
        """
        res_list = []
        for o_bucket in agg_res:
            cons_no = o_bucket["key"]
            i_bucket = o_bucket["group_by_date"]["buckets"]
            for bucket in i_bucket:
                # A day without both a KT-max hit and a total-max hit (or
                # with missing fields) is incomplete; skip it rather than
                # abort the whole batch.
                try:
                    tmp_hits = bucket["filter_is_day_max_2"][
                        "top_hits_is_day_max_2"
                    ]["hits"]["hits"][0]["_source"]
                    tmp_hits_1 = bucket["filter_is_day_max_1"][
                        "top_hits_is_day_max_1"
                    ]["hits"]["hits"][0]["_source"]
                    tmp_dict = {
                        "cons_no": cons_no,
                        "data_time": str(self.start_time)[:10],
                        "max_p_kt": tmp_hits["p_kt"],
                        "max_p_kt_total": tmp_hits["p_total"],
                        "max_p_kt_rate": tmp_hits["kt_ratio"],
                        "max_p_kt_time": tmp_hits["data_time"],
                        "avg_p_kt": bucket["avg_p_kt_all_day_max"]["value"],
                        "avg_p_total": bucket["avg_p_total_all_day_max"][
                            "value"
                        ],
                        "max_p_total_kt": tmp_hits_1["p_kt"],
                        "max_p_total": tmp_hits_1["p_total"],
                        "max_p_total_kt_rate": tmp_hits_1["kt_ratio"],
                        "max_p_total_time": tmp_hits_1["data_time"],
                    }
                    res_list.append(tmp_dict)
                except (KeyError, IndexError, TypeError):
                    # Was a bare ``except``; narrowed to the lookup errors
                    # the bucket/dict access can actually raise.
                    continue
        df = pd.DataFrame(res_list)
        return df

    def agg_base_df(self, merge_df):
        """Aggregate at the finest granularity per (on5, on7) organisation.

        :param merge_df: DataFrame with at least ``on5`` and ``on7`` columns
            describing the organisations to process.
        :return: concatenated, ranked DataFrame (empty DataFrame when no
            organisation produced data).
        :raises ConnectionTimeout: when all ES search retries are exhausted.
        """
        merge_df = merge_df.sort_values(["on5", "on7"])
        on7_dict = (
            merge_df[["on7", "on5"]]
            .drop_duplicates()
            .set_index("on7")["on5"]
            .to_dict()
        )

        base_df = pd.DataFrame()
        for on7, on5 in on7_dict.items():
            self.logger.info(
                "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )

            query = {
                "size": 0,
                "query": {
                    "bool": {
                        "must": [
                            {
                                "range": {
                                    "data_time": {
                                        "gte": self.start_time,
                                        "lt": self.end_time,
                                    }
                                }
                            }
                        ]
                    }
                },
                "aggs": {
                    "group_by_cid_date": {
                        "terms": {"size": 10000, "field": "cons_no"},
                        "aggs": {
                            "group_by_date": {
                                "date_histogram": {
                                    "field": "data_time",
                                    "calendar_interval": "day",
                                },
                                "aggs": {
                                    "filter_is_day_max_2": {
                                        "filter": {
                                            "terms": {"is_day_max": [2, 3]}
                                        },
                                        "aggs": {
                                            "top_hits_is_day_max_2": {
                                                "top_hits": {
                                                    "size": 1,
                                                    "_source": [
                                                        "p_kt",
                                                        "p_total",
                                                        "kt_ratio",
                                                        "data_time",
                                                        "cons_no",
                                                    ],
                                                    # Sort added for
                                                    # determinism,
                                                    # matching ConsDayStat.
                                                    "sort": [
                                                        {"p_kt": {"order": "desc"}}
                                                    ],
                                                }
                                            }
                                        },
                                    },
                                    "avg_p_kt_all_day_max": {
                                        "avg": {"field": "p_kt"}
                                    },
                                    "avg_p_total_all_day_max": {
                                        "avg": {"field": "p_total"}
                                    },
                                    "filter_is_day_max_1": {
                                        "filter": {
                                            "terms": {"is_day_max": [1, 3]}
                                        },
                                        "aggs": {
                                            "top_hits_is_day_max_1": {
                                                "top_hits": {
                                                    "size": 1,
                                                    "_source": [
                                                        "p_kt",
                                                        "p_total",
                                                        "kt_ratio",
                                                        "data_time",
                                                        "cons_no",
                                                    ],
                                                    # Sort added for
                                                    # determinism,
                                                    # matching ConsDayStat.
                                                    "sort": [
                                                        {"p_total": {"order": "desc"}}
                                                    ],
                                                }
                                            }
                                        },
                                    },
                                },
                            }
                        },
                    }
                },
            }

            is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
            index_name = (
                config["POWERLOAD"]
                .replace("on5", str(on5))
                .replace("on7", str(on7))
                .replace("cal01", is_cal)
            )
            t1 = time.time()
            last_exc = None
            for i in range(10):
                try:
                    result = self.es.search(
                        index=index_name, body=query, request_timeout=300
                    )
                    break
                except ConnectionTimeout as exc:
                    last_exc = exc
                    self.logger.info(
                        "search失败次数:{},task_id:{},step:{},star_time:{},index:{},org:{} ...".format(
                            i + 1,
                            self.task_id,
                            self.step,
                            self.start_time,
                            index_name,
                            on7,
                        )
                    )
            else:
                # All retries timed out: ``result`` would be unbound below
                # (the original raised NameError here); surface the real
                # cause instead.
                raise last_exc
            t2 = time.time()
            self.logger.info(
                "time:{},task_id:{},step:{},star_time:{},index:{},org:{} ...".format(
                    t2 - t1,
                    self.task_id,
                    self.step,
                    self.start_time,
                    index_name,
                    on7,
                )
            )
            agg_res = result["aggregations"]["group_by_cid_date"]["buckets"]
            if len(agg_res) == 0:
                self.logger.info(
                    "数据为空，跳过task_id:{},step:{},star_time:{},index:{},org:{} ...".format(
                        self.task_id,
                        self.step,
                        self.start_time,
                        index_name,
                        on7,
                    )
                )
                continue
            org_df = self.proc_agg_data(agg_res)

            base_df = pd.concat([base_df, org_df])
        if base_df.empty:
            return pd.DataFrame()

        base_df = base_df.astype({"data_time": "datetime64[ns]"})
        # Rank all consumers by their day-max total load, 1 = largest.
        base_df = base_df.sort_values(by="max_p_total", ascending=False)
        base_df["max_p_total_rank"] = range(1, len(base_df) + 1)
        self.res_df = base_df
        return base_df

    def get_merge_df(self):
        """Return the org mapping rows (``merge_cons``) used for merging."""
        merge_df = self.sql_engine.query("select * from merge_cons")
        return merge_df

    def main(self):
        """Run the aggregation end-to-end and persist the result table."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        merge_df = self.get_merge_df()
        base_df = self.agg_base_df(merge_df)
        self.save_data_to_table()
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{},数据条数:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(base_df)
            )
        )
        return base_df


class ConsDayStat(AggCommon):
    """Per-consumer daily statistics enriched with baseline and day flags.

    Extends the basic day-max aggregation by joining, per consumer:
    the baseline load curve at the day-max timestamps (from the baseline
    SQL table), the air-conditioning/outlier day flags (from ES), and the
    regional temperature data.  Results are ranked by ``max_p_total`` and
    saved via the base-class machinery.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_base_dict,
        timedelta,
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_base_dict,
            timedelta,
            rewrite,
        )

    def get_day_df(self, on5_df, on5):
        """Left-join the per-consumer day flags onto ``on5_df``.

        :param on5_df: daily records for one on5 org; may be empty.
        :param on5: on5 organisation code, substituted into the index name.
        :return: ``on5_df`` with ``is_base_day``/``outlier`` merged in when
            flag data exists; otherwise unchanged.
        """
        if on5_df.empty:
            # An empty frame has no ``cons_no`` column, so the merge below
            # would raise KeyError; nothing to annotate anyway.
            return on5_df
        rules = [
            ("data_date", "query", ">=", self.start_time),
            ("data_date", "query", "<", self.end_time),
        ]
        index_name = config["ACLR_MODEL_INTERNAL_IDX"].replace("on5", str(on5))
        day_df = self.es_dal.query_dataframe(
            rules, index_name, doc_time_field="data_date"
        )
        if day_df.empty:
            self.logger.info(
                "查询空调日标识数据为空,task_id:{},step:{},star_time:{},index:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, index_name, on5
                )
            )
        else:
            day_df = day_df[["cons_no", "is_base_day", "outlier"]]
            on5_df = on5_df.merge(day_df, on="cons_no", how="left")
        return on5_df

    def proc_baseline_agg_data(self, agg_res, field_list):
        """Extract ``field_list`` columns from raw ES hits into a DataFrame.

        :param agg_res: iterable of ES hit dicts with a ``_source`` payload.
        :param field_list: names of the ``_source`` fields to keep.
        """
        res_list = [
            {field: row["_source"][field] for field in field_list}
            for row in agg_res
        ]
        res_df = pd.DataFrame(res_list)
        return res_df

    def get_baseline_df(self, org_df, on7):
        """Join baseline total/KT load at each day-max timestamp.

        For both day-max timestamp columns (``max_p_kt_time`` and
        ``max_p_total_time``) the timestamp is mapped to its 1-based
        15-minute slot index ``idx`` of the day, and the baseline table's
        ``p_{idx}`` column supplies the baseline values.

        :param org_df: daily records for one on7 org (non-empty).
        :param on7: on7 organisation code, used only for logging.
        :return: ``org_df`` with ``*_bl_total``/``*_bl_kt`` columns merged.
        """
        table_name = config["ACLR_RES_BASELINE"]

        # Build the day's 96 x 15-minute grid and its 1-based slot index.
        time_list = pd.date_range(self.start_time, periods=96, freq="15min")
        time_df = pd.DataFrame(time_list, columns=["data_time"])
        time_df["idx"] = time_df["data_time"].map(
            lambda x: int((x - self.start_time).total_seconds() // 60 / 15) + 1
        )
        time_df["data_time"] = time_df["data_time"].dt.strftime(
            "%Y-%m-%d %H:%M:%S"
        )

        for key in ["max_p_kt", "max_p_total"]:
            time_col = f"{key}_time"
            cons_df = org_df[["cons_no", time_col]].rename(
                columns={time_col: "data_time"}
            )
            cons_df = cons_df.merge(time_df, on="data_time", how="left")

            # Group consumers by the slot index of their day-max, so one
            # query per distinct slot covers all its consumers at once.
            cons_dict = (
                cons_df[["cons_no", "idx"]]
                .groupby("idx")["cons_no"]
                .agg(list)
                .to_dict()
            )

            baseline_df = pd.DataFrame()
            for idx, cons_list in cons_dict.items():
                p_field = f"p_{idx}"
                # NOTE(review): SQL built by string interpolation.  Values
                # come from internal ES/config data, but parameterized
                # queries would be safer — confirm the sql engine supports
                # them before changing.
                sql = f"""select a.cons_no,
                                 a.{p_field} as {key}_bl_total,
                                 b.{p_field} as {key}_bl_kt
                          from {table_name} a
                          join {table_name} b
                          on a.cons_no=b.cons_no
                            and a.data_date=b.data_date
                            and b.cons_no in ('{"','".join(cons_list)}')
                            and b.data_date = '{self.start_time}'
                            and b.data_type = {DataType.P_KT}
                          where a.cons_no in ('{"','".join(cons_list)}')
                            and a.data_date = '{self.start_time}'
                            and a.data_type = {DataType.P_TOTAL}
                       """
                df = self.sql_engine_cons.query(sql)
                if df.empty:
                    self.logger.info(
                        "查询基线数据为空,task_id:{},step:{},star_time:{},table:{},org:{} ...".format(
                            self.task_id,
                            self.step,
                            self.start_time,
                            table_name,
                            on7,
                        )
                    )
                    continue
                df["idx"] = idx
                baseline_df = pd.concat([baseline_df, df])

            if not baseline_df.empty:
                # Map slot indices back to timestamps and join on the
                # original (cons_no, time) pair.
                baseline_df = baseline_df.merge(time_df, on="idx", how="left")
                baseline_df = baseline_df.drop("idx", axis=1)
                baseline_df.rename(
                    columns={"data_time": time_col}, inplace=True
                )
                org_df = org_df.merge(
                    baseline_df, on=["cons_no", time_col], how="left"
                )

        return org_df

    def proc_agg_data(self, agg_res):
        """Flatten nested ES aggregation buckets into a flat DataFrame.

        :param agg_res: ``group_by`` terms buckets, one per consumer, each
            holding a ``group_by_date`` date-histogram with the two
            day-max filter sub-aggregations.
        :return: DataFrame with one row per (cons_no, day); may be empty.
        """
        res_list = []
        for o_bucket in agg_res:
            cons_no = o_bucket["key"]
            i_bucket = o_bucket["group_by_date"]["buckets"]
            for bucket in i_bucket:
                # A day without both a KT-max hit and a total-max hit (or
                # with missing fields) is incomplete; skip it rather than
                # abort the whole batch.
                try:
                    tmp_hits = bucket["filter_is_day_max_2"][
                        "top_hits_is_day_max_2"
                    ]["hits"]["hits"][0]["_source"]
                    tmp_hits_1 = bucket["filter_is_day_max_1"][
                        "top_hits_is_day_max_1"
                    ]["hits"]["hits"][0]["_source"]
                    tmp_dict = {
                        "cons_no": cons_no,
                        "date_type": DateType.DAY,
                        "data_date": str(self.start_time)[:10],
                        "max_p_kt": tmp_hits["p_kt"],
                        "max_p_kt_total": tmp_hits["p_total"],
                        "max_p_kt_time": tmp_hits["data_time"],
                        "avg_p_kt": bucket["avg_p_kt_all_day_max"]["value"],
                        "avg_p_total": bucket["avg_p_total_all_day_max"][
                            "value"
                        ],
                        "max_p_total_kt": tmp_hits_1["p_kt"],
                        "max_p_total": tmp_hits_1["p_total"],
                        "max_p_total_time": tmp_hits_1["data_time"],
                    }
                    res_list.append(tmp_dict)
                except (KeyError, IndexError, TypeError):
                    # Was a bare ``except``; narrowed to the lookup errors
                    # the bucket/dict access can actually raise.
                    continue
        df = pd.DataFrame(res_list)
        return df

    def agg_base_df(self, merge_df, tmp_df):
        """Aggregate at the finest granularity and join all enrichments.

        :param merge_df: DataFrame with ``on5``, ``on7`` and ``region_code``
            columns describing the organisations to process.
        :param tmp_df: regional temperature data keyed by ``region_code``;
            may be empty.
        :return: concatenated, enriched, ranked DataFrame (empty DataFrame
            when no organisation produced data).
        """
        merge_df = merge_df.sort_values(["on5", "on7"])
        on5_on7_dict = (
            merge_df[["on7", "on5"]]
            .drop_duplicates()
            .groupby("on5")["on7"]
            .agg(list)
            .to_dict()
        )
        on7_region_dict = (
            merge_df[["on7", "region_code"]]
            .drop_duplicates()
            .set_index("on7")["region_code"]
            .to_dict()
        )

        base_df = pd.DataFrame()
        for on5, on7_list in on5_on7_dict.items():
            on5_df = pd.DataFrame()
            for on7 in on7_list:
                self.logger.info(
                    "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                        self.task_id, self.step, self.start_time, on7
                    )
                )
                region_code = on7_region_dict[on7]

                query = {
                    "size": 0,
                    "query": {
                        "bool": {
                            "must": [
                                {
                                    "range": {
                                        "data_time": {
                                            "gte": self.start_time,
                                            "lt": self.end_time,
                                        }
                                    }
                                }
                            ]
                        }
                    },
                    "aggs": {
                        "group_by": {
                            "terms": {"size": 10000, "field": "cons_no"},
                            "aggs": {
                                "group_by_date": {
                                    "date_histogram": {
                                        "field": "data_time",
                                        "calendar_interval": "day",
                                    },
                                    "aggs": {
                                        "filter_is_day_max_2": {
                                            "filter": {
                                                "terms": {"is_day_max": [2, 3]}
                                            },
                                            "aggs": {
                                                "top_hits_is_day_max_2": {
                                                    "top_hits": {
                                                        "size": 1,
                                                        "_source": [
                                                            "p_kt",
                                                            "p_total",
                                                            "kt_ratio",
                                                            "data_time",
                                                            "cons_no",
                                                        ],
                                                        "sort": [
                                                            {"p_kt": {"order": "desc"}}
                                                        ]
                                                    }
                                                }
                                            },
                                        },
                                        "avg_p_kt_all_day_max": {
                                            "avg": {"field": "p_kt"}
                                        },
                                        "avg_p_total_all_day_max": {
                                            "avg": {"field": "p_total"}
                                        },
                                        "filter_is_day_max_1": {
                                            "filter": {
                                                "terms": {"is_day_max": [1, 3]}
                                            },
                                            "aggs": {
                                                "top_hits_is_day_max_1": {
                                                    "top_hits": {
                                                        "size": 1,
                                                        "_source": [
                                                            "p_kt",
                                                            "p_total",
                                                            "kt_ratio",
                                                            "data_time",
                                                            "cons_no",
                                                        ],
                                                        "sort": [
                                                            {"p_total": {"order": "desc"}}
                                                        ]
                                                    }
                                                }
                                            },
                                        },
                                    },
                                }
                            },
                        }
                    },
                }

                is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
                index_name = (
                    config["POWERLOAD"]
                    .replace("on5", str(on5))
                    .replace("on7", str(on7))
                    .replace("cal01", is_cal)
                )
                agg_res = self.search_es(index_name, query, on7)
                on7_df = self.proc_agg_data(agg_res)
                if on7_df.empty:
                    continue

                on7_df["on5"] = on5
                on7_df["on7"] = on7
                on7_df["region_code"] = region_code

                # Join baseline curves at the day-max timestamps.
                on7_df = self.get_baseline_df(on7_df, on7)

                on5_df = pd.concat([on5_df, on7_df])

            # Join air-conditioning day / outlier-day flags.
            on5_df = self.get_day_df(on5_df, on5)

            base_df = pd.concat([base_df, on5_df])
        if base_df.empty:
            return pd.DataFrame()

        # Join regional temperatures, then drop the join key.
        if not tmp_df.empty:
            base_df = base_df.merge(tmp_df, on="region_code", how="left")
        base_df = base_df.drop(["region_code"], axis=1)

        # Rank all consumers by their day-max total load, 1 = largest.
        base_df = base_df.sort_values(by="max_p_total", ascending=False)
        base_df["max_p_total_rank"] = range(1, len(base_df) + 1)
        self.res_df = base_df
        return base_df

    def get_merge_df(self):
        """Return ``merge_cons`` rows with each on7's ``region_code`` joined."""
        sql = """select mc.*, region_code 
                 from merge_cons mc
                 left join 
                    (select distinct on7, region_code
                    from aclr_base_doc_all
                    )ab on ab.on7=mc.on7
              """
        merge_df = self.sql_engine.query(sql)
        return merge_df

    def main(self):
        """Run the daily statistics end-to-end and persist the result table."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        merge_df = self.get_merge_df()
        tmp_df = self.get_temperature_data()
        base_df = self.agg_base_df(merge_df, tmp_df)
        self.save_data_to_table()
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{},数据条数:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(base_df)
            )
        )
        return base_df


if __name__ == "__main__":
    from air_web.dw.logger import init_log

    log = init_log(None)
    step_dict = {"save_table": "cons_ymd_stat"}
    ConsDayStat(
        log,
        {"task_id": 4},
        "2024-07-31 00:00:00",
        "2024-08-01 00:00:00",
        "step",
        step_dict,
        "1 D",
        rewrite=False,
    ).main()
