"""15min的有序用电和需求响应用户后处理脚本"""
import time

import numpy as np
import pandas as pd

import ruleng.es
from air_web.config.config import config, dws_agg_conf
from air_web.dw.dws_common import AggCommon

ConnectionTimeout = ruleng.es.es_mod.exceptions.ConnectionTimeout


class SpecConsAggMain(AggCommon):
    """Roll-up aggregation for config entries whose type is "agg".

    Takes the finest-granularity base DataFrame (produced elsewhere, e.g. by
    ``SpecConsAggBase``), aggregates it along the configured dimensions,
    flags each day's maximum point and persists the result via the
    ``AggCommon`` machinery.
    """

    # Indicator columns of the base DataFrame that get aggregated.
    INDICATORS_COLS = [
        "p_total_sum",
        "p_kt_sum",
        "p_total_baseline",
        "p_kt_baseline",
    ]

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        rewrite,
        base_df,
    ):
        """Store the base DataFrame and set up default sum-aggregations.

        :param base_df: finest-granularity data to be rolled up; the other
            parameters are forwarded unchanged to ``AggCommon``.
        """
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        self.base_df = base_df
        # Default: plain sum for every indicator column; may be replaced by
        # modify_indicators() for real-time runs.
        self.indicators = {col: "sum" for col in self.INDICATORS_COLS}

    def add_max_time_flag(self):
        """Mark the row holding each day's maximum per (org_no, type_id)."""
        # Fetch the daily-maximum rows and tag them.
        group_field = ["org_no", "type_id"]
        day_max_df = self.get_day_max(group_field)
        day_max_df["is_day_max"] = 1

        # Left-merge the flag onto the aggregated result; non-maximum rows
        # come back as NaN and are normalized to 0 below.
        self.res_df = self.res_df.merge(
            day_max_df[["org_no", "type_id", "data_time", "is_day_max"]],
            on=["org_no", "type_id", "data_time"],
            how="left",
        )
        # Plain assignment instead of chained `fillna(..., inplace=True)`,
        # which is deprecated in pandas 2.x and may operate on a copy.
        self.res_df["is_day_max"] = self.res_df["is_day_max"].fillna(0)

    def rename_add_dimensions(self):
        """Rename the newly added dimension columns per dws_agg_conf and
        drop helper columns that must not be persisted."""
        for col in self.dimensions:
            if col in dws_agg_conf:
                rename = dws_agg_conf[col]["rename"]
                self.res_df.rename(columns=rename, inplace=True)
        # These merge helpers are intermediate only; drop them if present.
        for col in ["ad_org_name", "p_type_id"]:
            if col in self.res_df:
                self.res_df = self.res_df.drop(col, axis=1)

    def judge_nan_data(self, x):
        """Sum a group, but keep NaN when the whole group is NaN.

        A plain ``sum`` would turn an all-NaN group into 0, hiding the fact
        that no measurement exists for that slot.
        """
        if x.isna().all():
            return np.nan
        else:
            return x.sum()

    def modify_indicators(self):
        """Switch measured-value columns to NaN-aware aggregation for
        real-time runs.

        In real-time computation the baseline has all 96 points but the
        measurements may not; after aggregation the missing measured values
        would otherwise become 0, so they need special handling.
        """
        # When the run covers the full baseline window nothing is missing.
        if self.end_time == self.baseline_end_time:
            return
        nan_cols = ["p_total_sum", "p_kt_sum"]
        new_indicators = {
            col: self.judge_nan_data if col in nan_cols else "sum"
            for col in self.indicators
        }
        self.indicators = new_indicators

    def add_agg_dimensions(self):
        """Extend the aggregation dimensions with the extra columns
        configured in dws_agg_conf, plus the measure name."""
        add_cols = []
        for col in self.dimensions:
            if col in dws_agg_conf:
                add_cols.extend(dws_agg_conf[col]["columns"])
        self.dimensions.extend(add_cols)
        self.dimensions.append("measure_name")

    def main(self):
        """Entry point: aggregate base_df, post-process and save."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        # Nothing to aggregate — log and bail out early.
        if self.base_df.empty:
            self.logger.error(
                "初始聚合数据为空，不能继续聚合task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        self.add_agg_dimensions()
        self.modify_indicators()
        self.agg_data()
        self.rename_add_dimensions()
        self.rename_agg_indicators()
        self.add_max_time_flag()
        self.res_df["data_date"] = self.start_time.strftime("%Y-%m-%d")
        self.save_data_to_table()


class SpecConsAggBase(AggCommon):
    """Aggregation at the finest granularity.

    Builds per-(leaf org, industry, control measure) 15-minute sums of
    measured and baseline power for the consumers flagged for orderly power
    use or demand response, querying Elasticsearch per org/measure group.
    """

    def __init__(
        self, logger, task, start_time, end_time, step, step_base_dict
    ):
        """Forward to ``AggCommon`` and declare the ES aggregation setup."""
        super().__init__(
            logger, task, start_time, end_time, step, step_base_dict
        )
        # Raw indicator fields summed in ES, grouped by industry and time.
        self.indicators = ["p_total", "p_kt"]
        self.agg_columns = ["type_id", "data_time"]
        self.agg_types = ["sum"]

    def agg_base_df(self, merge_df, cid_list):
        """Aggregate at the finest granularity: leaf org, leaf industry,
        control measure, 15-minute step.

        :param merge_df: dimension table with at least ``on7``/``on5`` and
            ``type_id`` columns, merged onto the result at the end.
        :param cid_list: list of dicts (from :meth:`get_cid_df`) with keys
            ``on7``, ``measure_code``, ``measure_name`` and ``cons_no``.
        :return: combined DataFrame, or an empty one if nothing aggregated.
        """
        # Leaf-org -> parent-org lookup used to pick the ES index.
        on7_dict = (
            merge_df[["on7", "on5"]]
            .drop_duplicates()
            .set_index("on7")["on5"]
            .to_dict()
        )

        aggs_dict = self.get_aggs_dict()
        group_source = self.get_group_source()

        base_df = pd.DataFrame()

        for cid_dict in cid_list:
            on7 = cid_dict["on7"]
            on5 = on7_dict[on7]
            measure_code = cid_dict["measure_code"]
            measure_name = cid_dict["measure_name"]
            cids = cid_dict["cons_no"]
            self.logger.info(
                "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )

            # Restrict every ES query to this group's consumers.
            add_query = {"terms": {"cons_no": cids}}
            add_log = ",measure:{}".format(measure_code)

            # Measured-data aggregation.
            index_name = self.get_power_96_idx_name(on5, on7)
            query = self.get_es_query(
                on7, group_source, aggs_dict, add_query=add_query
            )
            agg_res = self.search_es(index_name, query, on7, add_log)
            org_df = self.proc_agg_data(agg_res)
            if org_df.empty:
                continue

            # Baseline-data aggregation (always over the full baseline window).
            index_name = self.get_baseline_idx_name()
            query = self.get_es_query(
                on7,
                group_source,
                aggs_dict,
                end_time=self.baseline_end_time,
                add_query=add_query,
            )
            baseline_agg_res = self.search_es(index_name, query, on7, add_log)
            baseline_org_df = self.proc_agg_data(baseline_agg_res)
            if baseline_org_df.empty:
                continue

            baseline_org_df = baseline_org_df.rename(
                columns={
                    "p_total_sum": "p_total_baseline",
                    "p_kt_sum": "p_kt_baseline",
                }
            )

            # Right-merge keeps every baseline slot even when the measured
            # side is missing (e.g. real-time runs with partial data).
            org_df = pd.merge(
                org_df,
                baseline_org_df,
                on=["type_id", "data_time"],
                how="right",
            )
            org_df["on7"] = on7
            org_df["measure_code"] = measure_code
            org_df["measure_name"] = measure_name

            base_df = pd.concat([base_df, org_df])
        if base_df.empty:
            return pd.DataFrame()

        base_df = base_df.astype({"data_time": "datetime64[ns]"})
        # Attach the remaining dimension columns.
        base_df = base_df.merge(merge_df, on=["on7", "type_id"], how="left")
        return base_df

    def get_cid_df(self, merge_df):
        """Query consumer IDs grouped by (org, measure).

        :param merge_df: provides the ``on7`` org numbers to filter on.
        :return: list of dicts with keys ``on7``, ``measure_code``,
            ``measure_name`` and ``cons_no`` (a list of consumer IDs).
        :raises ValueError: if ``self.flag_field`` is not a supported flag.
        """
        if self.flag_field == "orderly_flag":
            measure_code_field = "orderly_measure_code"
            measure_name_field = "orderly_measure_name"
        elif self.flag_field == "demand_flag":
            measure_code_field = "demand_measure_code"
            measure_name_field = "demand_measure_name"
        else:
            # Previously an unknown flag fell through to a NameError below;
            # fail fast with a clear message instead.
            raise ValueError(
                "unsupported flag_field: {}".format(self.flag_field)
            )

        on7_list = merge_df["on7"].astype(str).drop_duplicates().tolist()
        where_sql = "where {flag_field}=1 and on7 in ({on7_list})".format(
            flag_field=self.flag_field, on7_list=",".join(on7_list)
        )
        f_sql = config.get("merge_cons_filter")
        if f_sql:
            where_sql = f"{where_sql} and {f_sql}"

        # NOTE: the filter clause is already folded into where_sql above;
        # appending f_sql again here (as the old code did) produced invalid
        # SQL whenever merge_cons_filter was configured.
        sql = f"""SELECT distinct on7, 
                        {measure_code_field} as measure_code, 
                        {measure_name_field} as measure_name, 
                        cons_no 
                  from aclr_base_doc_all 
                  {where_sql}
               """
        cid_df = self.sql_engine.query(sql)
        cid_df = cid_df.sort_values(["on7"])
        # "records" (not the abbreviation "record", removed in pandas 2.0).
        cid_list = cid_df.groupby(["on7", "measure_code", "measure_name"])["cons_no"].apply(
            list).reset_index().to_dict("records")
        return cid_list

    def get_merge_df(self):
        """Fetch the dimension columns to merge onto the aggregated data."""
        s_sql = "select * from merge_cons "
        f_sql = config.get("merge_cons_filter")
        if f_sql:
            s_sql += "where "
            s_sql += f_sql
        merge_df = self.sql_engine.query(s_sql)
        return merge_df

    def main(self):
        """Entry point: build and return the finest-granularity DataFrame."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()
        merge_df = self.get_merge_df()
        cid_list = self.get_cid_df(merge_df)
        base_df = self.agg_base_df(merge_df, cid_list)
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{},数据条数:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time, len(base_df)
            )
        )
        return base_df
