import math
from datetime import datetime, timedelta

import numpy as np
import pandas as pd

from air_web.config.config import config
from air_web.data_platform import mysql_con
from air_web.dw.data_mapping import ConsType, IndustryMap
from air_web.web_flask.bll.base_bll import BaseBll
import ctypes;

class PredictBll(BaseBll):
    """Business-logic class for the overview/prediction page view functions.

    Class attributes resolve table/index names from ``config`` with literal
    fallbacks; the remaining attributes are field names shared by the SQL
    and ES schemas used throughout this class.
    """

    # column names shared by the SQL tables and ES indices
    time_field = "data_time"
    data_date_field = "data_date"
    cons_no_field = "cons_no"
    # table / index names: config key first, literal fallback second
    ORGNO_TYPEID_15MIN = config.get("ORGNO_TYPEID_15MIN", "orgno_typeid_15min")
    C_CONS = config.get("C_CONS", "c_cons")
    ACLR_BASE_DOC_ALL = config.get("ACLR_BASE_DOC_ALL", "aclr_base_doc_all")
    ORGNO_TYPEID_CONS_NUM = config.get(
        "ORGNO_TYPEID_CONS_NUM", "orgno_typeid_cons_num"
    )
    # NOTE(review): attribute name looks like a typo of "TYPE_MAP";
    # the value is the lookup table used by get_type_name
    TYPE_MAPE = "type_map"
    REAL_ORG_NO = "real_org_no"
    POWERLOAD_PREDICT_VIRTUAL_TABLE = config.get(
        "POWERLOAD_PREDICT_VIRTUAL_TABLE", "aclr_res_power_predict_virtual"
    )
    ACLR_WEATHER_HOUR = config.get("ACLR_WEATHER_HOUR", "aclr_weather_hour")
    IS_TOTAL_MAX = config.get("is_total_max", True)

    def __init__(self):
        """Initialise DAL handles and cache today's date strings.

        Passing init_mysql_dal=True makes self.sql_dal a MySQL instance;
        init_es_dal=True makes self.es_dal an ES instance (pass False for
        pages that do not need one).
        """
        super().__init__(init_mysql_dal=True, init_es_dal=True)
        self.today = format(datetime.now(), "%Y-%m-%d")
        self.year = f"{datetime.now().year}"

    def query_sql_res(self, table_name, rules, is_one_day):
        """Run a rule-based query against MySQL and return a DataFrame.

        When is_one_day is False the time column is reduced to a
        "%Y-%m-%d" date string; otherwise it is returned untouched.
        An empty result is logged and returned as-is.
        """
        result = self.sql_dal.sql_pandas.query_by_rules(table_name, rules)
        if len(result) == 0:
            self.logger.warning(f"查询结果为空:{table_name}, {rules}")
            return result
        self.logger.info(f"查询:{table_name},结果数量:{len(result)}, {rules}")
        if is_one_day:
            return result
        # multi-day view: collapse timestamps to plain dates
        result[self.time_field] = result[self.time_field].dt.strftime("%Y-%m-%d")
        return result

    def get_type_list(self, type_id, is_virtual=False):
        """Expand a type_id into the list of concrete type ids.

        Ids 31/32/33 are industry groups and expand via the configured
        industry map; any other truthy id maps to itself; a falsy id means
        "all types" when is_virtual, else just [1].
        """
        industry_map = config["industry_map"]
        # group type ids by their industry id (order of appearance preserved)
        grouped = {}
        for t_id, ind_id in zip(
            industry_map["type_id"], industry_map["industry_id"]
        ):
            grouped.setdefault(ind_id, []).append(t_id)

        if type_id in (31, 32, 33):
            return grouped[type_id]
        if type_id:
            return [type_id]
        return industry_map["type_id"] if is_virtual else [1]

    def get_type_name(self, type_id):
        """Resolve a type_id to its display name.

        Known industry ids resolve via IndustryMap.MAP; anything else is
        looked up in the type_map table (None falls back to id 1).
        """
        if type_id in IndustryMap.MAP:
            return IndustryMap.MAP[type_id]
        lookup_id = 1 if type_id is None else type_id
        sql = f"select distinct  type_id,type_code from {self.TYPE_MAPE} where type_id = {lookup_id}"
        rows = self.sql_dal.sql_pandas.query(sql).to_dict("records")
        return rows[0].get("type_code")

    def get_org_name(self, org_no):
        """Look up the display name of an organisation by its org_no."""
        sql = f"select distinct  org_no,org_name from {self.REAL_ORG_NO} where org_no = {org_no}"
        rows = self.sql_dal.sql_pandas.query(sql).to_dict("records")
        return rows[0].get("org_name")

    def get_predict_result(
        self, area_code, type_id, start_date, end_date, add_sql=""
    ):
        """Sum predicted p_total/p_kt per data_time for one area and type.

        Virtual consumer numbers follow the "<type_id>_<area_code>"
        convention; they are first resolved against C_CONS, then the
        prediction table is aggregated per timestamp.  ``add_sql`` is
        appended verbatim to the WHERE clause — callers must pass trusted
        SQL only.  Returns an empty frame with the expected columns when
        no consumer matches, or the raw empty query result when the
        aggregation itself is empty.
        """
        type_list = self.get_type_list(type_id, is_virtual=True)
        # virtual user ids: "<type_id>_<area_code>", quoted for the IN clause
        cons_no = [f"'{t_id}_{area_code}'" for t_id in type_list]
        cons_str = ",".join(cons_no)
        cid_sql = f"""select distinct {self.cons_no_field} from {self.C_CONS} where  cons_no in ({cons_str}) """
        cid_df = self.sql_dal.sql_pandas.query(cid_sql)
        if not len(cid_df):
            self.logger.warning(f"查询cons_no结果为空: sql :{cid_sql}")
            return pd.DataFrame(columns=[self.time_field, "p_total", "p_kt"])
        cid_list = cid_df[self.cons_no_field].to_list()
        cid_str = ",".join([f"'{i}'" for i in cid_list])
        sql = f"""
        select {self.time_field}, sum(p_total) as p_total, sum(p_kt) as p_kt from {self.POWERLOAD_PREDICT_VIRTUAL_TABLE} 
        where  {self.time_field} >='{start_date}' and {self.time_field} < '{end_date}' and {self.cons_no_field} in 
        ({cid_str}) {add_sql}
        group by {self.time_field}
        """
        res_df = self.sql_dal.sql_pandas.query(sql)
        if not len(res_df):
            self.logger.warning(f"查询结果为空: sql :{sql}")
            return res_df
        self.logger.info(
            f"查询:{self.POWERLOAD_PREDICT_VIRTUAL_TABLE},结果数量:{len(res_df)}, sql:{sql}"
        )
        # sort by (time, p_total) then keep="last": on duplicate timestamps
        # the row with the largest p_total wins
        res_df = res_df.sort_values(
            [self.time_field, "p_total"],
            ascending=True,
        )
        res_df.drop_duplicates(
            subset=self.time_field, inplace=True, keep="last"
        )

        return res_df

    def get_15min_result(
        self,
        area_code,
        type_id,
        start_date,
        end_date,
        is_one_day=True,
        add_rules=None,
    ):
        """Query 15-minute actual loads for an area/type in [start, end).

        add_rules, when given, is a single extra rule tuple appended to the
        filter set.  Returns a frame with columns p_total, p_kt and the
        time field (renamed from the *_sum columns of the source table).
        """
        date_rules = [
            (self.time_field, "query", ">=", start_date),
            (self.time_field, "query", "<", end_date),
        ]
        type_rules = [
            ("org_no", "query", "=", area_code),
            ("type_id", "query", "=", type_id),
        ]
        if add_rules:
            type_rules.append(add_rules)
        type_rules = type_rules + date_rules
        table_name = self.ORGNO_TYPEID_15MIN
        res_df = self.query_sql_res(
            table_name, type_rules, is_one_day=is_one_day
        )
        # BUG FIX: an empty query result may lack the expected columns, so
        # the rename + column selection below raised KeyError.  Return an
        # empty frame with the expected columns instead, mirroring the
        # empty-result contract of get_predict_result.
        if not len(res_df):
            return pd.DataFrame(columns=["p_total", "p_kt", self.time_field])
        res_df.rename(
            columns={
                "p_total_sum": "p_total",
                "p_kt_sum": "p_kt",
            },
            inplace=True,
        )
        res_df = res_df[["p_total", "p_kt", self.time_field]]
        return res_df

    def get_cons_num(self, area_code, type_id, year):
        """Return the total consumer count per org_no for an area/type/year."""
        rules = [
            ("org_no", "query", "=", area_code),
            ("type_id", "query", "in", self.get_type_list(type_id)),
            ("year", "query", "=", year),
        ]
        table_name = self.ORGNO_TYPEID_CONS_NUM
        cons_df = self.sql_dal.sql_pandas.query_by_rules(table_name, rules)
        if cons_df.empty:
            self.logger.warning(f"查询结果为空:{table_name}, {rules}")
            return cons_df
        self.logger.info(f"查询:{table_name},结果数量:{len(cons_df)}, {rules}")
        # several type_ids may match: sum their counts per organisation
        return cons_df.groupby("org_no").agg({"cons_num": "sum"}).reset_index()

    def get_table_data(
        self, res_dict, date_list, type_id, area_code, start_date
    ):
        """Build the front-end table payload from column-name → value lists."""
        type_name = self.get_type_name(type_id)
        org_name = self.get_org_name(area_code)

        p_value = []
        for key, val_list in res_dict.items():
            # keys look like "p_total_true" / "p_kt_pre"
            d_str, p_str = key.split("_")[1:]
            p_value.append(
                {
                    "org_name": org_name,
                    "type_name": type_name,
                    "date": start_date,
                    "type": "总负荷" if d_str == "total" else "空调负荷",
                    "data_type": "实际值" if p_str == "true" else "预测值",
                    "val": val_list,
                }
            )
        # 总负荷 rows first; within a type, the 预测值 row first
        p_value.sort(
            key=lambda row: (row["type"] != "总负荷", row["data_type"] != "预测值")
        )
        return {"data_list": date_list, "p_value": p_value}

    def get_picture_data(self, res_dict, data_list):
        """Build chart series, plus a series marking the predicted peak.

        res_dict maps column names like "p_total_pre" to value lists whose
        entries may be None (NaN is replaced with None upstream).
        """
        res = [
            {
                "name": ("预测" if p_str == "pre" else "")
                + ("总负荷" if d_str == "total" else "空调负荷"),
                "date_list": data_list,
                "value": val_list,
            }
            for key, val_list in res_dict.items()
            for d_str, p_str in [key.split("_")[1:]]
        ]
        total_pre = res_dict.get("p_total_pre", [])
        # BUG FIX: the list may contain None entries and max() over a mix of
        # None and numbers raises TypeError.  Filter None before taking the
        # max, matching the handling in get_fk_picture_data.  An all-falsy
        # list still yields no peak marker, as before.
        values = [v for v in total_pre if v is not None]
        max_value = max(values) if any(values) else None
        max_list = [max_value if x == max_value else None for x in total_pre]
        res.append(
            {"name": "预测最大总负荷", "date_list": data_list, "value": max_list}
        )
        return res

    def get_download_data(self, res_df, type_id, area_code, start_date):
        """Pivot the merged result into the row layout used for file export."""
        if res_df.empty:
            return res_df
        type_name = self.get_type_name(type_id)
        org_name = self.get_org_name(area_code)
        # one row per source column (p_total_true, p_kt_pre, ...)
        out = res_df.set_index(self.time_field).transpose()
        out["org_name"] = org_name
        out["type_name"] = type_name
        out["date"] = start_date
        out["type"] = ["总负荷" if "total" in idx else "空调负荷" for idx in out.index]
        out["data_type"] = ["实际值" if "_true" in idx else "预测值" for idx in out.index]
        out = out.reset_index(drop=True)
        return out.sort_values(by=["type", "data_type"], ascending=[True, False])

    def get_all_result(self, params, is_download):
        """Assemble actuals and predictions for one day of one area/type.

        params carries area_code, type_id and start_date ("%Y-%m-%d").
        When is_download is True a transposed DataFrame for export is
        returned; otherwise a dict with table_data, picture_data and
        statistic_data for the page.
        """
        area_code = params.get("area_code")
        type_id = params.get("type_id")
        start_str = params.get("start_date")
        start_dt = datetime.strptime(start_str, "%Y-%m-%d")
        start_date = start_dt.strftime("%Y-%m-%d")
        end_date = (start_dt + timedelta(days=1)).strftime("%Y-%m-%d")
        year = start_dt.strftime("%Y")

        true_df = self.get_15min_result(
            area_code, type_id, start_date, end_date
        )
        predict_df = self.get_predict_result(
            area_code, type_id, start_date, end_date
        )
        cons_df = self.get_cons_num(area_code, type_id, year)
        return_dict = {
            "table_data": {},
            "picture_data": {},
            "statistic_data": {},
        }
        # align actual vs predicted values on the 15-min timestamp
        res_df = pd.merge(
            true_df,
            predict_df,
            on=self.time_field,
            suffixes=("_true", "_pre"),
            how="outer",
        )
        if not res_df.empty:
            res_df[self.time_field] = res_df[self.time_field].dt.strftime(
                "%H:%M"
            )
        res_df = res_df.round(2)
        # NaN -> None so the JSON payload carries nulls
        res_df = res_df.replace({np.nan: None})
        res_df = res_df.where(res_df.notnull(), None)
        res_dict = res_df.to_dict("list")
        if is_download:
            return self.get_download_data(
                res_df, type_id, area_code, start_date
            )  # DataFrame
        date_list = res_dict.pop(self.time_field)
        table_data = self.get_table_data(
            res_dict, date_list, type_id, area_code, start_date
        )
        picture_data = self.get_picture_data(res_dict, date_list)
        total_rate, kt_rate, _ = self.get_predict_rate(res_df)
        return_dict["table_data"] = table_data
        return_dict["picture_data"] = picture_data
        return_dict["statistic_data"] = {
            "total_rate": total_rate,
            "kt_rate": kt_rate,
        }
        # BUG FIX: to_dict orient "record" is invalid in modern pandas —
        # the valid orient is "records".
        cons_num = None
        if not cons_df.empty:
            cons_num = cons_df.to_dict("records")[0].get("cons_num")
        return_dict["statistic_data"]["cons_num"] = cons_num
        return return_dict

    def get_province_no(self):
        """Return the org_no of the province-level (org_level=0) organisation."""
        # CONSISTENCY: use the class-level table name like get_org_name does
        # (same literal value "real_org_no", kept in one place).
        sql = f"select org_no from {self.REAL_ORG_NO} where org_level=0"
        df = self.sql_dal.sql_pandas.query(sql)
        return str(df["org_no"].tolist()[0])

    def get_all_fk_result(self, params, is_download):
        """Assemble the load-control (负控) view for one day of one area.

        Merges 15-min actuals with ES-backed predictions, orders table rows
        (总负荷 first, 预测值 before 实际值) and attaches the monitored
        high-/low-voltage consumer counts.
        """
        type_order = {
            "总负荷": 0,
            "监测高压用户空调负荷": 1,
            "监测低压用户空调负荷": 2,
            "专线用户空调负荷": 3,
        }
        data_type_order = {"预测值": 0, "实际值": 1}

        area_code = params.get("area_code")
        start_str = params.get("start_date")
        start_dt = datetime.strptime(start_str, "%Y-%m-%d")
        start_date = start_dt.strftime("%Y-%m-%d")
        end_date = (start_dt + timedelta(days=1)).strftime("%Y-%m-%d")
        year = start_dt.strftime("%Y")

        province_no = self.get_province_no()

        true_df = self.get_fk_15min_result(
            area_code, start_date, end_date, province_no
        )
        predict_df, cons_num_1, cons_num_2 = self.get_fk_predict_result(
            area_code, start_date, end_date, province_no
        )
        return_dict = {
            "table_data": {},
            "picture_data": {},
            "statistic_data": {},
        }
        # align actual vs predicted values on the 15-min timestamp
        res_df = pd.merge(
            true_df,
            predict_df,
            on=self.time_field,
            suffixes=("_true", "_pre"),
            how="outer",
        )
        res_df = res_df.round(2)
        res_df = res_df.where(res_df.notnull(), None)

        res_dict = res_df.to_dict("list")
        if is_download:
            return self.get_fk_download_data(
                res_df, area_code, start_date
            )  # DataFrame
        date_list = res_dict.pop(self.time_field)
        table_data = self.get_fk_table_data(
            res_dict, date_list, area_code, start_date, province_no
        )
        picture_data = self.get_fk_picture_data(res_dict, date_list)
        total_rate = self.get_fk_predict_rate(res_df)

        table_data["p_value"] = sorted(
            table_data["p_value"],
            key=lambda x: (
                type_order[x["type"]],
                data_type_order[x["data_type"]],
            ),
        )
        return_dict["table_data"] = table_data
        return_dict["picture_data"] = picture_data
        return_dict["statistic_data"] = {"total_rate": total_rate}

        # BUG FIX: to_dict orient "record" is invalid in modern pandas —
        # the valid orient is "records" (three occurrences below).
        gy_cons_df = self.get_fk_cons_num(area_code, 1, year, province_no)
        if not gy_cons_df.empty:
            gy_row = gy_cons_df.to_dict("records")[0]
            return_dict["statistic_data"]["cons_num_gy"] = gy_row.get("cons_num")
        else:
            return_dict["statistic_data"]["cons_num_gy"] = None

        dy_cons_df = self.get_fk_cons_num(area_code, 2, year, province_no)
        if not dy_cons_df.empty:
            dy_row = dy_cons_df.to_dict("records")[0]
            return_dict["statistic_data"]["cons_num_dy"] = dy_row.get("cons_num")
            return_dict["statistic_data"]["cons_count_dy"] = dy_row.get("cons_count")
        else:
            return_dict["statistic_data"]["cons_num_dy"] = None
            return_dict["statistic_data"]["cons_count_dy"] = None

        return return_dict

    def get_fk_picture_data(self, res_dict, data_list):
        """Build the 负控 chart series; total and 专线 values scale to 万."""
        load_names = {
            "total": "总负荷",
            "jcgy": "监测高压用户空调负荷",
            "jcdy": "监测低压用户空调负荷",
        }
        scaled_names = ("总负荷", "预测总负荷", "预测专线用户空调负荷")
        res = []
        for key, val_list in res_dict.items():
            d_str, p_str = key.split("_")[1:]
            d_type = load_names.get(d_str, "专线用户空调负荷")
            name = ("" if p_str == "true" else "预测") + d_type
            if name in scaled_names:
                # these series are displayed in units of 10000
                val_list = [v / 10000 if v else v for v in val_list]
            res.append(
                {"name": name, "date_list": data_list, "value": val_list}
            )

        # peak-marker series: highlight only the maximum predicted total
        total_pre = res_dict["p_total_pre"]
        non_null = [v for v in total_pre if v is not None]
        if total_pre and non_null:
            peak = max(non_null)
            max_list = [peak if v == peak else None for v in total_pre]
        else:
            max_list = []
        res.append(
            {"name": "预测最大总负荷", "date_list": data_list, "value": max_list}
        )
        return res

    def get_fk_download_data(self, res_df, area_code, start_date):
        """Pivot the 负控 result into the export row layout, sorted by type."""
        org_name = self.get_org_name(area_code)
        # one row per source column (p_total_pre, kt_jcgy_pre, ...)
        out = res_df.set_index(self.time_field).transpose()
        out["org_name"] = org_name
        out["date"] = start_date

        load_names = {
            "total": "总负荷",
            "jcgy": "监测高压用户空调负荷",
            "jcdy": "监测低压用户空调负荷",
        }

        def row_type(col_name):
            return load_names.get(col_name.split("_")[1], "专线用户空调负荷")

        out["type"] = out.index.map(row_type)
        out["data_type"] = out.index.map(
            lambda c: "实际值" if "_true" in c else "预测值"
        )

        type_order = {
            "总负荷": 0,
            "监测高压用户空调负荷": 1,
            "监测低压用户空调负荷": 2,
            "专线用户空调负荷": 3,
        }
        data_type_order = {"预测值": 0, "实际值": 1}
        # capture the column set before the helper sort keys are added,
        # so they can be dropped again after sorting
        export_columns = out.columns
        out["type_id"] = out["type"].map(lambda t: type_order[t])
        out["data_type_id"] = out["data_type"].map(
            lambda t: data_type_order[t]
        )
        out = out.reset_index(drop=True)
        out = out.sort_values(
            by=["type_id", "data_type_id"], ascending=[True, True]
        )
        return out[export_columns]

    def get_fk_table_data(
        self, res_dict, date_list, area_code, start_date, province_no
    ):
        """Build the 负控 table payload; province-wide rows show org 全省."""
        if area_code in [None, province_no]:
            org_name = "全省"
        else:
            org_name = self.get_org_name(area_code)

        load_names = {
            "total": "总负荷",
            "jcgy": "监测高压用户空调负荷",
            "jcdy": "监测低压用户空调负荷",
        }
        p_value = []
        for key, val_list in res_dict.items():
            # keys look like "p_total_pre" / "kt_jcgy_true"
            d_str, p_str = key.split("_")[1:]
            p_value.append(
                {
                    "org_name": org_name,
                    "date": start_date,
                    "type": load_names.get(d_str, "专线用户空调负荷"),
                    "data_type": "实际值" if p_str == "true" else "预测值",
                    "val": val_list,
                }
            )
        # 总负荷 rows first; within a type, the 预测值 row first
        p_value.sort(
            key=lambda row: (row["type"] != "总负荷", row["data_type"] != "预测值")
        )
        return {"data_list": date_list, "p_value": p_value}

    def get_cid(self, area_code, type_code_sort, province_no):
        """Return cons_no rows for virtual users of one type_code_sort.

        Province-wide calls (area_code None or the province org_no) are not
        filtered by area; otherwise on5/on7 must match the area code.
        """
        # CONSISTENCY: use the class attribute (config value with fallback)
        # instead of indexing config directly, like the other methods do.
        sql = f"""select {self.cons_no_field} 
                    from {self.C_CONS} 
                    where type_code_sort = {type_code_sort} and cons_type={ConsType.VIRTUAL_USER}"""
        if area_code not in [None, province_no]:
            sql += f" and (on5 = {area_code} or on7 = {area_code}) "
        return mysql_con.get(sql)

    def get_fk_predict_result(
        self, area_code, start_date, end_date, province_no
    ):
        """Query predicted loads for one area from the virtual-user indices.

        Per 15-min timestamp this aggregates: industry-wide p_total, and
        p_kt for dedicated-line (专线), monitored high-voltage (高压) and
        low-voltage (低压) users, merged on data_time.
        Returns (merged_df, high_voltage_count, low_voltage_count).
        """
        # BUG FIX: the original loaded the compiled extension with
        # ctypes.CDLL("...es.cpython-38-...so") and called zElasticsearch
        # through it.  A CPython extension module cannot be driven via
        # ctypes — the call would yield a raw int, not a client object with
        # a .search method.  Import it as a normal module instead.
        # TODO(review): confirm this module path matches the .so location.
        from air_web.ruleng.es import zElasticsearch

        def build_query(lead_terms, tail_terms):
            """size-0 bool query with per-data_time p_total/p_kt sum aggs."""
            must = list(lead_terms)
            must.append(
                {"range": {"data_time": {"gte": start_date, "lt": end_date}}}
            )
            must.extend(tail_terms)
            return {
                "size": 0,
                "query": {"bool": {"must": must}},
                "aggs": {
                    "group_by_data_time": {
                        "terms": {"field": "data_time", "size": 10000},
                        "aggs": {
                            "total_p_total": {"sum": {"field": "p_total"}},
                            "total_p_kt": {"sum": {"field": "p_kt"}},
                        },
                    }
                },
            }

        def area_terms():
            """on5/on7 area filter; province-wide queries carry none."""
            if area_code in [None, province_no]:
                return []
            onn = "on5" if len(area_code) < 7 else "on7"
            return [{"terms": {onn: [area_code]}}]

        def run_agg(index_name, query):
            """Run the aggregation and return one row per data_time (HH:MM),
            keeping the max-p_total row on duplicate timestamps."""
            es = zElasticsearch(config["ES_HOST"])
            aggregations = es.search(
                index=index_name, body=query, request_timeout=300
            )
            data = []
            if aggregations.get("aggregations") is not None:
                for bucket in aggregations["aggregations"][
                    "group_by_data_time"
                ]["buckets"]:
                    data.append(
                        [
                            bucket["key_as_string"],
                            bucket["total_p_kt"]["value"],
                            bucket["total_p_total"]["value"],
                        ]
                    )
            df = pd.DataFrame(data, columns=["data_time", "p_kt", "p_total"])
            res_df = df.sort_values("data_time")
            if not len(res_df):
                self.logger.warning(
                    f"查询结果为空: index:{index_name}, es_query :{query}"
                )
                return res_df
            res_df[self.time_field] = pd.to_datetime(
                res_df[self.time_field]
            ).dt.strftime("%H:%M")
            res_df = res_df.sort_values(
                [self.time_field, "p_total"], ascending=True
            )
            res_df.drop_duplicates(
                subset=self.time_field, inplace=True, keep="last"
            )
            return res_df

        def virtual_index():
            """Index name pattern for the virtual-user prediction index."""
            prefix = config["POWERLOAD_PREDICT_VIRTUAL_IDX"]
            if area_code in [None, province_no]:
                return f"{prefix}-99999-9999999"
            if len(area_code) < 7:
                return f"{prefix}-{area_code}-9999999"
            return f"{prefix}-{area_code[:5]}-{area_code}"

        def zhuanxian_index():
            """Index name pattern for the dedicated-line prediction index."""
            # NOTE(review): "13102" is hard-coded here while the sibling
            # builder compares against province_no — confirm this is intended.
            if area_code in [None, "13102"]:
                return "aclr_res_predict_96-*"
            if len(area_code) < 7:
                return f"aclr_res_predict_96-{area_code[:5]}-*"
            return f"aclr_res_predict_96-{area_code[:5]}-{area_code}"

        # 某个地区全行业的预测p_total数据
        total_query = build_query(
            area_terms(),
            [{"term": {"org_no": 999999999}}, {"term": {"type_id": 99999}}],
        )
        res_df = run_agg(virtual_index(), total_query)
        res_df = res_df[["data_time", "p_total"]]

        # 某个地区专线的预测p_kt数据
        zx_query = build_query(area_terms(), [{"term": {"org_no": 777777778}}])
        res_df_7 = run_agg(zhuanxian_index(), zx_query)
        res_df_7 = res_df_7.rename(columns={"p_kt": "kt_wjc_pre"})
        res_df = res_df.merge(
            res_df_7[["data_time", "kt_wjc_pre"]], on="data_time", how="outer"
        )

        # 某个地区高压的预测p_kt数据 (area filtering happens inside get_cid)
        cid_1 = self.get_cid(area_code, 1, province_no)
        gy_query = build_query(
            [
                {
                    "terms": {
                        self.cons_no_field: [
                            i[self.cons_no_field] for i in cid_1
                        ]
                    }
                }
            ],
            [{"term": {"org_no": 999999999}}],
        )
        res_df_gy = run_agg(virtual_index(), gy_query)
        res_df_gy = res_df_gy.rename(columns={"p_kt": "kt_jcgy_pre"})
        res_df = res_df.merge(
            res_df_gy[["data_time", "kt_jcgy_pre"]], on="data_time", how="outer"
        )

        # 某个地区低压的预测p_kt数据
        cid_2 = self.get_cid(area_code, 2, province_no)
        dy_query = build_query(
            [
                {
                    "terms": {
                        self.cons_no_field: [
                            i[self.cons_no_field] for i in cid_2
                        ]
                    }
                }
            ],
            [{"term": {"org_no": 999999999}}],
        )
        res_df_dy = run_agg(virtual_index(), dy_query)
        res_df_dy = res_df_dy.rename(columns={"p_kt": "kt_jcdy_pre"})
        res_df = res_df.merge(
            res_df_dy[["data_time", "kt_jcdy_pre"]], on="data_time", how="outer"
        )
        return res_df, len(cid_1), len(cid_2)

    def get_fk_15min_result(
        self, area_code, start_date, end_date, province_no
    ):
        """Return the 15-minute total-load (p_total) curve for an area.

        Delegates to ``get_15min_result`` over [start_date, end_date) and
        reduces the result to one row per HH:MM slot, keeping the row with
        the largest ``p_total`` for each slot.

        ``province_no`` is kept for interface compatibility with callers;
        the current implementation no longer branches on it (the old ES
        query paths that did were dead, commented-out code and have been
        removed).

        Returns a DataFrame with columns ``data_time`` (HH:MM string) and
        ``p_total``; empty when nothing was found.
        """
        df = self.get_15min_result(
            area_code, '0', start_date, end_date
        )  # TODO: an int 0 for the industry id breaks logic inside; keep '0'
        df = df[["data_time", "p_total"]]

        # Sort by timestamp before the empty check / formatting.
        res_df = df.sort_values("data_time")
        if not len(res_df):
            self.logger.warning("查询结果为空")
            return res_df
        # Keep only the time-of-day: the page plots a single-day curve.
        res_df[self.time_field] = pd.to_datetime(
            res_df[self.time_field]
        ).dt.strftime("%H:%M")
        # Ascending sort + keep="last" retains the max p_total per slot.
        res_df = res_df.sort_values(
            [self.time_field, "p_total"],
            ascending=True,
        )
        res_df.drop_duplicates(
            subset=self.time_field, inplace=True, keep="last"
        )
        return res_df

    def get_fk_predict_rate(self, res_df):
        """Compute the mean p_total forecast-accuracy rate in percent.

        Per row the relative deviation ``|true - pre| / pre * 100`` is
        clipped to [0, 100] and subtracted from 100; the method returns the
        mean of those per-row rates rounded to 2 decimals, or ``None`` when
        ``res_df`` is empty or no rate could be computed.

        Special cases:
        - true == 0 and pre == 0  -> deviation 0 (perfect forecast);
        - true != 0 and pre == 0  -> deviation pinned to 100 (the original
          code divided by zero here, yielding ``inf`` that only the later
          ``clip`` capped at 100 — assigning 100 directly is equivalent and
          avoids the divide-by-zero warning).
        """
        if res_df.empty:
            return None
        # astype returns a copy, so the caller's frame is not mutated.
        res_df = res_df.astype(
            {"p_total_true": "float64", "p_total_pre": "float64"}
        )
        # Normal case: non-zero prediction, use the relative deviation.
        normal = (res_df["p_total_true"].notnull()) & (
            res_df["p_total_pre"] != 0
        )
        if normal.any():
            res_df.loc[normal, "sub_p_total_rate"] = (
                abs(res_df["p_total_true"] - res_df["p_total_pre"])
                / res_df["p_total_pre"]
                * 100
            )
        # Both zero: perfect forecast.
        res_df.loc[
            (res_df["p_total_true"] == 0) & (res_df["p_total_pre"] == 0),
            "sub_p_total_rate",
        ] = 0
        # Non-zero actual against a zero prediction: worst case, 100%.
        zero_pred = (
            (res_df["p_total_true"].notnull())
            & (res_df["p_total_true"] != 0)
            & (res_df["p_total_pre"] == 0)
            & (res_df["p_total_pre"].notnull())
        )
        res_df.loc[zero_pred, "sub_p_total_rate"] = 100.0

        res_df["pre_p_total_rate"] = 100 - res_df["sub_p_total_rate"].clip(
            lower=0, upper=100
        )
        res_df["pre_p_total_rate"] = res_df["pre_p_total_rate"].round(2)

        total_rate = res_df["pre_p_total_rate"].mean()
        return round(total_rate, 2) if not np.isnan(total_rate) else None

    def get_fk_cons_num(self, area_code, type_id, year, province_no):
        """Query consumer counts per org for one year and industry type.

        Falls back to ``province_no`` when ``area_code`` is None, then sums
        ``cons_num`` and ``cons_count`` grouped by ``org_no``.

        Returns the grouped DataFrame, or the raw empty result when the
        query matched nothing.
        """
        if area_code is None:
            area_code = province_no
        rules = [
            ("org_no", "query", "=", area_code),
            ("type_id", "query", "=", type_id),
            ("year", "query", "=", year),
        ]
        table_name = self.ORGNO_TYPEID_CONS_NUM
        res_df = self.sql_dal.sql_pandas.query_by_rules(table_name, rules)
        if not len(res_df):
            self.logger.warning(f"查询结果为空:{table_name}, {rules}")
            return res_df
        self.logger.info(f"查询:{table_name},结果数量:{len(res_df)}, {rules}")
        # Use the string "sum" for both columns: passing the builtin
        # ``sum`` callable is deprecated in pandas and was inconsistent
        # with the "cons_count" spec anyway.
        res_df = (
            res_df.groupby("org_no")
            .agg({"cons_num": "sum", "cons_count": "sum"})
            .reset_index()
        )
        return res_df

    def get_max_p_real_result(self, area_code, start_date, end_date):
        """Query real total/AC load for the whole industry (type_id = 0).

        Covers [start_date, end_date) and returns a DataFrame with columns
        ``p_total``, ``p_kt`` and ``data_time`` (formatted YYYY-MM-DD).
        """
        # NOTE(review): values are interpolated straight into the SQL;
        # callers must pass only trusted, internally-derived parameters.
        # The select column was fixed from "date_time" to "data_time" —
        # the WHERE clause and self.time_field both use "data_time", and
        # the old name made df[self.time_field] below raise KeyError.
        sql = f"""select p_total_sum as p_total, p_kt_sum as p_kt, data_time
                  from {self.ORGNO_TYPEID_15MIN}
                  where org_no = {area_code}
                    and type_id = 0
                    and data_time >= '{start_date}'
                    and data_time < '{end_date}'
                    
               """
        df = self.sql_dal.sql_pandas.query(sql)
        df[self.time_field] = pd.to_datetime(df[self.time_field]).dt.strftime(
            "%Y-%m-%d"
        )
        return df

    def get_temperature_data(self, area_code, start_date, end_date):
        """Fetch the daily maximum hourly temperature for an area.

        Resolves the weather region code of the virtual consumer
        ``99999_<area_code>``, then returns one row per day with columns
        ``data_time`` (date) and ``tmp_pre`` (the day's max ``tmp``).
        Returns an empty DataFrame when the region or the weather data is
        missing.
        """
        cid_sql = f"""select region_code
                      from {self.ACLR_BASE_DOC_ALL} 
                      where cons_no = '{99999}_{area_code}' """
        region_df = self.sql_dal.sql_pandas.query(cid_sql)
        # Guard: an unknown area used to raise IndexError on tolist()[0].
        if region_df.empty:
            self.logger.warning(f"未查到区域编码:{area_code}")
            return pd.DataFrame()
        region_code = region_df["region_code"].iloc[0]

        rules = [
            ("region_code", "query", "=", region_code),
            ("datetime", "query", ">=", start_date),
            ("datetime", "query", "<", end_date),
        ]

        tmp_df = self.ana_query(
            self.ACLR_WEATHER_HOUR, rules, doc_time_field="datetime"
        )
        if tmp_df.empty:
            # Use the class logger instead of print, like the rest of the file.
            self.logger.warning(f"未查到{start_date},{end_date}天气数据:{rules}")
            return tmp_df
        tmp_df = tmp_df[["datetime", "tmp"]]
        tmp_df["datetime"] = tmp_df["datetime"].dt.date
        # Descending sort + drop_duplicates keeps the max tmp of each day.
        tmp_df = tmp_df.sort_values(["datetime", "tmp"], ascending=False)
        tmp_df = tmp_df.drop_duplicates("datetime")
        tmp_df.rename(
            columns={"tmp": "tmp_pre", "datetime": "data_time"}, inplace=True
        )
        return tmp_df

    def get_max_p_result(self, params, is_download):
        """Daily-maximum load comparison: real vs predicted, plus temperature.

        params: dict with ``area_code``, ``start_date``, ``end_date``
            (YYYY-MM-DD; end date is treated as inclusive).
        is_download: when True, return the raw DataFrame (with ``org_name``)
            for export; otherwise return the chart/statistics dict.
        """
        area_code = params.get("area_code")
        start_date = params.get("start_date")
        end_date = params.get("end_date")
        # Shift the end date by one day so the caller's range is inclusive.
        end_date = (
            datetime.strptime(end_date, "%Y-%m-%d") + timedelta(days=1)
        ).strftime("%Y-%m-%d")

        # Real values: only rows flagged as each day's maximum.
        add_rules = ("is_day_max", "query", "=", "1")
        true_df = self.get_15min_result(
            area_code, 0, start_date, end_date, add_rules=add_rules
        )

        # Predicted values: use the class constant IS_TOTAL_MAX (declared
        # with a default) instead of config["is_total_max"], which raises
        # KeyError when the key is missing from the config.
        is_day_max = "(1,3)" if self.IS_TOTAL_MAX else "(2,3)"
        add_sql = f"and is_day_max in {is_day_max}"
        predict_df = self.get_predict_result(
            area_code, 99999, start_date, end_date, add_sql=add_sql
        )

        if true_df.empty and predict_df.empty:
            return {
                "picture_data": [],
                "statistic_data": {"total_rate": None, "kt_rate": None},
            }
        if not true_df.empty:
            true_df[self.time_field] = true_df[self.time_field].dt.date
        if not predict_df.empty:
            predict_df[self.time_field] = predict_df[self.time_field].dt.date

        res_df = pd.merge(
            true_df,
            predict_df,
            on=self.time_field,
            suffixes=("_true", "_pre"),
            how="outer",
        )

        tmp_df = self.get_temperature_data(area_code, start_date, end_date)

        if not tmp_df.empty:
            res_df = pd.merge(res_df, tmp_df, on=self.time_field, how="left")
            # For days up to today, show the forecast temperature as the
            # actual one.
            res_df.loc[
                res_df[self.time_field] <= datetime.today().date(), "tmp_true"
            ] = res_df["tmp_pre"]
        else:
            res_df["tmp_true"] = np.nan
            res_df["tmp_pre"] = np.nan

        total_rate, kt_rate, res_df = self.get_predict_rate(res_df)

        # Scale from kW to 万kW and round for display.
        for col in ["p_total_true", "p_kt_true", "p_total_pre", "p_kt_pre"]:
            res_df[col] = (res_df[col] / 10000).round(2)

        res_df.sort_values(self.time_field, inplace=True)
        # Replace NaN with None so the JSON layer serializes them as null.
        res_df = res_df.replace({np.nan: None})
        res_df = res_df.where(res_df.notnull(), None)

        if is_download:
            org_name = self.get_org_name(area_code)
            res_df["org_name"] = org_name
            return res_df

        res_dict = res_df.to_dict("records")
        return {
            "picture_data": res_dict,
            "statistic_data": {"total_rate": total_rate, "kt_rate": kt_rate},
        }
