import json
import time
from collections import defaultdict
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
from kafka import KafkaProducer

from air_web.config.config import config
from air_web.dw.common_fun import create_topic
from air_web.dw.data_mapping import DateType, AlertType, PlanType
from air_web.dw.dws_common import AggCommon


class OrgTypeDayStat(AggCommon):
    """Daily per-(org, type) load statistics built from 15-minute data.

    For each (org_no, type_id) group on the processed day, extracts the
    rows holding the max/min air-conditioning load (``p_kt_sum``), the
    max/min total load (``p_total_sum``) and the max baseline deviation
    (``p_total_diff``), computes daily averages, merges per-region
    temperature, and leaves the combined frame in ``self.res_df`` for
    ``save_data_to_table``.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelta,  # NOTE(review): unused and shadows datetime.timedelta — kept for caller compatibility
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )

    def get_day_stat_df(
        self, day_org_type_df, stat_field, stat_type, map_dict
    ):
        """Return one row per (org_no, type_id): the row where
        ``stat_field`` is extreme within that group.

        :param day_org_type_df: de-duplicated 15-min rows for the day.
        :param stat_field: column to take the extreme of
            (``p_kt_sum`` / ``p_total_sum`` / ``p_total_diff``).
        :param stat_type: ``"min"`` or ``"max"``; controls sort direction
            and prefixes the renamed output columns (e.g. ``max_p_kt``).
        :param map_dict: source-column -> output-suffix mapping.
        :return: DataFrame with one row per group and renamed stat columns.
        """
        # "min" sorts ascending so the smallest value comes first;
        # anything else (i.e. "max") sorts descending.
        ascending = True if stat_type == "min" else False
        # data_time is a secondary ascending key: ties on the stat value
        # resolve deterministically to the earliest timestamp.
        stat_field_list = [stat_field, "data_time"]
        stat_df = day_org_type_df.sort_values(
            stat_field_list, ascending=[ascending, True]
        )
        # After the sort, the first row kept per group is the extreme one.
        stat_df = stat_df.drop_duplicates(["org_no", "type_id"])

        rename_dict = {
            f"{name}": f"{stat_type}_{map_dict[name]}"
            for name in map_dict.keys()
        }
        rename_dict.update({"time_display_name": "data_date"})
        stat_df = stat_df.rename(columns=rename_dict)
        # Drop the stat columns that were NOT renamed for this extraction so
        # the later merges do not collide on duplicated column names.
        if stat_field != "p_total_diff":
            stat_df.drop(
                ["p_total_diff", "p_total_baseline"], axis=1, inplace=True
            )
        else:
            stat_df.drop(["p_kt_sum"], axis=1, inplace=True)

        return stat_df

    def agg_org_type_day(self, day_org_type_df, tmp_df):
        """Build the daily stat row per (org_no, type_id) into ``self.res_df``.

        :param day_org_type_df: raw 15-min rows (may contain duplicates).
        :param tmp_df: per-region temperature rows keyed by ``region_code``;
            may be empty, in which case temperature columns are skipped.
        """
        day_org_type_df = day_org_type_df.drop_duplicates(
            ["org_no", "type_id", "data_time"]
        )
        # Group by org and type; the max/min/average rows are located below.
        self.logger.info(
            "查询15min表数据条数:{},task_id:{},step:{},star_time:{} ...".format(
                len(day_org_type_df), self.task_id, self.step, self.start_time
            )
        )

        # Column-rename maps for each extracted statistic.
        kt_dict = {
            "data_time": "p_kt_time",
            "p_kt_sum": "p_kt",
            "p_total_sum": "p_kt_total",
        }
        total_dict = {
            "data_time": "p_total_time",
            "p_total_sum": "p_total",
            "p_kt_sum": "p_total_kt",
        }
        total_diff_dict = {
            "data_time": "total_diff_time",
            "p_total_diff": "total_diff",
            "p_total_sum": "total_diff_total",
            "p_total_baseline": "total_diff_baseline_total",
        }

        max_kt_df = self.get_day_stat_df(
            day_org_type_df, "p_kt_sum", "max", kt_dict
        )
        max_total_df = self.get_day_stat_df(
            day_org_type_df, "p_total_sum", "max", total_dict
        )
        max_total_diff_df = self.get_day_stat_df(
            day_org_type_df, "p_total_diff", "max", total_diff_dict
        )
        min_kt_df = self.get_day_stat_df(
            day_org_type_df, "p_kt_sum", "min", kt_dict
        )
        min_total_df = self.get_day_stat_df(
            day_org_type_df, "p_total_sum", "min", total_dict
        )
        mean_kt_df = (
            day_org_type_df.groupby(["org_no", "type_id"])["p_kt_sum"]
            .mean()
            .reset_index()
        )
        mean_kt_df.rename(columns={"p_kt_sum": "avg_p_kt"}, inplace=True)
        mean_total_df = (
            day_org_type_df.groupby(["org_no", "type_id"])["p_total_sum"]
            .mean()
            .reset_index()
        )
        mean_total_df.rename(
            columns={"p_total_sum": "avg_p_total"}, inplace=True
        )

        # Merge the max/min/average rows into a single result frame;
        # the extremes share the full key column set, the averages only
        # the group key.
        on_cols = [
            "org_no",
            "org_name",
            "ad_org_name",
            "p_org_no",
            "type_id",
            "type_code",
            "p_type_id",
            "data_date",
            "region_code",
        ]
        result = pd.merge(max_kt_df, max_total_df, on=on_cols)
        result = pd.merge(result, max_total_diff_df, on=on_cols)
        result = pd.merge(result, min_kt_df, on=on_cols)
        result = pd.merge(result, min_total_df, on=on_cols)
        result = pd.merge(result, mean_kt_df, on=["org_no", "type_id"])
        result = pd.merge(result, mean_total_df, on=["org_no", "type_id"])

        # Attach temperature by region (left join keeps orgs without data).
        if not tmp_df.empty:
            result = result.merge(tmp_df, on="region_code", how="left")
        result = result.drop(["region_code"], axis=1)

        result["date_type"] = DateType.DAY
        # Fill NaN for province-level rows that have no parent org/type.
        result["p_org_no"].fillna(-1, inplace=True)
        result["ad_org_name"].fillna("全省", inplace=True)
        result["type_code"].fillna("全社会", inplace=True)
        result["p_type_id"].fillna(-1, inplace=True)

        self.res_df = result

    def get_source_data(self):
        """Query the day's 15-min rows joined with the baseline table (to
        derive ``p_total_diff``) and the region-code lookup.

        Retries once after a 5s pause on an empty result to tolerate the
        upstream writer lagging slightly behind this task.

        :return: possibly-empty DataFrame of source rows.
        """
        sql = """select ot.org_no,org_name,ad_org_name,p_org_no,region_code,
                        ot.type_id,type_code,p_type_id,
                        ot.data_time,time_display_name,
                        p_kt_sum,p_total_sum,
                        p_total_baseline,
                        (p_total_baseline-p_total_sum) as p_total_diff
                        from {source_table} ot
                        left join 
                            (select org_no,type_id,data_time,p_total_sum as p_total_baseline
                            from baseline_orgno_typeid_15min
                            )bot on bot.org_no=ot.org_no 
                                and bot.type_id=ot.type_id 
                                and bot.data_time=ot.data_time
                        left join 
                            (select distinct on7, region_code
                            from aclr_base_doc_all
                            )ab on ab.on7=ot.org_no
                        where ot.data_time >= '{start_time}' and ot.data_time <'{end_time}'
                      """.format(
            source_table=self.source_table,
            start_time=self.start_time,
            end_time=self.end_time,
        )
        day_org_type_df = self.sql_engine.query(sql)
        if day_org_type_df.empty:
            time.sleep(5)
            day_org_type_df_new = self.sql_engine.query(sql)
            if day_org_type_df_new.empty:
                self.logger.info(
                    "查询15min表数据为空,task_id:{},step:{},star_time:{} ...".format(
                        self.task_id, self.step, self.start_time
                    )
                )
                return day_org_type_df_new
            else:
                day_org_type_df = day_org_type_df_new
        return day_org_type_df

    def main(self):
        """Entry point: fetch source rows, aggregate, persist."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        day_org_type_df = self.get_source_data()
        if day_org_type_df.empty:
            return
        tmp_df = self.get_temperature_data()
        self.agg_org_type_day(day_org_type_df, tmp_df)
        self.save_data_to_table()


class OrgDayStat(AggCommon):
    """Daily per-(org, type) AC-load statistics from 15-minute data.

    Finds, per (org_no, type_id), the rows with the max and min
    air-conditioning load (``p_kt_sum``), derives the AC-load ratio at
    those points, adds daily averages, and stores the merged result in
    ``self.res_df`` for ``save_data_to_table``.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelta,  # NOTE(review): unused and shadows datetime.timedelta — kept for caller compatibility
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )

    def agg_org_type_day_max(self):
        """Compute daily max/min/avg AC-load stats per (org_no, type_id).

        Queries the 15-min source table for the task window (retrying once
        after 5s on an empty result), picks extreme rows via
        ``rank(method="min") == 1``, and merges everything into one row per
        group. ``inf`` ratios (zero total load) are nulled out.
        """
        sql = """select org_no,org_name,ad_org_name,p_org_no,type_code,type_id,data_time,p_kt_sum,
                        p_total_sum,time_display_name,p_type_id 
                 from {source_table} 
                 where data_time >= '{start_time}' and data_time <'{end_time}'
              """.format(
            source_table=self.source_table,
            start_time=self.start_time,
            end_time=self.end_time,
        )
        day_org_type_df = self.sql_engine.query(sql)
        if day_org_type_df.empty:
            time.sleep(5)
            day_org_type_df_new = self.sql_engine.query(sql)
            if day_org_type_df_new.empty:
                self.logger.info(
                    "查询15min表数据为空,task_id:{},step:{},star_time:{} ...".format(
                        self.task_id, self.step, self.start_time
                    )
                )
                return
            else:
                day_org_type_df = day_org_type_df_new

        day_org_type_df = day_org_type_df.drop_duplicates(
            ["org_no", "type_id", "data_time"]
        )
        # Group by org and type; locate the max/min/average rows below.
        self.logger.info(
            "查询15min表数据条数:{},task_id:{},step:{},star_time:{} ...".format(
                len(day_org_type_df), self.task_id, self.step, self.start_time
            )
        )
        # rank == 1 selects all rows tied for the extreme; drop_duplicates
        # then keeps one row per group.
        max_rows = day_org_type_df.loc[
            day_org_type_df.groupby(["org_no", "type_id"])["p_kt_sum"].rank(
                method="min", ascending=False
            )
            == 1
        ]
        max_rows = max_rows.drop_duplicates(["org_no", "type_id"])
        # AC-load share of total load at the max point (inf if total is 0).
        max_rows["p_kt_rate_max"] = round(
            max_rows["p_kt_sum"] / max_rows["p_total_sum"], 4
        )
        min_rows = day_org_type_df.loc[
            day_org_type_df.groupby(["org_no", "type_id"])["p_kt_sum"].rank(
                method="min", ascending=True
            )
            == 1
        ]
        min_rows = min_rows.drop_duplicates(["org_no", "type_id"])
        min_rows["p_kt_rate_min"] = round(
            min_rows["p_kt_sum"] / min_rows["p_total_sum"], 4
        )
        mean_kt_rows = (
            day_org_type_df.groupby(["org_no", "type_id"])["p_kt_sum"]
            .mean()
            .reset_index(name="avg_p_kt")
        )
        mean_total_rows = (
            day_org_type_df.groupby(["org_no", "type_id"])["p_total_sum"]
            .mean()
            .reset_index(name="avg_p_total")
        )

        # Merge the max/min/average rows into a single result frame.
        result = pd.merge(
            max_rows,
            min_rows,
            on=[
                "org_no",
                "type_id",
                "time_display_name",
                "org_name",
                "ad_org_name",
                "p_org_no",
                "type_code",
                "p_type_id",
            ],
            suffixes=("_max", "_min"),
        )
        result = pd.merge(result, mean_kt_rows, on=["org_no", "type_id"])
        result = pd.merge(result, mean_total_rows, on=["org_no", "type_id"])

        # Zero total load produced inf ratios above; store them as NULL.
        result.replace({np.inf: None}, inplace=True)

        map_dict = {
            "data_time": "p_kt_time",
            "p_kt_sum": "p_kt",
            "p_kt_rate": "p_kt_rate",
            "p_total_sum": "p_kt_total",
        }
        keys = ["max", "min"]
        # Rename suffixed merge columns, e.g. "p_kt_sum_max" -> "max_p_kt".
        rename_dict = {
            f"{name}_{key}": f"{key}_{map_dict[name]}"
            for name in map_dict.keys()
            for key in keys
        }
        rename_dict.update({"time_display_name": "data_time"})
        result = result.rename(columns=rename_dict)
        # NaN -> None so the SQL writer stores NULLs.
        self.res_df = result.replace({np.nan: None})

    def main(self):
        """Entry point: aggregate the day's extremes and persist."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        self.agg_org_type_day_max()
        self.save_data_to_table()


class GetCityAlert2023(AggCommon):
    """City-level alert detection (地市预警判断), 2023 rule set.

    Winter rule: flag a city whose maximum total load yesterday exceeded its
    recorded winter maximum. Summer rule: flag a city that has had a heat
    alert and exceeded the historical 90%/95% load thresholds on two
    consecutive days. Results go to MySQL and — for today's first run
    only — to Kafka.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelta,  # unused; kept for caller compatibility (shadows datetime.timedelta)
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        # "YYYY-MM-DD" of the processed day, used in SQL filters.
        self.today = self.start_time.strftime("%Y-%m-%d")
        # "MM-DD" used for the season-window check; lexicographic comparison
        # is safe because both sides are zero-padded.
        self.data_date = self.start_time.strftime('%m-%d')

    def send_data_to_kafka(self):
        """Push the computed alerts to Kafka, at most once per day.

        Skips empty results, historical runs, and days whose alerts were
        already persisted (taken to mean they were already pushed).
        """
        if self.res_df.empty:
            return
        if self.start_time.date() != datetime.now().date():  # never push historical runs
            return
        sql = f"select * from city_alert where alert_time='{self.start_time}'"
        res_df = self.sql_engine.query(sql)
        if not res_df.empty:  # non-empty means this day was already computed & pushed
            self.logger.info(
                "地市启动信息已经推送过kafka!task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        send_df = self.res_df.rename(
            columns={'on5': 'executeCityRange', 'alert_time': 'executeStartTime'})
        send_df['executeCityRange'] = send_df['executeCityRange'].astype(str)
        # Map alert type -> reduction ratio, expressed as a whole-percent
        # string (e.g. 0.05 -> "5").
        send_df['downIndex'] = (send_df['alert_type'].replace(AlertType.THRESHOLD_MAP) *
                                100).astype('int64').astype(str)
        # Winter alerts cover a 3-day window (start day + 2); summer, 1 day.
        days = 2 if send_df['alert_type'].tolist()[0] == AlertType.WINTER else 0
        send_df['executeEndTime'] = (pd.to_datetime(send_df['executeStartTime']) + pd.Timedelta(
            days=days)).dt.strftime('%Y-%m-%d')
        send_df = send_df[['executeCityRange', 'downIndex', 'executeStartTime', 'executeEndTime']]
        # The downstream consumer expects these keys to exist even when null.
        for col in ['id', 'name', 'code', 'tradeCode', 'tradeName', 'strategyName']:
            send_df[col] = None

        # BUG FIX: the orient is "records"; the abbreviated "record" relied on
        # orient abbreviations that pandas 2.0 removed (ValueError there).
        send_list = send_df.to_dict("records")

        producer = KafkaProducer(
            bootstrap_servers=','.join(config['KAFKA_HOST'])
        )
        topic_name = config['TOPIC_AIR_CONDITION_PLAN']
        create_topic(topic_name)
        for send_dict in send_list:
            message = {'data': send_dict, 'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
            value = json.dumps(message).encode('utf-8')
            producer.send(topic_name, value=value)
        # Flush pending messages and release the connection.
        producer.flush()
        producer.close()
        self.logger.info(
            "地市启动信息推送kafka成功!num:{},task_id:{},step:{},star_time:{} ...".format(
                len(send_list), self.task_id, self.step, self.start_time
            )
        )

    def judge_winter_city_alert(self):
        """
        Winter: flag cities whose max total load yesterday exceeded last
        winter's recorded maximum. Stores matches in ``self.res_df``.
        """
        yesterday = (self.start_time - timedelta(days=1)).strftime('%Y-%m-%d')
        # Before the cooling season starts we are still in the winter that
        # began the previous calendar year, so the reference year shifts back.
        data_year = self.start_time.year - 2 if self.data_date < config['cooling_start_date'] else self.start_time.year - 1
        sql = f"""SELECT cw.on5, cw.city_name, '{self.today}' as alert_time, {AlertType.WINTER} as alert_type 
                 from city_write cw
                 JOIN city_total_critical ct on ct.on5=cw.on5 and ct.data_year='{data_year}'
                 WHERE data_time='{yesterday}'
                 and write_max_p_total > winter_max_p_total
              """
        self.res_df = self.sql_engine.query(sql)

    def judge_summer_city_alert(self):
        """
        Summer: flag cities that had a heat alert (no time-range limit) and
        exceeded the threshold on both of the last two days.

        Level 1 (SUMMER_L1) when the two-day minimum load exceeds last
        year's 95th-percentile maximum; level 2 (SUMMER_L2) when it falls
        between the 90th and 95th percentiles. Cities below 90% are dropped.
        """
        # city_write: cities with a prior heat alert whose load is present on
        # both of the last two days; keep the smaller of the two day maxima.
        day_before = (self.start_time - timedelta(days=2)).strftime('%Y-%m-%d')
        sql_cw = f"""SELECT cw.on5, MAX(city_name) AS city_name, '{self.today}' as alert_time, 
                            cw_data_time, year(MIN(data_time)) - 1  AS cr_year,
                            MIN(write_max_p_total) AS max_p_total
                    FROM city_write cw
                    INNER JOIN 
                    (SELECT ON5, MAX(data_time)as cw_data_time 
                     FROM city_write
                     WHERE heat_alert=1 and data_time < '{self.today}'
                     GROUP BY on5
                     )atc on atc.on5=cw.on5
                    WHERE data_time >= '{day_before}' AND data_time < '{self.today}'
                    GROUP BY on5
                    HAVING COUNT(DISTINCT data_time) = 2
                    ORDER BY max_p_total ASC;"""
        cwr_res = self.sql_engine.query(sql_cw)
        if cwr_res.empty:
            return
        cwr_dict_r = cwr_res.to_dict("records")

        # Threshold lookup keyed by "on5-data_year" from city_total_critical.
        data_year = int(self.today[0:4]) - 1
        sql_ctc = f"""SELECT on5, data_year, 
                            max_p_total_90p, 
                            max_p_total_95p 
                     FROM city_total_critical 
                     where data_year= {data_year};"""
        ctc_res = self.sql_engine.query(sql_ctc)
        ctc_dict_r = ctc_res.to_dict("records")
        ctc_dict = defaultdict(dict)
        for i in ctc_dict_r:
            key = f'{i.get("on5")}-{i.get("data_year")}'
            ctc_dict[key]["max_p_total_90p"] = float(i.get("max_p_total_90p"))
            ctc_dict[key]["max_p_total_95p"] = float(i.get("max_p_total_95p"))

        cwr_dict = defaultdict(dict)
        # Classify each candidate city; cities without threshold records are
        # logged as errors and skipped, cities below the 90% line are dropped.
        for c in cwr_dict_r:
            key = f'{c.get("on5")}-{c.get("cr_year")}'
            if key in ctc_dict:
                max_p_total_95p = ctc_dict[key]["max_p_total_95p"]
                max_p_total_90p = ctc_dict[key]["max_p_total_90p"]
                if c.get("max_p_total") > max_p_total_95p:
                    cwr_dict[c.get("on5")]["alert_type"] = AlertType.SUMMER_L1
                elif (
                    max_p_total_90p <= c.get("max_p_total") <= max_p_total_95p
                ):
                    cwr_dict[c.get("on5")]["alert_type"] = AlertType.SUMMER_L2
                else:
                    continue
                cwr_dict[c.get("on5")]["on5"] = c.get("on5")
                cwr_dict[c.get("on5")]["city_name"] = c.get("city_name")
                cwr_dict[c.get("on5")]["alert_time"] = c.get("alert_time")
                cwr_dict[c.get("on5")]["heat_alert_time"] = c.get(
                    "cw_data_time"
                )
            else:
                on5 = c.get("on5", "") or ""
                year = c.get("cr_year", "") or ""
                self.logger.error(
                    "在city_total_critical未找到对应的on5:{}+年份:{}档案信息,task_id:{},step:{},star_time:{}".format(
                        on5,
                        year,
                        self.task_id,
                        self.step,
                        self.start_time,
                    )
                )
        # Rows destined for the MySQL city_alert table.
        df = pd.DataFrame(list(cwr_dict.values()))
        self.res_df = df.replace({np.nan: None})

    def main(self):
        """Entry point: pick the seasonal rule, then push and persist."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        # Cooling (summer) window is inclusive on both ends.
        if config["cooling_start_date"] <= self.data_date <= config["cooling_end_date"]:
            self.judge_summer_city_alert()
        else:
            self.judge_winter_city_alert()
        self.send_data_to_kafka()
        self.save_data_to_table()


class CityControl2023(AggCommon):
    """Air-conditioning control evaluation (high-voltage consumers only).

    For every org under an approved control plan, compares today's
    per-consumer AC load with yesterday's; consumers whose mean reduction
    rate is at or below the approved threshold are sub-standard. Writes
    per-consumer detail to ``substandard_cons`` and a per-org summary via
    ``save_data_to_table``.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelt,  # unused; original (misspelled) name kept for caller compatibility
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        self.today = self.start_time.strftime("%Y-%m-%d")
        self.yesterday = (self.start_time - timedelta(days=1)).strftime('%Y-%m-%d')

    def winter_consecutive_judge(self, row):
        """On day n+1 of an n-day plan, consolidate the previous n days.

        Requires a ``city_control`` summary for every prior day of the
        window; keeps, per consumer, the smallest (worst) rate recorded over
        the window and re-writes it dated today, then stores a summary row
        with ``is_control=0``.
        """
        control_time = row['control_time']
        on7 = row["on7"]
        # NOTE(review): values interpolated into SQL come from our own DB
        # rows, but parameterized queries would still be safer — confirm.
        sql = f"""select * from city_control 
                  where on7={on7} 
                    and control_time = '{control_time}'
                    and compute_date < '{self.today}'
               """
        on7_control_df = self.sql_engine.query(sql)
        if on7_control_df.empty:
            self.logger.warning("city_control未查询到数据")
            return
        date_count = len(on7_control_df['compute_date'].drop_duplicates())
        if date_count != row['contin_days']:
            # Not every day of the window was computed — cannot consolidate.
            self.logger.warning(f"city_control只有{date_count}天，未满足{row['contin_days']}天")
            return

        sql = f"""select * from substandard_cons
                  where on7={on7} 
                    and compute_date >= '{control_time}'
                    and compute_date < '{self.today}'
               """
        on7_subst_cons_df = self.sql_engine.query(sql)

        # Keep, per consumer, the smallest adjustment rate over the window.
        on7_subst_cons_df = on7_subst_cons_df.sort_values(['rate'])
        on7_subst_cons_df = on7_subst_cons_df.drop_duplicates('cons_no')
        on7_subst_cons_df['compute_date'] = self.today
        self.sql_engine.update_df_by_id(on7_subst_cons_df, "substandard_cons")
        print(
            "不达标用户写入mysql,task_id:{},step:{},star_time:{},on7:{},数据条数:{}".format(
                self.task_id,
                self.step,
                self.start_time,
                on7,
                len(on7_subst_cons_df),
            )
        )

        on7_control_df = on7_control_df.drop_duplicates('on7')
        on7_control_df['n_cons_subst'] = len(on7_subst_cons_df)
        on7_control_df['n_cons_st'] = on7_control_df['n_cons_total'] - on7_control_df[
            'n_cons_subst']
        on7_control_df['compute_date'] = self.today
        # is_control=0 marks this as the consolidated (non-control-day) row.
        on7_control_df["is_control"] = 0

        self.res_df = on7_control_df
        self.save_data_to_table()

    def save_subst_cons(self, on7, control_time, res_subst_cons_df):
        """Persist today's sub-standard consumers for one org.

        Joins in consumer names, deletes any rows already written for this
        (day, control_time, on7) so re-runs are idempotent, then inserts.
        """
        if res_subst_cons_df.empty:
            self.logger.error(
                "不达标用户为空,task_id:{},step:{},star_time:{},on7:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )
            return
        sql = f"select cons_no, cons_name from c_cons where on7 = {on7} and type_code_sort=1"
        cons_df = self.sql_engine.query(sql)

        # Inner join: consumers missing from the archive are dropped.
        res_subst_cons_df = res_subst_cons_df.merge(
            cons_df, on="cons_no", how="inner"
        )

        sql = f"""delete from substandard_cons 
                  where compute_date = '{self.today}'
                    and control_time = '{control_time}' 
                    and on7={on7}
               """
        self.sql_engine.engine.execute(sql)
        self.sql_engine.update_df_by_id(res_subst_cons_df, "substandard_cons")
        print(
            "不达标用户写入mysql,task_id:{},step:{},star_time:{},on7:{},数据条数:{}".format(
                self.task_id,
                self.step,
                self.start_time,
                on7,
                len(res_subst_cons_df),
            )
        )

    def get_city_cons_kt(self, on5, on7, data_date):
        """Fetch per-consumer AC load points for one org and one day from ES.

        :return: DataFrame with cons_no / data_time ("HH:MM") / p_kt,
            possibly empty when the index has no matching documents.
        """
        rules = [
            ("data_time", "query", "=", data_date),
            ("on7", "query", "=", on7),
            ("type_code_sort", "query", "=", "1")
        ]
        index_name = self.get_power_96_idx_name(on5, on7)
        cons_df = self.es_dal.query_dataframe(
            rules,
            index_name,
            doc_time_field="data_time",
            source=["cons_no", "data_time", "p_kt"],
        )
        if cons_df.empty:
            self.logger.warning(
                "查询数据为空,task_id:{},step:{},star_time:{},index:{},date:{},on7:{}...".format(
                    self.task_id,
                    self.step,
                    self.start_time,
                    index_name,
                    data_date,
                    on7,
                )
            )
            return cons_df
        # Keep only the time-of-day so today/yesterday points align on merge.
        cons_df["data_time"] = cons_df["data_time"].dt.strftime("%H:%M")
        return cons_df

    def proc_on7_alert_city(self, row):
        """
        Compute today's sub-standard consumers for one org (on7).

        Matches today's and yesterday's per-consumer load at identical time
        points, averages the reduction rate per consumer, and flags those at
        or below the approved threshold. Persists detail and summary rows.
        """
        on5 = row["on5"]
        on7 = row["on7"]
        control_time = row["control_time"]
        threshold = row["control_prop"]
        n_cons_total = row["cons_count"]

        today_df = self.get_city_cons_kt(on5, on7, self.today)
        yesterday_df = self.get_city_cons_kt(on5, on7, self.yesterday)
        if today_df.empty or yesterday_df.empty:
            # BUG FIX: the original had a stray `print` token spliced into
            # the pd.merge(...) call below (a SyntaxError) and a bare string
            # literal here; restore the intended message and a clean merge.
            print("dataframe空白")
            return

        merge_df = pd.merge(
            today_df,
            yesterday_df,
            on=["cons_no", "data_time"],
            how="inner",
            suffixes=("_today", "_yesterday"),
        )

        # Drop points where yesterday's load was 0; a consumer whose 96
        # points are all 0 counts as compliant.
        merge_df = merge_df.loc[merge_df['p_kt_yesterday'] != 0]

        # Mean reduction rate per consumer.
        merge_df["rate"] = (
            merge_df["p_kt_yesterday"] - merge_df["p_kt_today"]
        ) / merge_df["p_kt_yesterday"]
        merge_df = merge_df.groupby("cons_no")["rate"].mean().reset_index()

        # .copy() so the assignments below don't hit SettingWithCopyWarning
        # (and keep working under pandas copy-on-write).
        subst_cons_df = merge_df.loc[merge_df["rate"] <= threshold].copy()
        n_cons_subst = len(subst_cons_df)
        n_cons_st = n_cons_total - n_cons_subst

        # Business rule: clamp rates below -1 to -1.
        subst_cons_df.loc[subst_cons_df["rate"] < -1, "rate"] = -1

        res_dict = {
            "compute_date": self.today,
            "on5": on5,
            "on7": on7,
            "control_time": control_time,
            "is_control": 1,
            "n_cons_total": n_cons_total,
            "n_cons_st": n_cons_st,
            "n_cons_subst": n_cons_subst,
        }

        subst_cons_df["on5"] = on5
        subst_cons_df["on7"] = on7
        subst_cons_df["control_time"] = control_time
        subst_cons_df["compute_date"] = self.today
        subst_cons_df = subst_cons_df[
            ["cons_no", "rate", "on5", "on7", "control_time", "compute_date"]
        ]

        self.save_subst_cons(on7, control_time, subst_cons_df)

        self.res_df = pd.DataFrame([res_dict])
        self.save_data_to_table()

    def proc_alert_city(self, alert_df):
        """Dispatch each plan row to per-day or consolidation processing."""
        alert_df['diff_days'] = (self.start_time.date() - alert_df['control_time']).dt.days

        # Plans controlled for one day only, or pushed by NANRUI.
        one_day_df = alert_df.loc[
            (alert_df['contin_days'] == 1) | (alert_df['plan_type'] == PlanType.NANRUI)]
        if not one_day_df.empty:
            for index, row in one_day_df.iterrows():
                if row['diff_days'] != row['contin_days']:  # skip plans past their control window
                    self.proc_on7_alert_city(row)

        # Multi-day plans computed by ZHIXIANG.
        no_one_day_df = alert_df.loc[
            (alert_df['contin_days'] != 1) & (alert_df['plan_type'] == PlanType.ZHIXIANG)]
        if not no_one_day_df.empty:
            # Keep only the latest plan per org.
            no_one_day_df = no_one_day_df.sort_values(['on7', 'control_time'], ascending=False)
            no_one_day_df = no_one_day_df.drop_duplicates('on7')

            # Days 1..n: judge that day's sub-standard consumers;
            # day n+1: judge consumers that failed on all n days.
            for index, row in no_one_day_df.iterrows():
                if row['diff_days'] == row['contin_days']:
                    self.winter_consecutive_judge(row)
                else:
                    self.proc_on7_alert_city(row)

    def get_city_alert(self):
        """Load active approval plans and attach consumer counts.

        Province-level (org_level 1) and city-level (org_level 2) plans are
        resolved to (on5, on7) pairs and combined into one frame.
        """
        # The stored range is closed on both ends; subtract one day so plans
        # are still visible on the day after they end.
        data_date = (self.start_time - timedelta(days=1)).strftime('%Y-%m-%d')
        sql = f"""select c.org_no, control_time, control_prop, contin_days, plan_type, r.org_level
	              from city_approval_info c
	              left join real_org_no r on r.org_no=c.org_no
	              where end_time >= '{data_date}' and control_time <= '{self.today}'
               """
        approval_df = self.sql_engine.query(sql)
        if approval_df.empty:
            return approval_df

        on5_df = approval_df.loc[approval_df['org_level'] == 1].rename(columns={"org_no": "on5"})
        on7_df = approval_df.loc[approval_df['org_level'] == 2].rename(columns={"org_no": "on7"})

        on5_list = on5_df['on5'].astype(str).tolist()
        on7_list = on7_df['on7'].astype(str).tolist()

        where_list = []
        if on5_list:
            where_list.append(f"p_org_no in ({','.join(on5_list)})")
        if on7_list:
            where_list.append(f"org_no in ({','.join(on7_list)})")
        where_sql = "where " + " or ".join(where_list)

        sql = f"""select p_org_no as on5, org_no as on7, cons_count
                  from real_org_no r
	              left join 
	                (select on7, count(1) as cons_count 
	                 from c_cons 
	                 where type_code_sort=1 
	                 group by on7) c
	                on c.on7=r.on7
                  {where_sql}
               """
        cons_count_df = self.sql_engine.query(sql)
        on5_df = on5_df.merge(cons_count_df, on='on5', how='inner')
        on7_df = on7_df.merge(cons_count_df, on='on7', how='inner')

        columns = ["on5", "on7", "control_time", "control_prop", "contin_days", "plan_type", "cons_count"]
        alert_df = pd.concat([on5_df[columns], on7_df[columns]])
        return alert_df

    def main(self):
        """Entry point: find active plans and evaluate each one."""
        print("开始执行classCityControl")
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()

        alert_df = self.get_city_alert()
        if alert_df.empty:
            self.logger.info(
                "未查询到今日预警地市,task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            print("未查询到今日预警地市")
            return
        self.proc_alert_city(alert_df)
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time
            )
        )


class GetCityAlert(AggCommon):
    """
    City-level alert judgement.

    Decides which cities should start air-conditioning load control today
    (summer rule: a past heat alert plus yesterday's max load exceeding 95%
    of last summer's max), then pushes the decision to Kafka and saves it to
    the result table.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelta,  # NOTE: unused here; shadows datetime.timedelta only in this scope
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        # Date strings reused by the SQL below and in the output rows.
        self.today = self.start_time.strftime("%Y-%m-%d")
        self.data_date = self.start_time.strftime('%m-%d')

    def send_data_to_kafka(self):
        """Push today's city start-up decision to Kafka (first run only).

        Skips historical runs, and skips re-sending when `city_alert` already
        holds rows for this start time (i.e. a previous run already pushed).
        """
        if self.res_df.empty:
            return
        if self.start_time.date() != datetime.now().date():  # never push historical runs
            return
        sql = f"select * from city_alert where alert_time='{self.start_time}'"
        res_df = self.sql_engine.query(sql)
        if not res_df.empty:  # rows already saved => already pushed once
            self.logger.info(
                "地市启动信息已经推送过kafka!task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        send_df = self.res_df.rename(
            columns={'on5': 'executeCityRange', 'alert_time': 'executeStartTime'})
        send_df['executeCityRange'] = send_df['executeCityRange'].astype(str)
        # Map alert type to its threshold, expressed as an integer percentage.
        send_df['downIndex'] = (send_df['alert_type'].replace(AlertType.THRESHOLD_MAP) *
                                100).astype('int64').astype(str)
        # Winter plans span 3 calendar days (start + 2); others end same day.
        days = 2 if send_df['alert_type'].tolist()[0] == AlertType.WINTER else 0
        send_df['executeEndTime'] = (pd.to_datetime(send_df['executeStartTime']) + pd.Timedelta(
            days=days)).dt.strftime('%Y-%m-%d')
        send_df = send_df[['executeCityRange', 'downIndex', 'executeStartTime', 'executeEndTime']]
        # Downstream schema expects these keys present even when empty.
        for col in ['id', 'name', 'code', 'tradeCode', 'tradeName', 'strategyName']:
            send_df[col] = None

        # Fix: the valid orient is "records" ("record" was a removed pandas abbreviation).
        send_list = send_df.to_dict("records")

        producer = KafkaProducer(
            bootstrap_servers=','.join(config['KAFKA_HOST'])
        )
        topic_name = config['TOPIC_AIR_CONDITION_PLAN']
        create_topic(topic_name)
        for send_dict in send_list:
            message = {'data': send_dict, 'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
            value = json.dumps(message).encode('utf-8')
            producer.send(topic_name, value=value)
        # Flush and close the Kafka producer.
        producer.flush()
        producer.close()
        self.logger.info(
            "地市启动信息推送kafka成功!num:{},task_id:{},step:{},star_time:{} ...".format(
                len(send_list), self.task_id, self.step, self.start_time
            )
        )

    def judge_summer_city_alert(self):
        """Summer rule: the city has had a heat alert (no time-range limit)
        AND yesterday's max total load exceeded 95% of last summer's max.

        Stores the qualifying cities in ``self.res_df``.
        """
        yesterday = (self.start_time - timedelta(days=1)).strftime('%Y-%m-%d')
        data_year = self.start_time.year - 1
        sql = f"""SELECT cw.on5, ct.city_name, '{self.today}' as alert_time, 
                         1 as alert_type, cw_data_time as heat_alert_time
                  from city_write cw
                  JOIN city_total_critical ct on ct.on5=cw.on5 and ct.data_year='{data_year}'
                  JOIN 
                    (SELECT ON5, MAX(data_time)as cw_data_time 
                    FROM city_write
                    WHERE heat_alert=1 and data_time < '{self.today}'
                    GROUP BY on5
                    )atc on atc.on5=cw.on5
                  WHERE data_time='{yesterday}'
                    and write_max_p_total > max_p_total * 0.95
                  """
        res_df = self.sql_engine.query(sql)
        if res_df.empty:
            # Fix: keep an (empty) result so send_data_to_kafka / save steps
            # can safely check `.empty` even when nothing qualified today.
            self.res_df = res_df
            return

        ctrl_on5_list = res_df['on5'].tolist()
        if 51101 in ctrl_on5_list:  # province-level start means every city starts
            # Customer requirement: the province-wide plan excludes 4 cities:
            # Ganzi, Aba, Liangshan, Panzhihua.
            heat_alert_time = res_df.loc[res_df['on5'] == 51101, 'heat_alert_time'].tolist()[0]
            sql = f"""select org_no as on5, org_name as city_name, 
                             "{self.today}" as alert_time, 1 as alert_type,
                             "{heat_alert_time}" as heat_alert_time
                      from real_org_no
                      where org_level=1
                        and org_no not in (51418,51517,51419,51490,51408,51409)
                   """
            res_df = self.sql_engine.query(sql)
        elif 51401 in ctrl_on5_list:  # Chengdu's data covers Tianfu and Minjiang, start them too
            heat_alert_time = res_df.loc[res_df['on5'] == 51401, 'heat_alert_time'].tolist()[0]
            add_list = [[51428, "国网天府新区供电公司", self.today, 1, heat_alert_time],
                        [51422, "国网四川岷江供电有限责任公司", self.today, 1, heat_alert_time]]
            add_df = pd.DataFrame(add_list, columns=["on5", "city_name", "alert_time",
                                                     "alert_type", "heat_alert_time"])
            res_df = pd.concat([res_df, add_df])
        # NaN -> None so the SQL layer writes NULLs instead of 'nan'.
        self.res_df = res_df.replace({np.nan: None})

    def main(self):
        """Entry point: judge today's cities, push to Kafka, save to table."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )

        self.judge_summer_city_alert()
        self.send_data_to_kafka()
        self.save_data_to_table()


class CityControl(AggCommon):
    """Air-conditioning control evaluation: high-voltage consumers only.

    For every city/org with an approved control plan for today, compares each
    consumer's air-conditioning load against yesterday's baseline at the
    relevant peak window, and records the consumers that failed to deliver
    the required load reduction (`substandard_cons`), plus per-org summary
    rows in the configured save table.
    """

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelt,  # NOTE: unused; kept (typo and all) for call-signature compatibility
        rewrite,
    ):
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        # Date strings reused by the SQL / Elasticsearch queries below.
        self.today = self.start_time.strftime("%Y-%m-%d")
        self.yesterday = (self.start_time - timedelta(days=1)).strftime('%Y-%m-%d')

    def save_subst_cons(self, on7, control_time, res_subst_cons_df):
        """Persist one org's sub-standard (non-compliant) consumers.

        Enriches the rows with consumer names, then replaces today's records
        for (compute_date, control_time, on7) in `substandard_cons` so the
        write is idempotent across repeated runs.
        """
        if res_subst_cons_df.empty:
            self.logger.error(
                "不达标用户为空,task_id:{},step:{},star_time:{},on7:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )
            return
        sql = f"select cons_no, cons_name from c_cons where on7 = {on7} and type_code_sort=1"
        cons_df = self.sql_engine.query(sql)

        res_subst_cons_df = res_subst_cons_df.merge(
            cons_df, on="cons_no", how="left"
        )

        # Clear any earlier result of the same run before inserting.
        sql = f"""delete from substandard_cons 
                  where compute_date = '{self.today}'
                    and control_time = '{control_time}' 
                    and on7={on7}
               """
        self.sql_engine.engine.execute(sql)
        self.sql_engine.update_df_by_id(res_subst_cons_df, "substandard_cons")
        print(
            "不达标用户写入mysql,task_id:{},step:{},star_time:{},on7:{},数据条数:{}".format(
                self.task_id,
                self.step,
                self.start_time,
                on7,
                len(res_subst_cons_df),
            )
        )

    def second_judge(self, subst_cons_df, merge_df, on5, on7):
        """Re-check single-point judgements against the load forecast.

        A consumer whose latest point equals the forecast value is treated as
        forecast backfill rather than a real measurement; only consumers whose
        value differs from the prediction (i.e. real data) are kept.
        Returns an empty frame when the forecast index has no data.
        """
        cons_no_list = subst_cons_df['cons_no'].tolist()

        rules = [("data_time", "query", "=", f"{self.today} {self.latest_time_point}"),
                 ("cons_no", "query", "in", cons_no_list)]
        index_name = f"{config['POWERLOAD_PREDICT_PRE_IDX']}{on5}-{on7}"
        predict_df = self.es_dal.query_dataframe(
            rules,
            index_name,
            doc_time_field="data_time",
            source=["cons_no", "data_time", "p_total"],
        )
        if predict_df.empty:
            self.logger.warning(
                "查询数据为空,task_id:{},step:{},star_time:{},index:{},date:{},on7:{}...".format(
                    self.task_id,
                    self.step,
                    self.start_time,
                    index_name,
                    self.today,
                    on7,
                )
            )
            return pd.DataFrame()
        predict_df["data_time"] = predict_df["data_time"].dt.strftime("%H:%M")

        subst_merge_df = pd.merge(predict_df, merge_df, on=['cons_no', 'data_time'], how='left')
        # Compare against the prediction and keep only real (measured) values.
        subst_merge_df = subst_merge_df.loc[
            subst_merge_df['p_total'] != subst_merge_df['p_total_today']]
        return subst_merge_df

    def delete_control_res(self, row):
        """Remove today's control results for one org (between-peaks reset)."""
        on7 = row['on7']
        control_time = row['control_time']

        where_sql = f"""where compute_date = '{self.today}'
                        and control_time = '{control_time}' 
                        and on7={on7}
                     """
        sql = f"delete from {self.save_table} {where_sql}"
        self.sql_engine.engine.execute(sql)
        sql = f"delete from substandard_cons {where_sql}"
        self.sql_engine.engine.execute(sql)
        print(
            "清空调控结果数据,task_id:{},step:{},star_time:{},on7:{}".format(
                self.task_id,
                self.step,
                self.start_time,
                on7
            )
        )

    def get_city_cons_load(self, on5, on7, data_date, time_type, row):
        """Query consumer load curves for one org and date from Elasticsearch.

        time_type 1: the single latest 15-min point;
        time_type 2: both peak windows (12:00-14:00 and 20:00-22:00);
        otherwise : the plan's own window [ctrl_st, ctrl_et).
        Returns a frame with `data_time` reformatted as 'HH:MM'.
        """
        if time_type == 1:  # single latest point
            rules = [("data_time", "query", "=", f"{data_date} {self.latest_time_point}")]
        elif time_type == 2:  # average of both peaks
            rules = [("data_time", "query", ">=", f"{data_date} 12:00:00"),
                     ("data_time", "query", "<", f"{data_date} 22:00:00")]
        else:  # average over the plan's single peak window
            rules = [("data_time", "query", ">=", f"{data_date} {row['ctrl_st']}"),
                     ("data_time", "query", "<", f"{data_date} {row['ctrl_et']}")]

        rules.extend([
            ("on7", "query", "=", on7),
            ("type_code_sort", "query", "=", "1")
        ])
        index_name = self.get_power_96_idx_name(on5, on7)
        cons_df = self.es_dal.query_dataframe(
            rules,
            index_name,
            doc_time_field="data_time",
            source=["cons_no", "data_time", "p_total", "p_kt"],
        )
        if cons_df.empty:
            self.logger.warning(
                "查询数据为空,task_id:{},step:{},star_time:{},index:{},date:{},on7:{}...".format(
                    self.task_id,
                    self.step,
                    self.start_time,
                    index_name,
                    data_date,
                    on7,
                )
            )
            return cons_df
        cons_df["data_time"] = cons_df["data_time"].dt.strftime("%H:%M")
        if time_type == 2:  # drop the off-peak gap between the two windows
            cons_df = cons_df.loc[(cons_df['data_time'] < '14:00') |
                                  (cons_df['data_time'] >= '20:00')]
        return cons_df

    def proc_on7_alert_city(self, row, time_type):
        """
        Compute today's non-compliant consumers for one org and persist both
        the per-consumer detail and the per-org summary row.
        """
        on5 = row["on5"]
        on7 = row["on7"]
        control_time = row["control_time"]
        threshold = row["control_prop"]
        n_cons_total = row["cons_count"]

        today_df = self.get_city_cons_load(on5, on7, self.today, time_type, row)
        yesterday_df = self.get_city_cons_load(on5, on7, self.yesterday, time_type, row)
        if today_df.empty or yesterday_df.empty:
            return

        merge_df = pd.merge(
            today_df,
            yesterday_df,
            on=["cons_no", "data_time"],
            how="inner",
            suffixes=("_today", "_yesterday"),
        )

        # Drop points with zero load yesterday; a consumer with all 96 points
        # at zero is counted as compliant.
        merge_df = merge_df.loc[merge_df['p_kt_yesterday'] != 0]

        # Per-point deltas: total load drop, AC load drop, and the required drop.
        merge_df["total_diff"] = merge_df["p_total_yesterday"] - merge_df["p_total_today"]
        merge_df["kt_diff"] = merge_df["p_kt_yesterday"] - merge_df["p_kt_today"]
        merge_df["thre_diff"] = merge_df["p_kt_yesterday"] * threshold

        # Fix: groupby column selection must use a list (the tuple form was
        # removed in pandas 2.0).
        avg_df = merge_df.groupby("cons_no")[
            ["total_diff", "kt_diff", "thre_diff", "p_kt_yesterday"]
        ].mean().reset_index()
        subst_cons_df = avg_df.loc[avg_df["kt_diff"] <= avg_df["thre_diff"]].copy()

        # Single-point mode: confirm the point is real (already collected) data.
        if time_type == 1 and not subst_cons_df.empty:
            subst_cons_df = self.second_judge(subst_cons_df, merge_df, on5, on7)
        # Second pass on p_total: total-load drop also below the AC reduction target.
        if not subst_cons_df.empty:
            subst_cons_df = subst_cons_df.loc[
                subst_cons_df["total_diff"] <= subst_cons_df["thre_diff"]].copy()

        n_cons_subst = len(subst_cons_df)
        n_cons_st = n_cons_total - n_cons_subst

        if not subst_cons_df.empty:
            # Customer requirement: clamp any rate below -1 to -1.
            subst_cons_df["rate"] = subst_cons_df["kt_diff"] / subst_cons_df["p_kt_yesterday"]
            subst_cons_df.loc[subst_cons_df["rate"] < -1, "rate"] = -1
            subst_cons_df = subst_cons_df[["cons_no", "rate"]]

        res_dict = {
            "compute_date": self.today,
            "on5": on5,
            "on7": on7,
            "control_time": control_time,
            "is_control": 1,
            "n_cons_total": n_cons_total,
            "n_cons_st": n_cons_st,
            "n_cons_subst": n_cons_subst,
        }
        print("control_time:", control_time, ",n_cons_total:", n_cons_total, ",n_cons_st:", n_cons_st, ",n_cons_subst:", n_cons_subst)

        if not subst_cons_df.empty:
            subst_cons_df["on5"] = on5
            subst_cons_df["on7"] = on7
            subst_cons_df["control_time"] = control_time
            subst_cons_df["compute_date"] = self.today
            subst_cons_df = subst_cons_df[
                ["cons_no", "rate", "on5", "on7", "control_time", "compute_date"]
            ]
            self.save_subst_cons(on7, control_time, subst_cons_df)

        self.res_df = pd.DataFrame([res_dict])
        self.save_data_to_table()

    def get_query_time(self, now_time, now_str):
        """Snap *now* down to the latest 15-minute grid point.

        Stores the result on ``self.latest_time_point`` as 'HH:MM:SS'.
        """
        minute = int(now_str[-2:])
        closest_minute = minute // 15 * 15  # floor to the 15-minute grid
        early_minutes = 0 * 15  # look-back offset in grid points; default 0
        self.latest_time_point = (
            now_time.replace(minute=closest_minute, second=0, microsecond=0)
            - timedelta(minutes=early_minutes)
        ).strftime('%H:%M:%S')

    def proc_alert_city(self, alert_df):
        """Dispatch each alerted org to the right judgement mode by time of day."""
        now_time = datetime.now()
        now_str = now_time.strftime('%H:%M')
        self.get_query_time(now_time, now_str)

        for index, row in alert_df.iterrows():
            if row['plan_type'] == 2 or row['ctrl_st'] is None:
                # Automatic plan, or a manual plan covering both peaks.
                if (now_str >= '12:00' and now_str < '14:00') or (
                        now_str >= '20:00' and now_str < '22:00'):
                    self.proc_on7_alert_city(row, 1)  # inside a peak: single point
                elif now_str >= '14:00' and now_str < '20:00':
                    self.delete_control_res(row)  # between peaks: reset results
                else:
                    self.proc_on7_alert_city(row, 2)  # after peaks: both-peak average
            else:
                # Manual plan with a single control window [ctrl_st, ctrl_et).
                if now_str >= row['ctrl_st'][:5] and now_str < row['ctrl_et'][:5]:
                    self.proc_on7_alert_city(row, 1)
                elif now_str >= row['ctrl_et'][:5]:
                    self.proc_on7_alert_city(row, 3)  # window ended: window average
                else:
                    continue

    def get_city_alert(self):
        """Load today's approved control plans and attach consumer counts.

        Province-level approvals (org_level 1) match on the parent org (on5);
        city-level approvals (org_level 2) match on the org itself (on7).
        Returns one row per (on5, on7) with plan window and consumer count,
        or the empty approval frame when nothing is approved today.
        """
        sql = f"""select c.org_no, control_time, end_time, control_prop, r.org_level, plan_type,
                         ctrl_st, ctrl_et
	              from city_approval_info c
	              left join real_org_no r on r.org_no=c.org_no
	              where control_time = '{self.today}'
               """
        approval_df = self.sql_engine.query(sql)
        if approval_df.empty:
            return approval_df

        on5_df = approval_df.loc[approval_df['org_level'] == 1].rename(columns={"org_no": "on5"})
        on7_df = approval_df.loc[approval_df['org_level'] == 2].rename(columns={"org_no": "on7"})

        on5_list = on5_df['on5'].astype(str).tolist()
        on7_list = on7_df['on7'].astype(str).tolist()

        where_list = []
        if on5_list:
            where_list.append(f"p_org_no in ({','.join(on5_list)})")
        if on7_list:
            where_list.append(f"org_no in ({','.join(on7_list)})")
        where_sql = "where " + " or ".join(where_list)

        sql = f"""select p_org_no as on5, org_no as on7, cons_count
                  from real_org_no r
	              right join 
	                (select on7, count(distinct cons_no) as cons_count 
	                 from aclr_base_doc_all 
	                 where type_code_sort=1 
	                 group by on7) c
	                on c.on7=r.org_no
                  {where_sql}
               """
        cons_count_df = self.sql_engine.query(sql)
        on5_df = on5_df.merge(cons_count_df, on='on5', how='inner')
        on7_df = on7_df.merge(cons_count_df, on='on7', how='inner')

        columns = ["on5", "on7", "control_time", "end_time", "control_prop", "plan_type",
                   "cons_count", "ctrl_st", "ctrl_et"]
        alert_df = pd.concat([on5_df[columns], on7_df[columns]])
        return alert_df

    def main(self):
        """Entry point: evaluate today's control results for alerted cities."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        t1 = time.time()

        now_str = datetime.now().strftime('%H:%M')
        # Fix: the original `if '12:00':` was always truthy (and the stray
        # `now_str <` fragment below was a syntax error); the intended guard
        # is "before noon there is nothing to judge yet".
        if now_str < '12:00':
            self.logger.info(
                "当前时间未到12:00，无需判断空调调控效果,task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return

        alert_df = self.get_city_alert()
        if alert_df.empty:
            self.logger.info(
                "未查询到今日预警地市,task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        self.proc_alert_city(alert_df)
        t2 = time.time()
        self.logger.info(
            "处理完成time:{},task_id:{},step:{},star_time:{} ...".format(
                t2 - t1, self.task_id, self.step, self.start_time
            )
        )


if __name__ == "__main__":
    import os

    from air_web.dw.logger import init_log

    # Ensure a local log directory exists before the logger is initialised.
    if not os.path.exists("./logs/"):
        os.mkdir("./logs/")
    log = init_log("./logs/")

    # Target table for this run; switch to "city_control" to run CityControl.
    step_dict = {"save_table": "city_alert"}
    # step_dict = {"save_table": "city_control"}
    GetCityAlert(
        log,
        {"task_id": 4},
        "2024-08-03 00:00:00",
        "2024-08-04 00:00:00",
        "step",
        step_dict,
        "1 D",
        rewrite=False,
    ).main()
