import getopt
import sys
from datetime import datetime, timedelta

import pandas as pd

from air_web.config.config import config
from air_web.data_platform import sql_engine
from air_web.web_flask.tools.air_condition_show import get_baseline_day
from air_web.web_flask.dal.base_dal import EsBaseDal
from air_web.web_flask.common.logger import init_log


class CtrlReport:
    """Generate the daily demand-control Excel report (``ctrl_report.xlsx``).

    Compares load on the control day (``today``) against a baseline day,
    both at city level (MySQL 15-minute aggregates) and at customer level
    (per-customer 15-minute points queried from Elasticsearch), then writes
    one sheet per audience: heat-alert cities, province summary, per-city
    detail, and non-compliant customers.
    """

    # Column headers for the "non-compliant customers" sheet.
    # Fix: the original mapped both cons_name and cons_no to "户名", which
    # produced two identically named Excel columns; cons_no is "户号".
    CONS_COLS_MAP = {
        "cons_name": "户名", "cons_no": "户号", "p_diff_max": "最大压降负荷",
        "p_diff_avg": "平均压降负荷", "rate": "平均压降比率"
    }

    # Column headers for the per-city sheet.
    DIFF_COLS_MAP = {
        "shi": "市（州）公司",  "p_diff_max": "调节最大负荷", "p_diff_bus_max": "调节最大负荷（商业）",
        "p_diff_pub_max": "调节最大负荷（公共机构）", "p_diff_avg": "调节平均负荷",
        "p_diff_bus_avg": "调节平均负荷（商业）", "p_diff_pub_avg": "调节平均负荷（公共机构）",
        "rate": "地市执行达标情况"
    }

    # Column headers for the province-level sheet.
    PRO_COLS_MAP = {"n_cons_total": "参与调节用户数", "rate": "户执行达标率"}

    def __init__(self, today, baseline_day, start_time, end_time):
        """
        :param today: control day, "YYYY-MM-DD"
        :param baseline_day: baseline day, "YYYY-MM-DD"
        :param start_time: window start "HH:MM:SS", or None to auto-detect
        :param end_time: window end "HH:MM:SS", or None to auto-detect
        """
        self.today = today
        self.baseline_day = baseline_day
        self.es_dal = EsBaseDal(config["ES_HOST"])

        self.start_time = start_time
        self.end_time = end_time

        self.log = init_log(None)

    def query_diff_df(self, type_id, on5_list):
        """Query per-city, per-quarter load difference (baseline - today).

        :param type_id: industry type id (0 = all, 101 = commercial,
            102 = public institutions — inferred from call sites)
        :param on5_list: list of city org-code strings, joined into SQL
        :return: DataFrame[on5, quarter, p_diff, p_kt_baseline]
        """
        # NOTE(review): values are interpolated into the SQL string. Inputs
        # come from internal CLI/config today, but parameterized queries
        # would be safer if that ever changes.
        # Fix: the baseline window now uses '<=' like the today window did;
        # the original '<' dropped the last quarter from the baseline side
        # of the point-to-point join.
        s_sql = f"""
                   select a.org_no as on5, a.quarter, b.p_kt_sum-a.p_kt_sum as p_diff,
                          b.p_kt_sum as p_kt_baseline
                   from
                   ((select
                       org_no,
                       date_format( data_time, '%%H:%%i' ) quarter,
                       p_kt_sum
                   from
                       orgno_typeid_15min
                   where
                        data_time >= '{self.today} {self.start_time}'
                        and data_time <= '{self.today} {self.end_time}'
                       and type_id = {type_id}
                       and org_no in ({','.join(on5_list)})
                   ) a
                   left join (select
                       org_no,
                       date_format( data_time, '%%H:%%i' ) quarter,
                       p_kt_sum
                   from
                       orgno_typeid_15min
                   where
                       data_time >= '{self.baseline_day} {self.start_time}'
                       and data_time <= '{self.baseline_day} {self.end_time}'
                       and type_id = {type_id}
                       and org_no in ({','.join(on5_list)})
                   ) b on a.org_no=b.org_no and a.quarter = b.quarter)
                 """
        diff_df = sql_engine.query(s_sql)
        return diff_df

    def get_diff_data(self, on5_df, on5_list):
        """Build the per-city sheet.

        调节平均负荷: baseline load minus today's load, point-to-point mean.
        调节最大负荷: baseline load minus today's load, point-to-point max.

        :param on5_df: DataFrame[on5, shi] mapping org codes to city names
        :param on5_list: list of city org-code strings
        :return: renamed DataFrame limited to DIFF_COLS_MAP columns
        """
        # All-industry diff per city: after a descending sort,
        # drop_duplicates keeps the row with the largest p_diff per city.
        org_diff_df = self.query_diff_df(0, on5_list)
        org_diff_df = org_diff_df.sort_values("p_diff", ascending=False)
        max_org_diff_df = org_diff_df.drop_duplicates("on5")
        # Fix: select with a list, not a tuple key — tuple keys on a
        # GroupBy were deprecated and later removed from pandas.
        avg_org_diff_df = (
            org_diff_df.groupby("on5")[["p_diff", "p_kt_baseline"]].mean().reset_index()
        )
        avg_org_diff_df["rate"] = avg_org_diff_df.apply(
            lambda x: x['p_diff'] / x['p_kt_baseline'] if x['p_kt_baseline'] != 0 else 0,
            axis=1)

        # Commercial (type 101) diff, joined onto the all-industry rows.
        bus_diff_df = self.query_diff_df(101, on5_list)
        max_diff_df = pd.merge(max_org_diff_df, bus_diff_df, on=["on5", "quarter"],
                               how="left", suffixes=("", "_bus"))
        avg_bus_diff_df = bus_diff_df.groupby("on5")["p_diff"].mean().reset_index()
        avg_diff_df = pd.merge(avg_org_diff_df, avg_bus_diff_df, on="on5",
                               how="left", suffixes=("", "_bus"))

        # Public-institution (type 102) diff, joined the same way.
        pub_diff_df = self.query_diff_df(102, on5_list)
        max_diff_df = pd.merge(max_diff_df, pub_diff_df, on=["on5", "quarter"],
                               how="left", suffixes=("", "_pub"))
        avg_pub_diff_df = pub_diff_df.groupby("on5")["p_diff"].mean().reset_index()
        avg_diff_df = pd.merge(avg_diff_df, avg_pub_diff_df, on="on5",
                               how="left", suffixes=("", "_pub"))

        # Combine max and avg (suffixes produce p_diff_max / p_diff_avg
        # etc.), attach city names, and keep the original ascending sort.
        # NOTE(review): ascending order puts the smallest reduction first —
        # presumably intentional (worst performers on top); confirm.
        diff_df = pd.merge(max_diff_df, avg_diff_df, on="on5", suffixes=("_max", "_avg"))
        diff_df = diff_df.merge(on5_df, on="on5", how="left")
        diff_df.sort_values("p_diff_max", inplace=True)

        diff_df = diff_df[list(self.DIFF_COLS_MAP)]
        diff_df = diff_df.rename(columns=self.DIFF_COLS_MAP)
        return diff_df

    def get_cons_name(self, cons_df):
        """Attach customer names (cons_name) to a frame keyed by cons_no."""
        cons_list = cons_df["cons_no"].tolist()
        # NOTE(review): string-built IN clause; values originate from our
        # own ES documents, so injection risk is low but non-zero.
        sql = f"""select cons_no,cons_name
                  from c_cons 
                  where cons_no in ('{"','".join(cons_list)}')
               """
        df = sql_engine.query(sql)
        cons_df = cons_df.merge(df, on='cons_no', how='left')
        return cons_df

    @staticmethod
    def get_power_96_idx_name(on5, on7):
        """Build the ES index name for the 96-point load data of one substation.

        The configured template contains the placeholders "on5", "on7" and
        "cal01" which are substituted here.
        """
        is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
        index_name = (
            config["POWERLOAD"]
            .replace("on5", str(on5))
            .replace("on7", str(on7))
            .replace("cal01", is_cal)
        )
        return index_name

    def get_city_cons_load(self, on5, on7, data_date):
        """Query per-customer load points for one substation and one day.

        :return: DataFrame[cons_no, data_time ("HH:MM"), p_kt]; empty frame
            when ES has no data for this index/window.
        """
        time_list = [f"{data_date} {self.start_time}", f"{data_date} {self.end_time}"]

        # NOTE(review): the "in" operator with a two-element time list looks
        # like a range filter in EsBaseDal — confirm against the DAL.
        rules = [
            ("on7", "query", "=", on7),
            ("data_time", "query", "in", time_list)
        ]
        index_name = self.get_power_96_idx_name(on5, on7)
        cons_df = self.es_dal.query_dataframe(
            rules,
            index_name,
            doc_time_field="data_time",
            source=["cons_no", "data_time", "p_kt"],
        )
        if cons_df.empty:
            self.log.warning("查询数据为空,index:{},on7:{}...".format(index_name, on7))
            return cons_df
        # Keep only the HH:MM quarter so today/baseline rows align on merge.
        cons_df["data_time"] = cons_df["data_time"].dt.strftime("%H:%M")
        return cons_df

    def get_subst_cons(self, on5_list, rate_threshold=0.25):
        """Compute compliance statistics per customer, city and province.

        A customer is non-compliant when its mean reduction rate
        ((baseline - today) / baseline, averaged over non-zero baseline
        points) is <= ``rate_threshold``.

        :param on5_list: list of city org-code strings
        :param rate_threshold: compliance threshold (default 0.25 = 25%)
        :return: (cons_df, on5_df, pro_df) — non-compliant customers,
            per-city compliance counts, province summary; empty frames when
            nothing was found.
        """
        sql = f"""select p_org_no as on5, org_no as on7, cons_count
                  from real_org_no r
	              right join 
	                (select on7, count(distinct cons_no) as cons_count 
	                 from aclr_base_doc_all 
	                 where type_code_sort=1 
	                 group by on7) c
	                on c.on7=r.org_no
                  where p_org_no in ({','.join(on5_list)})
               """
        on7_count_df = sql_engine.query(sql)

        on7_list = []
        cons_list = []
        for index, row in on7_count_df.iterrows():
            on5 = row["on5"]
            on7 = row["on7"]
            n_cons_total = row["cons_count"]

            today_df = self.get_city_cons_load(on5, on7, self.today)
            yesterday_df = self.get_city_cons_load(on5, on7, self.baseline_day)
            if today_df.empty or yesterday_df.empty:
                continue

            merge_df = pd.merge(
                today_df,
                yesterday_df,
                on=["cons_no", "data_time"],
                how="inner",
                suffixes=("_today", "_yesterday"),
            )

            # Drop points with zero baseline load; a customer whose points
            # are all zero counts as compliant.  .copy() avoids writing into
            # a view (SettingWithCopyWarning / silently lost assignments).
            rate_df = merge_df.loc[merge_df['p_kt_yesterday'] != 0].copy()
            rate_df['p_diff'] = rate_df["p_kt_yesterday"] - rate_df["p_kt_today"]
            rate_df["rate"] = rate_df['p_diff'] / rate_df["p_kt_yesterday"]

            avg_rate_df = rate_df.groupby("cons_no")["rate"].mean().reset_index()
            subst_cons_df = avg_rate_df.loc[avg_rate_df["rate"] <= rate_threshold]

            n_cons_subst = len(subst_cons_df)
            on7_dict = {
                "on5": on5,
                "on7": on7,
                "n_cons_total": n_cons_total,
                "n_cons_st": n_cons_total-n_cons_subst
            }
            on7_list.append(on7_dict)

            if n_cons_subst == 0:
                self.log.info(f"on7:{on7}无未达标用户")
                continue

            # Keep only the non-compliant customers' points and summarize.
            rate_df = rate_df.loc[rate_df['cons_no'].isin(subst_cons_df["cons_no"])]
            max_diff_df = rate_df.groupby("cons_no")["p_diff"].max().reset_index()
            avg_diff_df = rate_df.groupby("cons_no")["p_diff"].mean().reset_index()

            cons_df = pd.merge(max_diff_df, avg_diff_df, on="cons_no", suffixes=("_max", "_avg"))
            cons_df = cons_df.merge(avg_rate_df, on="cons_no")
            cons_list.append(cons_df)

        if len(cons_list) == 0:
            self.log.warning("未达标用户数为0")
            cons_df = pd.DataFrame()
        else:
            cons_df = pd.concat(cons_list)
            cons_df = self.get_cons_name(cons_df)
            cons_df = cons_df[list(self.CONS_COLS_MAP)]
            cons_df = cons_df.rename(columns=self.CONS_COLS_MAP)

        if len(on7_list) == 0:
            self.log.warning("未查询到地市达标情况")
            on5_df, pro_df = pd.DataFrame(), pd.DataFrame()
        else:
            on7_df = pd.DataFrame(on7_list)
            on5_df = on7_df.groupby("on5")[["n_cons_total", "n_cons_st"]].sum().reset_index()
            on5_df["rate"] = on5_df.apply(
                lambda x: round(x["n_cons_st"] / x["n_cons_total"], 3)
                if x["n_cons_total"] != 0 else 0, axis=1)

            # Roll cities up into one province-wide row.
            on5_df["pro"] = "全省"
            pro_df = on5_df.groupby("pro")[["n_cons_total", "n_cons_st"]].sum().reset_index()
            pro_df["rate"] = pro_df.apply(
                lambda x: round(x["n_cons_st"] / x["n_cons_total"], 3)
                if x["n_cons_total"] != 0 else 0, axis=1)
            pro_df = pro_df[list(self.PRO_COLS_MAP)]
            pro_df = pro_df.rename(columns=self.PRO_COLS_MAP)

        return cons_df, on5_df, pro_df

    def get_start_and_end_time(self):
        """Fill in self.start_time / self.end_time when not given.

        The window is the 15-minute quarter that starts 45-60 minutes ago
        (data arrives with a delay), i.e. (now - 45min) floored to the
        quarter boundary, 15 minutes long.
        """
        if self.start_time and self.end_time:
            self.log.info(f"查询数据时间点:{self.start_time},{self.end_time}")
            return
        now_time = datetime.now().replace(second=0, microsecond=0)

        # Fix: the original replace(hour=hour-1) raised ValueError just
        # after midnight (hour == 0); timedelta arithmetic rolls over
        # correctly and yields the same quarter for every other time.
        anchor = now_time - timedelta(minutes=45)
        start_time = anchor.replace(minute=anchor.minute - anchor.minute % 15)

        self.start_time = start_time.strftime("%H:%M:%S")
        self.end_time = (start_time + timedelta(minutes=15)).strftime("%H:%M:%S")
        self.log.info(f"现在时间:{now_time}，查询数据时间点:{self.start_time},{self.end_time}")

    def get_ctrl_on5_list(self, on5_list):
        """Resolve the controlled cities for the day.

        :param on5_list: explicit org codes from the CLI; when empty, the
            cities are read from city_approval_info for self.today.
        :return: DataFrame[on5, shi]; empty DataFrame when none found
            (fix: the original returned a bare [] here, an inconsistent
            type — callers only check len(), so this stays compatible).
        """
        if len(on5_list) > 0:
            sql = f"""select org_no as on5, org_name as shi 
                      from real_org_no 
                      where org_no in ({','.join(on5_list)})
                   """
        else:
            sql = f"""select c.org_no as on5, r.org_name as shi
                      from city_approval_info c
                      left join real_org_no r on c.org_no=r.org_no and r.org_level=1
                      where control_time <= '{self.today}'
                        and end_time >= '{self.today}'
                   """
        on5_df = sql_engine.query(sql)
        if on5_df.empty:
            self.log.warning(f"{self.today}无调控地市")
        return on5_df

    def get_high_tmp_alert_city(self):
        """Return the cities that issued an orange heat alert up to today."""
        sql = f"""select distinct city_name 
                  from city_write 
                  where data_time <='{self.today}' 
                  and heat_alert=1
               """
        alert_df = sql_engine.query(sql)
        # Fix: rename() returns a new frame; the original discarded it, so
        # the sheet kept the raw "city_name" header.
        alert_df = alert_df.rename(columns={"city_name": "高温橙色预警地市"})
        return alert_df

    def main(self, on5_list):
        """Run the whole report and write ctrl_report.xlsx."""
        on5_df = self.get_ctrl_on5_list(on5_list)
        if len(on5_df) == 0:
            return
        on5_list = on5_df["on5"].astype(str).tolist()
        self.log.info(f"{self.today}调控地市:{on5_list}")

        alert_city_df = self.get_high_tmp_alert_city()
        self.get_start_and_end_time()
        cons_df, on5_rate_df, pro_df = self.get_subst_cons(on5_list)
        diff_df = self.get_diff_data(on5_df, on5_list)

        # Fix: the context manager saves the workbook on exit; the explicit
        # writer.save() was removed in pandas 2.0 and would raise there.
        # The stray debug export to 'xx.xlsx' was dropped as well.
        with pd.ExcelWriter('ctrl_report.xlsx') as writer:
            alert_city_df.to_excel(writer, sheet_name='发布过高温橙色预警地市', index=False)
            pro_df.to_excel(writer, sheet_name='全省信息', index=False)
            diff_df.to_excel(writer, sheet_name='地市信息', index=False)
            cons_df.to_excel(writer, sheet_name='未达标用户信息', index=False)
        print("生成excel成功：ctrl_report.xlsx")


if __name__ == "__main__":
    today = datetime.today().strftime("%Y-%m-%d")
    baseline_day = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    start_time = None
    end_time = None
    on5_list = []

    opts, args = getopt.getopt(sys.argv[1:], "t:b:s:e:o:")
    for opt, val in opts:
        if opt == "-t":
            today = val
        elif opt == '-b':
            baseline_day = val
        elif opt == '-s':
            start_time = val
        elif opt == '-e':
            end_time = val
        elif opt == '-o':
            on5_list = val.split(',')

    CtrlReport(today, baseline_day, start_time, end_time).main(on5_list)
