import getopt
import multiprocessing
import os
import sys
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
import tqdm
from elasticsearch import Elasticsearch

from air_web.config.config import config
from air_web.data_platform import sql_engine
from air_web.web_flask.dal.base_dal import EsBaseDal


def get_on5_data(on5, cid_list, data_date, start_time, end_time):
    """Query per-consumer 15-min load rows for one on5 district on one day.

    Returns a DataFrame with c_id / data_time / on5 / p_total where
    data_time is reduced to "HH:MM" so rows from different dates align on
    merge; an empty frame is returned untouched when ES has no hits.
    """
    query_rules = [
        ("data_time", "query", ">=", f"{data_date} {start_time}"),
        ("data_time", "query", "<=", f"{data_date} {end_time}"),
        ("c_id", "query", "in", cid_list),
    ]
    # Restrict to calibrated documents only when the config flag is set.
    cal_part = "1" if config["c_cons_filter_is_cal"] else "*"
    idx = power_96_idx.replace("on5", str(on5))
    idx = idx.replace("on7", "*").replace("cal01", cal_part)
    result_df = es_dal.query_dataframe(
        query_rules,
        idx,
        doc_time_field="data_time",
        source=["c_id", "data_time", "on5", "p_total"],
    )
    if result_df.empty:
        return result_df
    result_df["data_time"] = result_df["data_time"].dt.strftime("%H:%M")
    return result_df


def export_busi_on5_max_diff(base_date, today_date, start_time, end_time):
    """Export, per city, the count of large commercial consumers and their
    total load drop at the time slot of maximum province-wide drop.

    The per-consumer "drop" is baseline-day load minus today's load at the
    same time of day; the reported time slot is where the summed drop peaks.
    Writes busi_city.xlsx in the working directory.
    """
    # Commercial consumers with contract capacity > 2000 (商业用户且容量>2000).
    sql = """select distinct c_id, cons_no, cons_name, on5, shi
             from aclr_base_doc_all
             where pare_type_id=101 and contract_cap > 2000"""
    cons_df = sql_engine.query(sql)

    # Map each district (on5) to the list of its consumer ids.
    on5_dict = (
        cons_df[["on5", "c_id"]].groupby("on5")["c_id"].agg(list).to_dict()
    )
    df = pd.DataFrame()
    for on5, cid_list in on5_dict.items():
        print("on5:", on5)
        base_df = get_on5_data(on5, cid_list, base_date, start_time, end_time)
        today_df = get_on5_data(
            on5, cid_list, today_date, start_time, end_time
        )
        # BUGFIX: an empty ES result carries no c_id/data_time/on5 columns,
        # so merging it raises KeyError; skip districts missing either day.
        if base_df.empty or today_df.empty:
            continue

        on5_df = pd.merge(
            today_df,
            base_df,
            on=["c_id", "data_time", "on5"],
            how="left",
            suffixes=("", "_baseline"),
        )
        on5_df["p_total_diff"] = on5_df["p_total_baseline"] - on5_df["p_total"]
        df = pd.concat([df, on5_df])

    # BUGFIX: idxmax on an empty aggregation raises ValueError; bail out
    # with a message instead of crashing when no district had data.
    if df.empty:
        print("未查询到负荷数据，不导出busi_city.xlsx")
        return

    # Time-of-day slot where the total drop across all consumers peaks.
    sum_df = df.groupby("data_time")["p_total_diff"].sum()
    max_time = sum_df.idxmax()

    df = df.loc[df["data_time"] == max_time]
    df = (
        df.groupby("on5")
        .agg({"c_id": "count", "p_total_diff": "sum"})
        .reset_index()
    )
    df.rename(columns={"c_id": "cons_count"}, inplace=True)

    # Attach the city name (shi) for each district.
    df = df.merge(
        cons_df[["on5", "shi"]].drop_duplicates(), on="on5", how="left"
    )
    df = df[["on5", "shi", "cons_count", "p_total_diff"]]
    df["max_time"] = max_time
    df.to_excel("./busi_city.xlsx", index=False)
    print(f"{today_date}{max_time}各个地市调节负荷导出完成:busi_city.xlsx")


def get_max_p_kt(
    start_time, end_time, type_id, max_time_dict, is_province=False
):
    """Return each org's air-conditioning load at its recorded peak slot.

    For an org (on5) already present in ``max_time_dict`` the load at that
    recorded time is used; otherwise the org's own peak is located and its
    time slot is written back into ``max_time_dict`` (mutated in place) so
    subsequent calls for other type_ids reuse the same slot.
    """
    org_level = 0 if is_province else 1

    s_sql = f""" 
               SELECT
                   on5 on5,
                   data_time,
                   p_kt_sum as p_kt
               FROM
                   orgno_typeid_15min 
               WHERE
                    data_time >= '{start_time}' and data_time < '{end_time}'
                   AND type_id = {type_id} 
                   AND org_no in (select org_no from real_org_no where org_level = {org_level})
             """
    frame = sql_engine.query(s_sql)
    records = []
    for on5, grp in frame.groupby("on5"):
        if on5 in max_time_dict:
            # Reuse the time slot fixed by an earlier call.
            peak_value = grp.loc[
                grp["data_time"] == max_time_dict[on5], "p_kt"
            ].tolist()[0]
        else:
            grp = grp.sort_values("p_kt", ascending=False)
            top_row = grp.iloc[0]
            peak_value = top_row["p_kt"]
            max_time_dict[on5] = top_row["data_time"]
        records.append({"on5": on5, "max_p_kt": peak_value})
    return pd.DataFrame(records)


def export_on5_max_p_kt(start_date, end_date):
    """Export each org's daily peak air-conditioning load between two dates.

    For every day in [start_date, end_date) it collects per-org peaks for
    all consumers (type_id 0), commercial (101) and public (102), pinned to
    the same per-org time slot via a shared max_time_dict, and writes the
    combined table to on5_max_p_kt.xlsx.
    """
    date_list = pd.date_range(
        start=start_date, end=end_date, freq="1D"
    ).strftime("%Y-%m-%d")

    all_res_df = pd.DataFrame()
    for i in range(len(date_list) - 1):
        # BUGFIX: the loop previously clobbered start_date/end_date, so the
        # final summary print reported only the last day pair instead of
        # the full requested range; use dedicated loop variables.
        day_start = date_list[i]
        day_end = date_list[i + 1]
        print(day_start)

        # Shared across the three type_ids so every series is sampled at
        # the same per-org time slot (the slot of the all-consumer peak).
        max_time_dict = {}
        all_on5_df = get_max_p_kt(day_start, day_end, 0, max_time_dict)
        all_pro_df = get_max_p_kt(
            day_start, day_end, 0, max_time_dict, is_province=True
        )
        all_df = pd.concat([all_on5_df, all_pro_df])
        res_df = all_df

        busi_on5_df = get_max_p_kt(day_start, day_end, 101, max_time_dict)
        busi_pro_df = get_max_p_kt(
            day_start, day_end, 101, max_time_dict, is_province=True
        )
        busi_df = pd.concat([busi_on5_df, busi_pro_df])

        if not busi_df.empty:
            res_df = pd.merge(
                res_df,
                busi_df,
                on="on5",
                how="left",
                suffixes=("", "_busi"),
            )
        else:
            res_df["max_p_kt_busi"] = None

        public_on5_df = get_max_p_kt(day_start, day_end, 102, max_time_dict)
        public_pro_df = get_max_p_kt(
            day_start, day_end, 102, max_time_dict, is_province=True
        )
        public_df = pd.concat([public_on5_df, public_pro_df])
        if not public_df.empty:
            res_df = pd.merge(
                res_df,
                public_df,
                on="on5",
                how="left",
                suffixes=("", "_pub"),
            )
        else:
            res_df["max_p_kt_pub"] = None

        res_df["data_date"] = day_start
        all_res_df = pd.concat([all_res_df, res_df])
    all_res_df.to_excel("./on5_max_p_kt.xlsx", index=False)
    print(f"{start_date}到{end_date}各地市最大空调负荷导出完成:on5_max_p_kt.xlsx")


def proc_day_on5(data_date, start_time):
    """Fetch every city's total load (type_id 0) at one exact 15-min slot.

    Returns on5 / shi / p_total / data_time rows with data_time reduced to
    "HH:MM" for merging across dates.
    """
    sql = f"""select org_no as on5, org_name as shi, p_total_sum as p_total, data_time
             from orgno_typeid_15min
             where data_time = '{data_date} {start_time}'
               and p_org_no='51101'
               and type_id = 0
             """
    day_df = sql_engine.query(sql)
    day_df["data_time"] = day_df["data_time"].dt.strftime("%H:%M")
    return day_df


def export_on5_kt(start_date, end_date, start_time, on5_list):
    """Export each city's load drop between two days at one time slot.

    ``start_date`` is the baseline day and ``end_date`` is "today";
    ``on5_list`` is accepted for interface compatibility but currently
    unused. Writes city.xlsx in the working directory.
    """
    sql = (
        """select on5, count(cons_no) as cons_count from c_cons group by on5"""
    )
    count_df = sql_engine.query(sql)

    baseline_df = proc_day_on5(start_date, start_time)
    current_df = proc_day_on5(end_date, start_time)

    merged = pd.merge(
        current_df,
        baseline_df,
        on=["on5", "shi", "data_time"],
        how="left",
        suffixes=("", "_baseline"),
    )
    merged["p_total_diff"] = merged["p_total_baseline"] - merged["p_total"]

    merged = merged.merge(count_df, on="on5", how="left")
    merged = merged[["on5", "shi", "cons_count", "p_total_diff"]]
    merged.to_excel("./city.xlsx", index=False)
    print(f"{start_date}各地市空调负荷导出完成:city.xlsx")


def export_ggjg(start_date, end_date, file, power_96_idx):
    """Export day-over-day air-conditioning load change for Chengdu
    (on5 51401) public-institution consumers listed in an Excel sheet.

    Reads consumer numbers from the given workbook, resolves them to c_ids
    via the c_cons table, pulls p_kt for both dates from ES and writes
    ggjg_kt_info.xlsx with the relative change per consumer.
    """
    cons_df = pd.read_excel(
        file, sheet_name="公共事业(6148户)", header=0, dtype=str
    )
    cons_list = cons_df["用户编号"].tolist()
    # NOTE(review): cons_no values are joined unquoted into the SQL IN
    # clause — this only parses if cons_no is numeric in the DB; confirm.
    sql = """select c_id, cons_no as 用户编号 
             from c_cons 
             where on5='51401' 
               and cons_no in ({cons_list})""".format(
        cons_list=",".join(cons_list)
    )
    cid_df = sql_engine.query(sql)
    print(f"成都市公共机构用户数量:{len(cid_df)}")
    c_id_list = cid_df["c_id"].tolist()
    # Resolve the index template to Chengdu's concrete index pattern.
    power_96_idx = power_96_idx.replace("on5", "51401").replace("on7", "*")

    # Baseline day ("yesterday") p_kt per consumer.
    rules = [
        ("data_time", "query", "=", start_date),
        ("c_id", "query", "in", c_id_list),
    ]
    yesterday_df = es_dal.query_dataframe(
        rules,
        power_96_idx,
        doc_time_field="data_time",
        source=["c_id", "p_kt"],
    )
    yesterday_df = yesterday_df.rename(columns={"p_kt": "p_kt_yesterday"})
    # Comparison day ("today") p_kt per consumer.
    rules = [
        ("data_time", "query", "=", end_date),
        ("c_id", "query", "in", c_id_list),
    ]
    today_df = es_dal.query_dataframe(
        rules,
        power_96_idx,
        doc_time_field="data_time",
        source=["c_id", "p_kt"],
    )
    today_df = today_df.rename(columns={"p_kt": "p_kt_today"})

    res_df = pd.merge(yesterday_df, today_df, on="c_id", how="left")
    res_df["rate"] = round(
        (res_df["p_kt_today"] - res_df["p_kt_yesterday"])
        / res_df["p_kt_yesterday"],
        4,
    )
    # Zero baselines produce ±inf from the division; blank them out.
    res_df["rate"].replace([np.inf, -np.inf], np.nan, inplace=True)

    # Attach consumer number, then the original workbook columns.
    res_df = res_df.merge(cid_df, on="c_id", how="left")
    res_df = res_df.merge(cons_df, on="用户编号", how="left")
    res_df = res_df.drop("c_id", axis=1)
    res_df.to_excel("./ggjg_kt_info.xlsx", index=False)
    print(f"{start_date}至{end_date}成都市公共机构空调负荷导出完成:ggjg_kt_info.xlsx")


def cal_pro(power_96_idx):
    """Compute each consumer's day-over-day peak p_kt change for one index.

    Reads module globals ``start_date``, ``end_date`` and ``es_dal``, and a
    global ``archives_df``.
    NOTE(review): ``archives_df`` is not defined anywhere in this file —
    confirm the caller sets it before invoking, otherwise this raises
    NameError.
    """
    # power_96_idx = power_96_idx.replace('on5', '51401').replace('on7', '*')

    rules = [
        ("data_time", "query", ">=", start_date),
        ("data_time", "query", "<", end_date),
    ]
    yesterday_df = es_dal.query_dataframe(
        rules,
        power_96_idx,
        doc_time_field="data_time",
        source=["c_id", "p_kt"],
    )
    # Sort descending then drop duplicates: keeps each consumer's max p_kt
    # within the baseline window.
    yesterday_df = yesterday_df.sort_values(
        by=["c_id", "p_kt"], ascending=False
    )
    yesterday_df = yesterday_df.drop_duplicates(subset="c_id")
    yesterday_df["p_kt"] = yesterday_df["p_kt"].round(4)
    yesterday_df = yesterday_df.rename(columns={"p_kt": "p_kt_yesterday"})
    yesterday_df = yesterday_df.set_index("c_id")
    # The comparison window is the single day starting at end_date.
    date = datetime.strptime(end_date, "%Y-%m-%d")
    next_day = date + timedelta(days=1)
    next_day = next_day.strftime("%Y-%m-%d")
    rules = [
        ("data_time", "query", ">=", end_date),
        ("data_time", "query", "<", next_day),
    ]
    today_df = es_dal.query_dataframe(
        rules,
        power_96_idx,
        doc_time_field="data_time",
        source=["c_id", "p_kt"],
    )
    # Same max-per-consumer reduction for the comparison day.
    today_df = today_df.sort_values(by=["c_id", "p_kt"], ascending=False)
    today_df = today_df.drop_duplicates(subset="c_id")
    today_df["p_kt"] = today_df["p_kt"].round(4)
    today_df = today_df.rename(columns={"p_kt": "p_kt_today"})
    today_df = today_df.set_index("c_id")
    # Align the two days by c_id index (outer join on index).
    res_df = pd.concat([yesterday_df, today_df], axis=1)
    # res_df = pd.merge(yesterday_df, today_df, on='c_id', how='left')
    res_df["rate"] = round(
        (res_df["p_kt_today"] - res_df["p_kt_yesterday"])
        / res_df["p_kt_yesterday"],
        4,
    )
    # Zero baselines produce ±inf from the division; blank them out.
    res_df["rate"].replace([np.inf, -np.inf], np.nan, inplace=True)

    df = archives_df[archives_df["c_id"].isin(res_df.index)][
        ["c_id", "shi", "xian", "org_name", "cons_name", "cons_no"]
    ]
    # NOTE(review): res_df is indexed by c_id but df is re-indexed by
    # cons_no before the axis=1 concat — alignment only works if cons_no
    # and c_id share values; verify this is intended.
    df = df.set_index("cons_no")
    res_df = pd.concat([res_df, df], axis=1)
    print(f"finish {power_96_idx}")

    return res_df


def get_all_index(power_96_idx):
    """Resolve an ES index pattern to the concrete index names it matches.

    NOTE(review): relies on a module-level ``es`` client that is not
    defined in this file — confirm it is created elsewhere before calling.
    """
    catalog = es.cat.indices(index=power_96_idx, format="json")
    return [entry["index"] for entry in catalog]


def cal_kt_huanbi(start_date, end_date):
    """Compute per-org day-over-day air-conditioning load ratio and export
    the result to huanbi_res.xlsx.

    The "99" filters select the aggregate/all-categories documents in the
    area index (one row per org at the given timestamp).
    """
    rules = [
        ("data_time", "query", "=", start_date),
        ("p_org_no", "query", "=", "99"),
        ("type_id", "query", "=", "99"),
        ("type_code_sort", "query", "=", "99"),
    ]
    yesterday_res = es_dal.query_dataframe(
        rules, power_area_idx, doc_time_field="data_time"
    )
    yesterday_res = yesterday_res[["org_no", "org_name", "p_kt"]].rename(
        columns={"p_kt": "p_kt_yesterday"}
    )

    rules = [
        ("data_time", "query", "=", end_date),
        ("p_org_no", "query", "=", "99"),
        ("type_id", "query", "=", "99"),
        ("type_code_sort", "query", "=", "99"),
    ]
    today_res = es_dal.query_dataframe(
        rules, power_area_idx, doc_time_field="data_time"
    )
    today_res = today_res[["org_no", "p_kt"]].rename(
        columns={"p_kt": "p_kt_today"}
    )

    res = pd.merge(yesterday_res, today_res, on="org_no", how="left")
    res["rate"] = round(
        (res["p_kt_today"] - res["p_kt_yesterday"]) / res["p_kt_yesterday"], 4
    )
    # CONSISTENCY FIX: zero baselines yield ±inf from the division; blank
    # them out, matching the treatment in export_ggjg and cal_pro.
    res["rate"].replace([np.inf, -np.inf], np.nan, inplace=True)

    res.to_excel("./huanbi_res.xlsx", index=False)
    print(f"计算{start_date}和{end_date}空调负荷环比且导出完成:huanbi_res.xlsx")


def get_cons_info(is_filter):
    """Load consumer archive rows from aclr_base_doc_all.

    When ``is_filter`` is truthy, restrict to large commercial consumers
    (pare_type_id 101 with contract capacity over 2000).
    """
    base_sql = """select distinct c_id, cons_no, cons_name, type_code, on5, shi, on7, xian, org_no, 
                    org_name 
             from aclr_base_doc_all
             """
    if is_filter:
        base_sql = f'{base_sql} where pare_type_id=101 and contract_cap > 2000'
    return sql_engine.query(base_sql)


def get_max_time():
    """Return the timestamp of the province-wide (org 99) load peak.

    Falls back to the sentinel "9999-01-01" when the table has no row.
    """
    sql = """select max_time
             from area_max_time
             where org_no='99'
             order by max_p_total desc
             limit 1
          """

    result = sql_engine.query(sql)
    if result.empty:
        print("未查到总负荷最大的时间")
        return "9999-01-01"
    peak_time = result["max_time"].iloc[0]
    print("总负荷最大的时间:{}".format(peak_time))
    return peak_time


def get_on5_data_1(on5, cid_list, data_date, start_time, end_time):
    """Query per-consumer 15-min total and air-conditioning load for one
    on5 district within a single day's time window.

    Returns c_id / data_time / p_total / p_kt / kt_ratio with data_time
    reduced to "HH:MM"; an empty frame is returned untouched.
    """
    rules = [
        ("data_time", "query", ">=", f"{data_date} {start_time}"),
        ("data_time", "query", "<=", f"{data_date} {end_time}"),
        ("on5", "query", "=", on5),
        ('c_id', 'query', 'in', cid_list)
    ]
    # Restrict to calibrated documents only when the config flag is set.
    is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
    index_name = (
        power_96_idx.replace("on5", str(on5))
            .replace("on7", "*")
            .replace("cal01", is_cal)
    )
    on5_df = es_dal.query_dataframe(
        rules,
        index_name,
        doc_time_field="data_time",
        source=[
            "c_id",
            "data_time",
            "p_total",
            "p_kt",
            "kt_ratio"
        ],
    )
    # BUGFIX: mirror get_on5_data — calling .dt.strftime on an empty ES
    # result (which has no datetime data_time column) raises; skip it.
    if on5_df.empty:
        return on5_df
    on5_df['data_time'] = on5_df['data_time'].dt.strftime("%H:%M")
    return on5_df


def export_cons_curve_to_excel(start_date, end_date, power_96_idx, is_filter, start_time, end_time):
    """Export per-consumer 96-point load curves for two days plus their
    baseline-vs-today differences to cons_curve.xlsx.

    ``start_date`` is the baseline day, ``end_date`` is "today"; when
    ``is_filter`` is truthy only large commercial consumers are included.
    """
    # max_time = get_max_time()
    cons_df = get_cons_info(is_filter)
    print('cons:', len(cons_df))

    # Map each district (on5) to the list of its consumer ids.
    on5_dict = (
        cons_df[["on5", "c_id"]]
            .groupby("on5")["c_id"]
            .agg(list)
            .to_dict()
    )
    df = pd.DataFrame()
    for on5, cid_list in on5_dict.items():
        print('on5:', on5)
        base_df = get_on5_data_1(on5, cid_list, start_date, start_time, end_time)
        today_df = get_on5_data_1(on5, cid_list, end_date, start_time, end_time)

        # Left-join the baseline's p_kt/p_total onto today's rows; empty
        # suffix keeps today's column names, "_baseline" marks the others.
        on5_df = pd.merge(today_df, base_df[["c_id", "data_time", "p_kt", "p_total"]],
                          on=["c_id", "data_time"], how='left', suffixes=('','_baseline'))
        on5_df['p_kt_diff'] = on5_df['p_kt_baseline'] - on5_df['p_kt']
        on5_df['p_total_diff'] = on5_df['p_total_baseline'] - on5_df['p_total']
        df = pd.concat([df, on5_df])
        # break

    print(f"count:{len(df)}")
    # Render the air-conditioning ratio as a percentage string for Excel.
    df["kt_ratio"] = df["kt_ratio"].apply(lambda x: "{:.2%}".format(x))
    df = df.merge(cons_df, on="c_id", how="left")
    df = df.sort_values(["c_id", "data_time"])
    df = df[
        [
            "cons_no",
            "cons_name",
            "type_code",
            "on5",
            "shi",
            "on7",
            "xian",
            "org_no",
            "org_name",
            "p_total",
            "p_total_baseline",
            "p_total_diff",
            "p_kt",
            "p_kt_baseline",
            "p_kt_diff",
            "kt_ratio",
            "data_time",
        ]
    ]
    df.to_excel("./cons_curve.xlsx", index=False)
    print(f"{start_date}用户96点负荷导出完成:cons_curve.xlsx")


def cons_max_sum():
    """Print the sum over all consumers of each consumer's max p_kt."""
    rules = [("c_id", "same", ">", "0"), ("p_kt", "stat", "max", ">", "0")]
    groups = es_dal.get_group_vector(
        rules, power_96_idx, doc_time_field="data_time"
    )
    # Each group entry pairs the bucket key docs with the aggregated max.
    rows = [[grp[0][0]["c_id"], grp[1]] for grp in groups]
    frame = pd.DataFrame(rows, columns=["c_id", "p_kt"])
    total = frame["p_kt"].sum()
    print(total)


if __name__ == "__main__":
    # Command-line entry point: parse flags, build the ES DAL, then run the
    # selected export. The export functions above read es_dal, power_96_idx,
    # power_area_idx, start_date and end_date as module globals, so those
    # must be assigned here before any export call.
    es_host = config["ES_HOST"]
    power_area_idx = config["POWERLOAD_AREA"]
    power_96_idx = config["POWERLOAD"]
    power_96_path = "/home/smxu/hebei/power96/"  # NOTE(review): unused here
    index_96 = "aclr_res_power_96_"
    suffix = ""
    is_huanbi = False  # NOTE(review): parsed below but never dispatched
    start_date = "2022-01-01T09:45:00"
    end_date = "2022-01-02T09:45:00"
    p_num = 1  # NOTE(review): parsed (-p) but never used in this file
    is_ggjg = False  # NOTE(review): parsed below but never dispatched
    file = "/home/smxu/hebei/sichuan/6万户数据初步分析-汇总_20230621.xls"

    is_on5_kt = False
    on5_list = []

    pro = False  # NOTE(review): parsed below but never dispatched

    cons_curve = False
    is_filter = False

    on5_max = False  # dispatch for this flag is commented out below

    start_time = ""
    end_time = ""
    busi_on5 = False

    # Short options: -s/-e date range, -f input workbook, -o index suffix,
    # -p process count, -c on5 id list, -F filter flag, -S/-T intra-day
    # start/end times.
    opts, args = getopt.getopt(
        sys.argv[1:],
        "s:e:f:o:p:c:FS:T:",
        ["huanbi", "ggjg", "on5", "pro", "cons", "on5_max", "busi_on5"],
    )
    for opt, val in opts:
        if opt == "-s":
            start_date = val
        elif opt == "-e":
            end_date = val
        elif opt == "-f":
            file = val
        elif opt == "-o":
            suffix = val
        elif opt == "-p":
            p_num = int(val)
        elif opt == "--huanbi":
            is_huanbi = True
        elif opt == "--ggjg":
            is_ggjg = True
        elif opt == "--on5":
            is_on5_kt = True
        elif opt == "-c":
            on5_list = val.split(",")
            on5_list = [int(on5) for on5 in on5_list]
        elif opt == "--pro":
            pro = True
        elif opt == "--cons":
            cons_curve = True
        elif opt == "-F":
            is_filter = True
        elif opt == "-S":
            start_time = val
        elif opt == "-T":
            end_time = val
        elif opt == "--on5_max":
            on5_max = True
        elif opt == "--busi_on5":
            busi_on5 = True

    es_dal = EsBaseDal(es_host)
    index_96 = index_96 + suffix + "*"  # NOTE(review): unused after this line

    if is_on5_kt:
        print(start_date, end_date, power_area_idx)
        export_on5_kt(start_date, end_date, start_time, on5_list)
    if cons_curve:
        print(start_date, power_96_idx)
        export_cons_curve_to_excel(start_date, end_date, power_96_idx, is_filter, start_time, end_time)
    # if on5_max:
    #     export_on5_max_p_kt(start_date, end_date)
    # Export per-city counts of commercial consumers with capacity > 2000
    # and the peak adjustable load within the given time window.
    if busi_on5:
        export_busi_on5_max_diff(start_date, end_date, start_time, end_time)
