import time
import traceback
from multiprocessing import Pool

import pandas as pd
from es_pandas import es_pandas

from air_web.data_platform import mysql_con, sql_engine
from air_web.web_flask.dal.base_dal import EsBaseDal


def write_to_es(df, index_name):
    """Write *df* into the given Elasticsearch index.

    Uses the shared module-level ``es_pandas`` client ``ep``.  The doc
    type is derived from the index name by appending ``_doc``.
    """
    target_doc_type = f"{index_name}_doc"
    ep.to_es(
        df,
        index=index_name,
        doc_type=target_doc_type,
        use_index=False,
        request_timeout=60,
    )


def proc_max_time(df, group_field=None, is_write=False):
    """Keep, per group, the single row with the highest ``p_total``.

    Groups are defined by ``group_field`` plus the fixed keys
    ``org_no``/``org_name``/``data_date``.  The surviving rows get
    ``is_day_max = 1`` so callers can merge the flag back onto the full
    time series.

    Args:
        df: aggregated load frame; must contain ``p_total`` and the
            grouping columns.
        group_field: optional extra grouping columns (e.g.
            ``["type_code", "type_code_sort"]``).  Not mutated.
        is_write: when True, also persist the province-level maxima
            (``type_code == 'all'`` and ``type_code_sort == 'all'``)
            to the MySQL table ``area_max_time``.

    Returns:
        DataFrame with one row per group (the daily maximum) and an
        added ``is_day_max`` column set to 1.
    """
    # BUG FIX: the original signature used a mutable default (``group_field=[]``)
    # and extended it in place, so the default list grew on every call made
    # without an explicit argument.  Copy the input instead of mutating it.
    keys = list(group_field) if group_field else []
    keys.extend(["org_no", "org_name", "data_date"])

    # Sort descending so drop_duplicates (keep='first') retains the max row.
    temp_df = df.sort_values("p_total", ascending=False)
    temp_df.drop_duplicates(keys, inplace=True)

    if is_write:
        # Only the fully rolled-up rows are persisted as daily maxima.
        res_df = temp_df.loc[
            (temp_df["type_code"] == "all")
            & (temp_df["type_code_sort"] == "all")
        ]
        res_df = res_df.drop(["type_code", "type_code_sort"], axis=1)
        if "p_org_no" in res_df:
            res_df = res_df.drop(["p_org_no", "p_org_name"], axis=1)
        res_df.rename(
            columns={
                "data_time": "max_time",
                "p_total": "max_p_total",
                "p_kt": "max_p_kt",
                "p_base": "max_p_base",
                "data_date": "compute_date",
            },
            inplace=True,
        )
        sql_engine.insert_df(res_df, "area_max_time")
    temp_df["is_day_max"] = 1
    return temp_df


def stats2(data_date, rules, powerload_area_idx, org_no="", org_name=""):
    """Roll already-aggregated area rows up one org level for one day.

    Queries the area index with *rules*, sums p_total/p_kt/p_base per
    (parent org, type_code, type_code_sort, data_time), promotes the
    parent org columns to org columns, flags daily maxima and writes
    the result back into the same area index.

    Args:
        data_date: day being processed ('YYYY-MM-DD'); used for logging.
        rules: es query rule tuples passed to ``es.query_dataframe``.
        powerload_area_idx: ES area index to read from and write to.
        org_no / org_name: values assigned as the new parent org of the
            rolled-up rows (empty at the topmost level).

    Returns:
        The aggregated DataFrame, or None when the query is empty.
    """
    res = es.query_dataframe(
        rules, powerload_area_idx, doc_time_field="data_time"
    )
    if len(res) == 0:
        print(f"{data_date}为空，跳过")
        return
    result = (
        res.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_code",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    # The former parent becomes the org of this level; the new parent is
    # supplied by the caller.
    result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    result["p_org_no"] = org_no
    result["p_org_name"] = org_name

    group_field = ["type_code", "type_code_sort"]
    max_df = proc_max_time(result, group_field, is_write=True)
    result = result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    # BUG FIX: chained ``result["is_day_max"].fillna(..., inplace=True)`` is
    # deprecated chained assignment (a no-op under pandas copy-on-write);
    # assign the filled column back explicitly.
    result["is_day_max"] = result["is_day_max"].fillna(0)
    write_to_es(result, powerload_area_idx)
    return result


def stats1(
    data_date, on7_type_dict, cons_list, powerload_idx, powerload_area_idx
):
    """Aggregate raw load data to county (on7) level for one day.

    For every county org and each of its type codes, queries the raw
    load index, sums p_total/p_kt/p_base per (org, type, time), flags
    each group's daily-maximum row and writes the result to the area
    index.  Two roll-ups follow per county: one with
    ``type_code='all'`` and one with ``type_code_sort='all'`` on top of
    it (the latter also persists the daily maxima to MySQL via
    ``proc_max_time(is_write=True)``).

    Args:
        data_date: day to process, 'YYYY-MM-DD'.
        on7_type_dict: mapping of county org code -> list of type codes.
        cons_list: optional consumer-number filter; empty = no filter.
        powerload_idx: ES index holding the raw per-consumer load data.
        powerload_area_idx: ES index receiving the aggregated rows.
    """
    # Pull only the fields the aggregation needs.
    source_list = [
        "data_date",
        "data_time",
        "on7",
        "xian",
        "on5",
        "shi",
        "type_code",
        "type_code_sort",
        "p_total",
        "p_kt",
        "p_base",
    ]
    for on7, type_code_list in on7_type_dict.items():
        print("proc", on7)
        on7_df = pd.DataFrame()
        for type_code in type_code_list:
            rules = [
                ("data_date", "query", "=", data_date),
                ("on7", "query", "=", on7),
                ("type_code", "query", "=", type_code),
            ]
            if cons_list:
                rules.append(("real_cons_no", "query", "in", cons_list))
            res = es.query_dataframe(
                rules,
                powerload_idx,
                doc_time_field="data_time",
                source=source_list,
            )
            if len(res) == 0:
                print(f"{data_date}为空，跳过")
                continue
            result = (
                res.groupby(
                    [
                        "on7",
                        "xian",
                        "on5",
                        "shi",
                        "type_code",
                        "type_code_sort",
                        "data_time",
                        "data_date",
                    ]
                )
                .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
                .reset_index()
            )
            # Normalize to the generic area schema: on7/xian is this
            # level's org, on5/shi its parent.
            result.rename(
                columns={
                    "on7": "org_no",
                    "xian": "org_name",
                    "on5": "p_org_no",
                    "shi": "p_org_name",
                },
                inplace=True,
            )

            group_field = ["type_code", "type_code_sort"]
            max_df = proc_max_time(result, group_field)
            result = result.merge(
                max_df[
                    [
                        "org_no",
                        "type_code",
                        "type_code_sort",
                        "data_time",
                        "is_day_max",
                    ]
                ],
                on=["org_no", "type_code", "type_code_sort", "data_time"],
                how="left",
            )
            # BUG FIX: chained inplace fillna is deprecated chained
            # assignment (no-op under pandas copy-on-write); assign back.
            result["is_day_max"] = result["is_day_max"].fillna(0)
            write_to_es(result, powerload_area_idx)
            on7_df = pd.concat([on7_df, result])

        if on7_df.empty:
            continue

        # Roll all type codes together: one 'all' row set per sort code.
        all_result = (
            on7_df.groupby(
                [
                    "org_no",
                    "org_name",
                    "p_org_no",
                    "p_org_name",
                    "type_code_sort",
                    "data_time",
                    "data_date",
                ]
            )
            .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
            .reset_index()
        )
        all_result["type_code"] = "all"
        group_field = ["type_code_sort"]
        max_df = proc_max_time(all_result, group_field)
        all_result = all_result.merge(
            max_df[["org_no", "is_day_max", "data_time", "type_code_sort"]],
            on=["org_no", "data_time", "type_code_sort"],
            how="left",
        )
        all_result["is_day_max"] = all_result["is_day_max"].fillna(0)
        write_to_es(all_result, powerload_area_idx)

        # Add one more roll-up with type_code_sort='all' on top of the
        # type_code='all' rows; this one also persists the daily maxima.
        sort_all_result = (
            all_result.groupby(
                [
                    "org_no",
                    "org_name",
                    "p_org_no",
                    "p_org_name",
                    "data_time",
                    "data_date",
                    "type_code",
                ]
            )
            .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
            .reset_index()
        )
        sort_all_result["type_code_sort"] = "all"
        max_df = proc_max_time(sort_all_result, is_write=True)
        sort_all_result = sort_all_result.merge(
            max_df[["org_no", "is_day_max", "data_time"]],
            on=["org_no", "data_time"],
            how="left",
        )
        sort_all_result["is_day_max"] = sort_all_result["is_day_max"].fillna(0)
        write_to_es(sort_all_result, powerload_area_idx)


def stats(
    on7_type_dict, data_date, cons_list, powerload_idx, powerload_area_idx
):
    """Run the full aggregation pipeline for one day.

    County (on7) level first, then two successive roll-ups of the area
    index via ``stats2``.
    """
    stats1(
        data_date, on7_type_dict, cons_list, powerload_idx, powerload_area_idx
    )

    print("proc shi")
    day_rules = [("data_date", "query", "=", data_date)]
    stats2(data_date, day_rules, powerload_area_idx, "99", "河北省电力公司")

    print("proc on5")
    top_level_rules = [
        ("data_date", "query", "=", data_date),
        ("p_org_no", "query", "=", "99"),
    ]
    stats2(data_date, top_level_rules, powerload_area_idx)


def main(on7_type_dict):
    """Fan the per-day stats job out over a process pool.

    Processes each day in the (currently hard-coded) 2022-01-01..15
    range with up to 8 worker processes.  Relies on the module-level
    ``cons_list`` being defined before the workers start.
    """
    date_list = pd.date_range(
        start="2022-01-01", end="2022-01-15", freq="1D"
    ).strftime("%Y-%m-%d")
    pro_pool = Pool(8)
    pending = []
    for data_date in date_list:
        print("start", data_date)
        # Indexes are partitioned per month, e.g. ...-2022.01
        month_tag = data_date[0:7].replace("-", ".")
        powerload_idx = "hbfh_powerload_idx-{}".format(month_tag)
        powerload_area_idx = "hbfh_powerload_area_idx-{}".format(month_tag)

        pending.append(
            pro_pool.apply_async(
                func=stats,
                args=(
                    on7_type_dict,
                    data_date,
                    cons_list,
                    powerload_idx,
                    powerload_area_idx,
                ),
            )
        )
        print("end", data_date)

    pro_pool.close()
    pro_pool.join()

    # BUG FIX: apply_async silently discards worker exceptions unless
    # .get() is called on the result, so failed days went unnoticed.
    # Surface each failure's traceback after the pool finishes.
    for res in pending:
        try:
            res.get()
        except Exception:
            traceback.print_exc()


def proc_area_type_map(cons_list):
    """Load consumer master data from ES and build the area/type maps.

    Persists the raw consumer table to MySQL (``c_cons``), builds a
    county->type_code mapping (optionally restricted to *cons_list*),
    and writes the combined county/city/province area-type mapping to
    the ``area_type_map`` table.

    Returns:
        dict mapping each county org code (on7) to its list of
        type codes.
    """
    cons_df = ep.to_pandas(c_cons_idx)
    sql_engine.insert_df(cons_df, "c_cons")

    # Restrict to the requested consumers only when a filter is supplied.
    if cons_list:
        selected = cons_df.loc[cons_df["real_cons_no"].isin(cons_list)]
    else:
        selected = cons_df

    pairs = selected[["on7", "type_code"]].drop_duplicates()
    on7_type_dict = pairs.groupby("on7")["type_code"].agg(list).to_dict()

    # County-level mapping (on7/xian).
    county_map = cons_df[
        ["on7", "xian", "type_code", "type_code_sort"]
    ].drop_duplicates()
    county_map = county_map.rename(
        columns={"on7": "org_no", "xian": "org_name"}
    )

    # City-level mapping (on5/shi).
    city_map = cons_df[
        ["on5", "shi", "type_code", "type_code_sort"]
    ].drop_duplicates()
    city_map = city_map.rename(columns={"on5": "org_no", "shi": "org_name"})

    # Province-level mapping under the fixed top org "99".
    province_map = cons_df[["type_code", "type_code_sort"]].drop_duplicates()
    province_map["org_no"] = "99"
    province_map["org_name"] = "河北省电力公司"

    area_map = pd.concat([county_map, city_map, province_map])
    sql_engine.insert_df(area_map, "area_type_map")
    return on7_type_dict


def get_filter_cons_list(cons_path="/home/smxu/hebei/cons_test.csv"):
    """Read the consumer-number filter list from a CSV file.

    Args:
        cons_path: path to a CSV with a header row containing a
            ``cons_no`` column.  Defaults to the original hard-coded
            test file, so existing no-argument callers are unaffected.

    Returns:
        List of consumer numbers as strings (dtype preserved so
        leading zeros survive).
    """
    cons_df = pd.read_csv(cons_path, header=0, dtype=str)
    return cons_df["cons_no"].tolist()


if __name__ == "__main__":
    # Module-level wiring: the names defined here (es, ep, c_cons_idx,
    # cons_list, on7_type_dict) are read as globals by the functions above,
    # so they must be created before main() forks the worker pool.
    # SECURITY NOTE(review): credentials are embedded in the connection
    # string in plain text — move them to environment variables or a
    # config file outside version control.
    es_host = "zxtech:Zxod112_shining10@192.168.80.231:19200"
    # es_host = 'zxtech:Zxod112_shining10@192.168.83.221:9200'
    es = EsBaseDal(es_host)
    ep = es_pandas(es_host)
    c_cons_idx = "hbfh_c_cons_idx"
    # Expected column layout of the area index documents.
    # NOTE(review): `columns` is not referenced anywhere in this file —
    # presumably kept for documentation or future use; confirm before removing.
    columns = [
        "org_no",
        "org_name",
        "p_org_no",
        "p_org_name",
        "type_code",
        "type_code_sort",
        "data_time",
        "data_date",
        "p_total",
        "p_kt",
        "p_base",
        "is_day_max",
    ]

    # Wall-clock timing of the whole run.
    a = time.time()
    cons_list = []  # get_filter_cons_list()
    on7_type_dict = proc_area_type_map(cons_list)
    main(on7_type_dict)
    b = time.time()
    print(b - a)
