import getopt
import sys
import time
import traceback
from multiprocessing import Pool

import pandas as pd
from es_pandas import es_pandas

from air_web.data_platform import sql_engine
from air_web.web_flask.dal.base_dal import EsBaseDal

# from air_web.web_flask.tools.es_data_mapping import init_es_tmpl


def write_to_es(df, index_name):
    """Bulk-index *df* into Elasticsearch under *index_name*.

    Uses the module-level ``ep`` (es_pandas) client. The document type is
    derived from the index name by appending ``_doc``.
    """
    ep.to_es(
        df,
        index=index_name,
        doc_type="{}_doc".format(index_name),
        thread_count=2,
        chunk_size=5000,
        use_index=False,
        request_timeout=60,
    )


def proc_max_time(df, group_field=None, is_write=False):
    """Keep, per group, the row with the highest ``p_total`` (the daily max).

    The effective grouping key is *group_field* plus
    ``["org_no", "org_name", "data_date"]``. The surviving rows are tagged
    with ``is_day_max = 1`` and returned. When *is_write* is True, the
    rows whose ``type_code`` and ``type_code_sort`` are both ``"all"`` are
    additionally persisted to the ``area_max_time`` SQL table with the
    measurement columns renamed to their ``max_*`` counterparts.

    Bug fix: the original signature used a mutable default
    (``group_field=[]``) that was extended in place, so calls relying on
    the default accumulated duplicate key columns across invocations and
    caller-supplied lists were mutated. A ``None`` sentinel plus a local
    copy fixes both.

    :param df: frame containing at least ``p_total``, ``org_no``,
        ``org_name`` and ``data_date`` columns (plus *group_field* columns).
    :param group_field: extra grouping columns; never mutated.
    :param is_write: also persist province-wide ("all"/"all") maxima to SQL.
    :return: deduplicated frame with an ``is_day_max`` column set to 1.
    """
    # Copy so neither the default nor the caller's list is ever mutated.
    keys = list(group_field) if group_field is not None else []
    keys.extend(["org_no", "org_name", "data_date"])

    # Sort descending so drop_duplicates keeps the max-p_total row per key.
    temp_df = df.sort_values("p_total", ascending=False)
    temp_df = temp_df.drop_duplicates(keys)

    if is_write:
        res_df = temp_df.loc[
            (temp_df["type_code"] == "all")
            & (temp_df["type_code_sort"] == "all")
        ]
        res_df = res_df.drop(["type_code", "type_code_sort"], axis=1)
        # Parent-org columns only exist below province level.
        if "p_org_no" in res_df:
            res_df = res_df.drop(["p_org_no", "p_org_name"], axis=1)
        res_df = res_df.rename(
            columns={
                "data_time": "max_time",
                "p_total": "max_p_total",
                "p_kt": "max_p_kt",
                "p_base": "max_p_base",
                "data_date": "compute_date",
            }
        )
        sql_engine.insert_df(res_df, "area_max_time")
    temp_df["is_day_max"] = 1
    return temp_df


def save_proc_max_time(df):
    """Persist the rows flagged as daily maxima to the ``area_max_time`` table.

    Keeps only ``is_day_max == 1`` rows, drops the grouping/flag columns,
    renames the measurement columns to their ``max_*`` counterparts, and
    inserts via the shared SQL engine.
    """
    day_max = df.loc[df["is_day_max"] == 1].drop(
        ["type_code", "type_code_sort", "is_day_max"], axis=1
    )
    # Parent-org columns are absent on province-level frames.
    if "p_org_no" in day_max:
        day_max = day_max.drop(["p_org_no", "p_org_name"], axis=1)
    renamed = day_max.rename(
        columns={
            "data_time": "max_time",
            "p_total": "max_p_total",
            "p_kt": "max_p_kt",
            "p_base": "max_p_base",
            "data_date": "compute_date",
        }
    )
    sql_engine.insert_df(renamed, "area_max_time")


def stats2(on7_df, powerload_area_idx):
    """Roll the county-level frame up one level at a time and index to ES.

    Two passes over the same shape of pipeline:
    1. "shi" (presumably city level): group *on7_df* by its parent-org
       columns, re-label them as the new org, set the province as the new
       parent, flag/persist daily maxima, and write to ES.
    2. "sheng" (presumably province level): repeat on the city-level
       result with empty parent columns.

    Side effects: writes to the ES index *powerload_area_idx* via
    ``write_to_es`` and to the ``area_max_time`` SQL table via
    ``proc_max_time(..., is_write=True)``.
    """
    print("proc shi")
    # City level: the county's parent org (p_org_no/p_org_name) becomes the
    # grouping org; measurements are summed per type/time slot.
    result = (
        on7_df.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_code",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    # New parent of every city is the province-wide org "99".
    result["p_org_no"] = "99"
    result["p_org_name"] = "河北省电力公司"

    group_field = ["type_code", "type_code_sort"]
    # is_write=True also persists the "all"/"all" maxima to SQL.
    max_df = proc_max_time(result, group_field, is_write=True)
    print("===============shi", len(max_df))
    # Left-merge the is_day_max flag back onto every (org, type, time) row.
    result = result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    # NOTE(review): chained-assignment fillna(inplace=True) is deprecated in
    # newer pandas — works here but should eventually be reassigned instead.
    result["is_day_max"].fillna(0, inplace=True)
    result = result.astype({"data_time": "datetime64[ns]"})
    write_to_es(result, powerload_area_idx)

    print("proc sheng")
    # Province level: same roll-up applied to the city-level result.
    sheng_result = (
        result.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_code",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sheng_result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    # The province has no parent org.
    sheng_result["p_org_no"] = ""
    sheng_result["p_org_name"] = ""

    group_field = ["type_code", "type_code_sort"]
    max_df = proc_max_time(sheng_result, group_field, is_write=True)
    print("===============sheng", len(max_df))
    sheng_result = sheng_result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    sheng_result["is_day_max"].fillna(0, inplace=True)
    write_to_es(sheng_result, powerload_area_idx)


def res_to_df(res, sum_field):
    """Flatten an ES group-vector result into a DataFrame.

    Each element of *res* appears to be ``((type_bucket, time_bucket),
    value)`` where the buckets are dicts carrying ``type_code`` /
    ``data_time`` — verify against ``EsBaseDal.get_group_vector``. The
    aggregated value is stored under the *sum_field* column.

    Bug fix: the empty-result frame previously lacked the ``type_code``
    column, so a downstream ``merge(on=["data_time", "type_code"])`` would
    raise KeyError whenever one of the follow-up queries (p_kt / p_base)
    returned nothing while p_total did not. The empty frame now carries
    all three columns.

    :param res: list of grouped aggregation results (may be empty).
    :param sum_field: name for the value column, e.g. ``"p_total"``.
    :return: DataFrame with ``type_code``, ``data_time`` and *sum_field*.
    """
    if len(res) > 0:
        rows = [
            {
                "type_code": ll[0][0]["type_code"],
                "data_time": ll[0][1]["data_time"],
                sum_field: ll[1],
            }
            for ll in res
        ]
        return pd.DataFrame(rows)
    return pd.DataFrame({"type_code": [], "data_time": [], sum_field: []})


def stats1(data_date, filter_df, cons_list, powerload_idx, powerload_area_idx):
    """Compute county-level ("on7") load statistics for one day.

    For each county org, queries the ES index *powerload_idx* for summed
    ``p_total`` / ``p_kt`` / ``p_base`` series, attaches org metadata from
    *filter_df*, flags the daily maximum row, persists maxima to SQL, and
    writes the result to *powerload_area_idx*. Then appends two roll-up
    layers: rows with ``type_code == "all"`` (summed across type codes) and
    rows with ``type_code_sort == "all"`` (summed across sort codes).

    Reads module globals ``es`` (EsBaseDal) and ``res_columns``.

    :param data_date: day to process, formatted ``YYYY-MM-DD``.
    :param filter_df: distinct org/type combinations from the consumer data.
    :param cons_list: optional consumer-number filter; empty list = no filter.
    :return: concatenated frame (per-type + both "all" layers) for stats2.
    """
    on7_type_dict = filter_df.groupby("on7")["type_code"].agg(list).to_dict()
    type_df = filter_df[["type_code", "type_code_sort"]].drop_duplicates()
    # Keep only the org hierarchy columns for the later org-name merge.
    filter_df = filter_df[["on5", "shi", "on7", "xian"]].drop_duplicates()
    on7_df = pd.DataFrame()
    for on7, type_code_list in on7_type_dict.items():
        print("proc:", on7)
        # on7_type_df = pd.DataFrame()
        # for type_code in type_code_list:
        # Rule tuples appear to be (field, clause-kind, op, value) — verify
        # the exact semantics against EsBaseDal.get_group_vector.
        rules = [
            ("data_date", "query", "=", data_date),
            ("on7", "query", "=", on7),
            ("type_code", "same", ">", "0"),
            ("data_time", "same", ">", "0"),
        ]

        if cons_list:
            rules.append(("real_cons_no", "query", "in", cons_list))

        # Three separate aggregation queries, one per measurement.
        rules1 = rules + [("p_total", "stat", "sum", ">=", "0")]
        res1 = es.get_group_vector(
            rules1, powerload_idx, doc_time_field="data_time"
        )
        res_df = res_to_df(res1, "p_total")
        if res_df.empty:
            print(f"{on7}-{data_date}为空，跳过")
            continue

        rules2 = rules + [("p_kt", "stat", "sum", ">=", "0")]
        res2 = es.get_group_vector(
            rules2, powerload_idx, doc_time_field="data_time"
        )
        res_df2 = res_to_df(res2, "p_kt")

        rules3 = rules + [("p_base", "stat", "sum", ">=", "0")]
        res3 = es.get_group_vector(
            rules3, powerload_idx, doc_time_field="data_time"
        )
        res_df3 = res_to_df(res3, "p_base")

        # Align the three measurements on (time, type).
        res_df = pd.merge(
            res_df, res_df2, on=["data_time", "type_code"], how="left"
        )
        res_df = pd.merge(
            res_df, res_df3, on=["data_time", "type_code"], how="left"
        )

        res_df = res_df.astype({"data_time": "datetime64[ns]"})
        # Highest p_total row (after sort + reset_index it sits at index 0)
        # is the county's daily maximum.
        res_df["is_day_max"] = 0
        res_df = res_df.sort_values("p_total", ascending=False)
        res_df = res_df.reset_index(drop=True)
        res_df.loc[0, "is_day_max"] = 1
        res_df["on7"] = on7
        res_df["data_date"] = data_date
        # res_df['type_code'] = type_code
        res_df = res_df.merge(type_df, on="type_code", how="left")
        res_df = res_df.merge(filter_df, on="on7", how="left")
        res_df.rename(
            columns={
                "on7": "org_no",
                "xian": "org_name",
                "on5": "p_org_no",
                "shi": "p_org_name",
            },
            inplace=True,
        )

        # on7_type_df = pd.concat([on7_type_df, res_df])

        if res_df.empty:
            continue
        save_proc_max_time(res_df)
        write_to_es(res_df, powerload_area_idx)
        on7_df = pd.concat([on7_df, res_df[res_columns]])

    # Roll-up 1: sum across type codes, tagged type_code="all".
    all_result = (
        on7_df.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    all_result["type_code"] = "all"
    group_field = ["type_code_sort"]
    max_df = proc_max_time(all_result, group_field)
    all_result = all_result.merge(
        max_df[["org_no", "is_day_max", "data_time", "type_code_sort"]],
        on=["org_no", "data_time", "type_code_sort"],
        how="left",
    )
    all_result["is_day_max"].fillna(0, inplace=True)
    write_to_es(all_result, powerload_area_idx)
    on7_df = pd.concat([on7_df, all_result[res_columns]])

    # Roll-up 2: add one more layer with type_code_sort="all".
    sort_all_result = (
        all_result.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "data_time",
                "data_date",
                "type_code",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sort_all_result["type_code_sort"] = "all"
    # NOTE(review): relies on proc_max_time's default group_field here.
    max_df = proc_max_time(sort_all_result, is_write=True)
    sort_all_result = sort_all_result.merge(
        max_df[["org_no", "is_day_max", "data_time"]],
        on=["org_no", "data_time"],
        how="left",
    )
    sort_all_result["is_day_max"].fillna(0, inplace=True)
    write_to_es(sort_all_result, powerload_area_idx)
    on7_df = pd.concat([on7_df, sort_all_result[res_columns]])
    return on7_df


def stats(filter_df, data_date, cons_list, powerload_idx, powerload_area_idx):
    """Run the full per-day pipeline with wall-clock timing.

    County-level stats first (stats1), then the city/province roll-up
    (stats2) on its output.
    """
    print("start", data_date)
    started = time.time()
    area_frame = stats1(
        data_date, filter_df, cons_list, powerload_idx, powerload_area_idx
    )
    stats2(area_frame, powerload_area_idx)
    print("end", data_date, time.time() - started)


def main(
    filter_df,
    cons_list,
    process_num,
    powerload_idx,
    powerload_area_idx,
    start_date,
    end_date,
):
    """Fan the per-day stats job out over a process pool.

    One ``stats`` task is submitted per calendar day in
    [*start_date*, *end_date*]. When *powerload_idx* /
    *powerload_area_idx* are falsy, a monthly index name is derived from
    each date ("hbfh_powerload_idx-YYYY.MM" style).

    Bug fixes vs. the original:
    * The ``x = x if x else ...`` pattern rebound the parameter itself, so
      after the first iteration it was truthy and every later date reused
      the first month's index name. Per-date names now go into fresh
      locals.
    * ``result.get()`` was called immediately after ``apply_async``,
      blocking the loop and serializing the pool. Tasks are now all
      submitted first and awaited afterwards, so up to *process_num* days
      run in parallel.
    * Bare ``except:`` narrowed to ``except Exception``.
    """
    date_list = pd.date_range(
        start=start_date, end=end_date, freq="1D"
    ).strftime("%Y-%m-%d")
    pro_pool = Pool(process_num)
    pending = []
    for data_date in date_list:
        # "YYYY.MM" suffix for this date's monthly index.
        month_tag = data_date[0:7].replace("-", ".")
        pl_idx = powerload_idx or "hbfh_powerload_idx-{}".format(month_tag)
        pla_idx = (
            powerload_area_idx
            or "hbfh_powerload_area_idx-{}".format(month_tag)
        )
        pending.append(
            pro_pool.apply_async(
                func=stats,
                args=(filter_df, data_date, cons_list, pl_idx, pla_idx),
            )
        )
    for result in pending:
        try:
            result.get()
        except Exception:
            # Log the worker's traceback but keep processing other days.
            print(traceback.format_exc())

    pro_pool.close()
    pro_pool.join()


def proc_area_type_map(c_cons_idx, cons_list):
    """Load the consumer master data from ES and build the org/type map.

    Persists the raw consumer frame to the ``c_cons`` table, builds the
    ``area_type_map`` table at county, city and province level, and
    returns the (optionally cons_no-filtered) distinct org/type frame
    used by the stats pipeline.
    """
    cons_df = ep.to_pandas(c_cons_idx)
    sql_engine.insert_df(cons_df, "c_cons")

    # Restrict to the requested consumers when a filter list was given.
    if cons_list:
        filter_df = cons_df.loc[cons_df["cons_no"].isin(cons_list)]
    else:
        filter_df = cons_df
    filter_df = filter_df[
        ["on5", "shi", "on7", "xian", "type_code", "type_code_sort"]
    ].drop_duplicates()

    # County level: on7/xian become org_no/org_name.
    xian_map = (
        cons_df[["on7", "xian", "type_code", "type_code_sort"]]
        .drop_duplicates()
        .rename(columns={"on7": "org_no", "xian": "org_name"})
    )

    # City level: on5/shi become org_no/org_name.
    shi_map = (
        cons_df[["on5", "shi", "type_code", "type_code_sort"]]
        .drop_duplicates()
        .rename(columns={"on5": "org_no", "shi": "org_name"})
    )

    # Province level: a single synthetic org "99".
    sheng_map = cons_df[["type_code", "type_code_sort"]].drop_duplicates()
    sheng_map["org_no"] = "99"
    sheng_map["org_name"] = "河北省电力公司"

    sql_engine.insert_df(
        pd.concat([xian_map, shi_map, sheng_map]), "area_type_map"
    )
    return filter_df


def get_filter_cons_list(filter_csv):
    """Read the consumer-number filter CSV.

    :param filter_csv: path to a CSV with a ``cons_no`` header column, or a
        falsy value when no filter is configured.
    :return: list of consumer numbers as strings; ``[]`` when no path given.
    """
    if not filter_csv:
        return []
    cons_df = pd.read_csv(filter_csv, header=0, dtype=str)
    return cons_df["cons_no"].tolist()


if __name__ == "__main__":
    # ---- Defaults; each can be overridden by the CLI flags parsed below.
    # NOTE(review): hard-coded ES credentials — move to env vars / config.
    es_host = "zxtech:Zxod112_shining10@192.168.80.231:19200"
    process_num = 2
    filter_csv = None
    # NOTE(review): is_init_temp is set by -i but never used — the
    # init_es_tmpl call below is commented out.
    is_init_temp = False
    start_date = "2022-01-01"
    end_date = "2022-01-03"
    powerload_idx = "hbfh_powerload_idx-2022.01"
    powerload_area_idx = "hbfh_powerload_area_idx_7"
    c_cons_idx = "hbfh_c_cons_idx"

    # -h es host, -n process count, -f filter csv path, -i flag (no arg),
    # -s/-e date range; long options override the three index names.
    opts, args = getopt.getopt(
        sys.argv[1:], "h:n:f:is:e:", ["pl=", "pla=", "cc="]
    )
    for opt, val in opts:
        if opt == "-h":
            es_host = val
        elif opt == "-n":
            process_num = int(val)
        elif opt == "-f":
            filter_csv = val
        elif opt == "-i":
            is_init_temp = True
        elif opt == "-s":
            start_date = val
        elif opt == "-e":
            end_date = val
        elif opt == "--pl":
            powerload_idx = val
        elif opt == "--pla":
            powerload_area_idx = val
        elif opt == "--cc":
            c_cons_idx = val

    a = time.time()
    # Module-level clients used as globals throughout this script
    # (write_to_es, stats1, proc_area_type_map, ...).
    es = EsBaseDal(es_host)
    ep = es_pandas(es_host)
    # if is_init_temp:
    #     init_es_tmpl(es_host)

    # Output column order shared by every frame concatenated in stats1.
    res_columns = [
        "org_no",
        "org_name",
        "p_org_no",
        "p_org_name",
        "type_code",
        "type_code_sort",
        "data_time",
        "data_date",
        "p_total",
        "p_kt",
        "p_base",
        "is_day_max",
    ]

    cons_list = get_filter_cons_list(filter_csv)
    filter_df = proc_area_type_map(c_cons_idx, cons_list)
    main(
        filter_df,
        cons_list,
        process_num,
        powerload_idx,
        powerload_area_idx,
        start_date,
        end_date,
    )
    b = time.time()
    print("end", b - a)
