import getopt
import sys
import time
import traceback
from multiprocessing import Pool

import pandas as pd
from es_pandas import es_pandas

from air_web.data_platform import sql_engine
import ctypes;

def do_bulk(
    actions,
    parallel=False,
    chunk_size=5000,
    thread=4,
    retry=5,
    raise_on_error=False,
    request_timeout=60,
):
    """Bulk-index *actions* into the module-level ``es`` client.

    Uses ``parallel_bulk`` when *parallel* is true, ``streaming_bulk``
    otherwise; counts per-document successes/failures and prints a
    ``success/total`` summary.  Helper-level exceptions are caught and
    logged so one bad chunk does not abort the calling pipeline.

    :param actions: iterable of ES bulk action dicts (see test_to_actions).
    :param parallel: use the thread-parallel bulk helper.
    :param chunk_size: documents per bulk request.
    :param thread: thread count for the parallel path.
    :param retry: max retries for the streaming path.
    :param raise_on_error: let the helper raise on per-document errors.
    :param request_timeout: per-request timeout in seconds.
    """
    success = 0
    failed = 0
    # NOTE(review): ctypes.CDLL exposes C symbols, not Python attributes,
    # so `my_lib.helpers.*` below cannot work as written — presumably the
    # compiled extension module should be imported normally; confirm
    # against the deployed build.
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    try:
        if parallel:
            gen = my_lib.helpers.parallel_bulk(
                es,
                actions,
                thread_count=thread,
                chunk_size=chunk_size,
                raise_on_error=raise_on_error,
                request_timeout=request_timeout,
            )
        else:
            gen = my_lib.helpers.streaming_bulk(
                es,
                actions,
                max_retries=retry,
                raise_on_error=raise_on_error,
                chunk_size=chunk_size,
                request_timeout=request_timeout,
            )
        # Each yielded result is an (ok, item_info) pair.
        for res in gen:
            if res[0]:
                success += 1
            else:
                failed += 1
                print(res[1])
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt.
        print("Caught do_bulk exception")
        traceback.print_exc()
    print(f"import es:{success}/{success + failed}")


def test_to_actions(df, index_name):
    """Yield one ES bulk "index" action per row of *df*.

    Each action targets *index_name* with doc type ``_doc`` and carries
    the row (as a plain dict) in ``_source``.
    """
    rows = df.to_dict(orient="records")
    for row in rows:
        yield {"_index": index_name, "_type": "_doc", "_source": row}


def write_to_es(df, index_name):
    """Push every row of *df* into the ES index *index_name*.

    Streams actions through do_bulk; an earlier es_pandas ``to_es``
    implementation was dropped in favour of the hand-rolled bulk helper.
    """
    actions = test_to_actions(df, index_name)
    do_bulk(actions)


def proc_max_time(df, total_max_df, group_field=None, is_write=False):
    """Keep, per group, only the row with the highest ``p_total``.

    Rows are grouped by *group_field* plus the implicit keys
    ``org_no`` / ``org_name`` / ``data_date``; within each group only the
    row with the largest ``p_total`` survives and is flagged
    ``is_day_max = 1``.

    When *is_write* is true, the rows where both ``type_code`` and
    ``type_code_sort`` equal ``"all"`` are additionally renamed to the
    ``area_max_time`` schema (max_time / max_p_total / ... / compute_date)
    and appended to *total_max_df*.

    :param df: load rows containing ``p_total`` and the group keys.
    :param total_max_df: accumulator frame for area_max_time rows.
    :param group_field: extra grouping columns; defaults to none.
    :param is_write: also collect rows destined for the area_max_time table.
    :returns: tuple ``(max_rows_df, total_max_df)``.
    """
    # Bug fix: the original signature used a mutable default
    # (group_field=[]) and mutated it with extend(), so the default list
    # grew by three keys on every call within a process, and callers'
    # lists were mutated too.  Build a fresh key list instead.
    keys = list(group_field or []) + ["org_no", "org_name", "data_date"]

    temp_df = df.sort_values("p_total", ascending=False)
    temp_df.drop_duplicates(keys, inplace=True)

    if is_write:
        res_df = temp_df.loc[
            (temp_df["type_code"] == "all")
            & (temp_df["type_code_sort"] == "all")
        ]
        res_df = res_df.drop(["type_code", "type_code_sort"], axis=1)
        # City/province frames carry parent-org columns that the
        # area_max_time table does not have.
        if "p_org_no" in res_df:
            res_df = res_df.drop(["p_org_no", "p_org_name"], axis=1)
        res_df.rename(
            columns={
                "data_time": "max_time",
                "p_total": "max_p_total",
                "p_kt": "max_p_kt",
                "p_base": "max_p_base",
                "data_date": "compute_date",
            },
            inplace=True,
        )
        total_max_df = pd.concat([total_max_df, res_df])
        # sql_engine.insert_df(res_df, 'area_max_time')

    temp_df["is_day_max"] = 1
    return temp_df, total_max_df


def stats2(total_df, total_max_df):
    """Roll county-level rows up to city ("shi") and province ("sheng") level.

    For each level: group-sum p_total/p_kt/p_base, flag each group's
    daily-max row via proc_max_time (collecting area_max_time rows with
    is_write=True), and append the result — projected to the module-level
    ``res_columns`` order — onto *total_df*.

    Returns the extended ``(total_df, total_max_df)`` pair.
    """
    print("proc shi")
    # City level: sum county rows grouped by their parent org
    # (p_org_no/p_org_name), then relabel the parent as the org itself.
    result = (
        total_df.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_code",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    # The cities' new parent is the fixed province org ("99").
    result["p_org_no"] = "99"
    result["p_org_name"] = "河北省电力公司"

    group_field = ["type_code", "type_code_sort"]
    max_df, total_max_df = proc_max_time(
        result, total_max_df, group_field, is_write=True
    )
    print("===============shi", len(max_df))
    # Left-join the max flag back onto every city row; non-max rows come
    # back NaN and are zeroed below.
    result = result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    result["is_day_max"].fillna(0, inplace=True)

    print("proc sheng")
    # Province level: same rollup applied to the city rows; the province
    # has no parent, so p_org_* are blanked afterwards.
    sheng_result = (
        result.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_code",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sheng_result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    sheng_result["p_org_no"] = ""
    sheng_result["p_org_name"] = ""

    group_field = ["type_code", "type_code_sort"]
    max_df, total_max_df = proc_max_time(
        sheng_result, total_max_df, group_field, is_write=True
    )
    print("===============sheng", len(max_df))
    sheng_result = sheng_result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    sheng_result["is_day_max"].fillna(0, inplace=True)

    # Append both levels in the shared output column order.
    total_df = pd.concat([total_df, result[res_columns]])
    total_df = pd.concat([total_df, sheng_result[res_columns]])

    return total_df, total_max_df


def proc_agg_data(agg_res):
    """Flatten ES terms-aggregation buckets into a DataFrame.

    Produces one row per bucket with columns
    data_time / p_total / p_kt / p_base.
    """
    rows = [
        {
            "data_time": bucket["key_as_string"],
            "p_total": bucket["sum_p_total"]["value"],
            "p_kt": bucket["sum_p_kt"]["value"],
            "p_base": bucket["sum_p_base"]["value"],
        }
        for bucket in agg_res
    ]
    return pd.DataFrame(rows)


def stats1(data_date, powerload_idx, filter_df):
    """Aggregate one day's raw load data to county ("on7") level.

    For every county/type_code combination found in *filter_df*, runs an
    ES terms aggregation over ``data_time`` (summing p_total/p_kt/p_base)
    against *powerload_idx*, then derives two rollup row sets:
    ``type_code="all"`` and ``type_code_sort="all"``.  The per-group
    daily-maximum rows are flagged via proc_max_time.

    Relies on the module-level ``es`` client and ``res_columns`` list.

    :param data_date: ISO date string being processed.
    :param powerload_idx: source ES index name.
    :param filter_df: on5/shi/on7/xian/type mapping from proc_area_type_map.
    :returns: ``(total_df, total_max_df)``; both empty when the day has
        no data at all.
    """
    total_df = pd.DataFrame()
    total_max_df = pd.DataFrame()

    # on7 -> [type_code, ...], type_code -> type_code_sort lookup tables,
    # and the pure org-mapping frame used to attach names after the query.
    on7_type_dict = filter_df.groupby("on7")["type_code"].agg(list).to_dict()
    type_df = filter_df[["type_code", "type_code_sort"]].drop_duplicates()
    type_sort_dict = type_df.set_index("type_code")["type_code_sort"].to_dict()
    filter_df = filter_df[["on5", "shi", "on7", "xian"]].drop_duplicates()

    on7_df = pd.DataFrame()
    for on7, type_code_list in on7_type_dict.items():
        print("proc:", on7)
        # on7_type_df = pd.DataFrame()
        for type_code in type_code_list:
            # Sum the three load fields per data_time for this
            # date/county/type combination (size=0: aggregations only).
            query = {
                "size": 0,
                "query": {
                    "bool": {
                        "must": [
                            {"term": {"data_date": data_date}},
                            {"term": {"on7": on7}},
                            {"term": {"type_code": type_code}},
                            {
                                "term": {
                                    "type_code_sort": type_sort_dict[type_code]
                                }
                            },
                        ]
                    }
                },
                "aggs": {
                    "group_by": {
                        "terms": {
                            "size": 10000,
                            "field": "data_time",
                        },
                        "aggs": {
                            "sum_p_total": {"sum": {"field": "p_total"}},
                            "sum_p_base": {"sum": {"field": "p_base"}},
                            "sum_p_kt": {"sum": {"field": "p_kt"}},
                        },
                    }
                },
            }
            result = es.search(index=powerload_idx, body=query)
            agg_res = result["aggregations"]["group_by"]["buckets"]
            if len(agg_res) == 0:
                print(f"{on7}-{data_date}_{type_code}为空，跳过")
                continue
            no_all_df = proc_agg_data(agg_res)

            # Tag the aggregated rows with their keys and flag the single
            # highest-p_total row of this combination as the daily max.
            no_all_df = no_all_df.astype({"data_time": "datetime64[ns]"})
            no_all_df["on7"] = on7
            no_all_df["data_date"] = data_date
            no_all_df["type_code"] = type_code
            no_all_df["type_code_sort"] = type_sort_dict[type_code]
            no_all_df["is_day_max"] = 0
            no_all_df = no_all_df.sort_values("p_total", ascending=False)
            no_all_df = no_all_df.reset_index(drop=True)
            no_all_df.loc[0, "is_day_max"] = 1

            # Attach org names/parents, then rename to the output schema.
            # no_all_df = no_all_df.merge(type_df, on='type_code', how='left')
            no_all_df = no_all_df.merge(filter_df, on="on7", how="left")
            no_all_df.rename(
                columns={
                    "on7": "org_no",
                    "xian": "org_name",
                    "on5": "p_org_no",
                    "shi": "p_org_name",
                },
                inplace=True,
            )

            # on7_type_df = pd.concat([on7_type_df, no_all_df])
            on7_df = pd.concat([on7_df, no_all_df[res_columns]])

    if on7_df.empty:
        return pd.DataFrame(), pd.DataFrame()

    # Add a type_code="all" rollup row set (summed across type codes).
    all_result = (
        on7_df.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    all_result["type_code"] = "all"
    group_field = ["type_code_sort"]
    # NOTE(review): is_write is left False here, so these "all" max rows
    # are not collected into total_max_df — confirm that is intended.
    max_df, total_max_df = proc_max_time(all_result, total_max_df, group_field)
    all_result = all_result.merge(
        max_df[["org_no", "is_day_max", "data_time", "type_code_sort"]],
        on=["org_no", "data_time", "type_code_sort"],
        how="left",
    )
    all_result["is_day_max"].fillna(0, inplace=True)

    # Then add a type_code_sort="all" rollup row set on top of that.
    sort_all_result = (
        all_result.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "data_time",
                "data_date",
                "type_code",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sort_all_result["type_code_sort"] = "all"
    max_df, total_max_df = proc_max_time(
        sort_all_result, total_max_df, is_write=True
    )
    sort_all_result = sort_all_result.merge(
        max_df[["org_no", "is_day_max", "data_time"]],
        on=["org_no", "data_time"],
        how="left",
    )
    sort_all_result["is_day_max"].fillna(0, inplace=True)

    # Stack raw + both rollups in the shared output column order.
    total_df = pd.concat([total_df, on7_df[res_columns]])
    total_df = pd.concat([total_df, all_result[res_columns]])
    total_df = pd.concat([total_df, sort_all_result[res_columns]])

    return total_df, total_max_df


def stats(data_date, powerload_idx, powerload_area_idx, filter_df):
    """Run the full per-day pipeline for *data_date*.

    Aggregates county data (stats1), rolls it up to city/province level
    (stats2), then writes the rows to ES and the max-time rows to MySQL.
    Returns early when the day has no data.
    """
    print("start", data_date)
    started = time.time()

    total_df, total_max_df = stats1(data_date, powerload_idx, filter_df)
    if total_df.empty:
        return

    total_df, total_max_df = stats2(total_df, total_max_df)

    write_to_es(total_df, powerload_area_idx)
    sql_engine.insert_df(total_max_df, "area_max_time")
    print(f"import mysql:{len(total_max_df)}")

    print("end", data_date, time.time() - started)


def main(
    process_num,
    powerload_idx,
    powerload_area_idx,
    start_date,
    end_date,
    filter_df,
):
    """Run stats() for every day in [start_date, end_date] (inclusive).

    :param process_num: size of the worker process pool.
    :param powerload_idx: source ES index; when falsy, a per-month index
        name ``hbfh_powerload_idx-YYYY.MM`` is derived from each date.
    :param powerload_area_idx: target ES index; same per-month fallback.
    :param start_date: first date, ISO string.
    :param end_date: last date, ISO string.
    :param filter_df: org/type mapping frame from proc_area_type_map().
    """
    date_list = pd.date_range(
        start=start_date, end=end_date, freq="1D"
    ).strftime("%Y-%m-%d")
    pro_pool = Pool(process_num)
    for data_date in date_list:
        # Bug fix: the original assigned back into powerload_idx /
        # powerload_area_idx, so the month-derived index names were frozen
        # to the first date's month for every later date.  Derive
        # per-iteration names in locals instead.
        month_tag = data_date[0:7].replace("-", ".")
        pl_idx = (
            powerload_idx
            if powerload_idx
            else "hbfh_powerload_idx-{}".format(month_tag)
        )
        pla_idx = (
            powerload_area_idx
            if powerload_area_idx
            else "hbfh_powerload_area_idx-{}".format(month_tag)
        )

        result = pro_pool.apply_async(
            func=stats,
            args=(data_date, pl_idx, pla_idx, filter_df),
        )
        try:
            # NOTE(review): blocking on get() inside the loop serializes
            # the pool (one date at a time) — presumably deliberate for
            # error visibility; confirm before parallelizing.
            result.get()
        except Exception:
            print(traceback.format_exc())

    pro_pool.close()
    pro_pool.join()


def proc_area_type_map(c_cons_idx, no_mysql):
    """Load the consumer/area mapping index and mirror it into MySQL.

    Pulls *c_cons_idx* into pandas via the module-level ``ep`` client and
    returns the deduplicated on5/shi/on7/xian/type mapping frame that
    drives the per-day aggregation.  Unless *no_mysql* is set, the raw
    frame is written to ``c_cons`` and a three-level org/type map
    (county, city, province) to ``area_type_map``.
    """
    df = ep.to_pandas(c_cons_idx)
    filter_df = df[
        ["on5", "shi", "on7", "xian", "type_code", "type_code_sort"]
    ].drop_duplicates()

    if no_mysql:
        return filter_df

    sql_engine.insert_df(df, "c_cons")

    # County level: on7/xian become org_no/org_name.
    county = df[
        ["on7", "xian", "type_code", "type_code_sort"]
    ].drop_duplicates()
    county.rename(
        columns={"on7": "org_no", "xian": "org_name"}, inplace=True
    )

    # City level: on5/shi become org_no/org_name.
    city = df[
        ["on5", "shi", "type_code", "type_code_sort"]
    ].drop_duplicates()
    city.rename(columns={"on5": "org_no", "shi": "org_name"}, inplace=True)

    # Province level: one fixed org for the whole province.
    province = df[["type_code", "type_code_sort"]].drop_duplicates()
    province["org_no"] = "99"
    province["org_name"] = "河北省电力公司"

    area_map = pd.concat([county, city, province])
    sql_engine.insert_df(area_map, "area_type_map")
    return filter_df


def get_filter_cons_list(filter_csv):
    """Return the ``cons_no`` values from *filter_csv* as strings.

    An empty list is returned when no CSV path was supplied on the
    command line.
    """
    if not filter_csv:
        return []
    cons_df = pd.read_csv(filter_csv, header=0, dtype=str)
    return cons_df["cons_no"].tolist()


if __name__ == "__main__":
    # Default configuration; any of these can be overridden by the
    # command-line options parsed below.
    # NOTE(review): ES credentials are embedded in source — move to
    # config / environment.
    es_host = "zxtech:Zxod112_shining10@192.168.80.231:19200"
    process_num = 2
    filter_csv = None
    is_init_temp = False
    start_date = "2022-01-01"
    end_date = "2023-01-31"
    powerload_idx = "hbfh_powerload_idx-2022.01"
    powerload_area_idx = "hbfh_powerload_area_idx"
    c_cons_idx = "hbfh_c_cons_idx"
    no_mysql = False

    # Short options: -h es host, -n process count, -f filter csv,
    # -i init-template flag, -s start date, -e end date.  Long options
    # select the ES indices and disable the MySQL mirror.
    opts, args = getopt.getopt(
        sys.argv[1:], "h:n:f:is:e:", ["pl=", "pla=", "cc=", "no_mysql"]
    )
    for opt, val in opts:
        if opt == "-h":
            es_host = val
        elif opt == "-n":
            process_num = int(val)
        elif opt == "-f":
            filter_csv = val
        elif opt == "-i":
            is_init_temp = True
        elif opt == "-s":
            start_date = val
        elif opt == "-e":
            end_date = val
        elif opt == "--pl":
            powerload_idx = val
        elif opt == "--pla":
            powerload_area_idx = val
        elif opt == "--cc":
            c_cons_idx = val
        elif opt == "--no_mysql":
            no_mysql = True
    # NOTE(review): ctypes.CDLL exposes C symbols, not Python attributes;
    # the zElasticsearch call below cannot work through CDLL as written —
    # presumably a normal import of the compiled extension was intended.
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    a = time.time()
    # Module-level globals consumed by the worker functions above.
    es = my_lib.zElasticsearch(es_host)
    ep = es_pandas(es_host)
    # if is_init_temp:
    #     init_es_tmpl(es_host)

    # Output column order shared by stats1/stats2.
    res_columns = [
        "org_no",
        "org_name",
        "p_org_no",
        "p_org_name",
        "type_code",
        "type_code_sort",
        "data_time",
        "data_date",
        "p_total",
        "p_kt",
        "p_base",
        "is_day_max",
    ]

    # NOTE(review): cons_list (and is_init_temp) are currently unused.
    cons_list = get_filter_cons_list(filter_csv)
    filter_df = proc_area_type_map(c_cons_idx, no_mysql)
    main(
        process_num,
        powerload_idx,
        powerload_area_idx,
        start_date,
        end_date,
        filter_df,
    )
    b = time.time()
    print("end", b - a)
