import getopt
import os
import sys
import traceback
from multiprocessing import Pool

import pandas as pd
from es_pandas import es_pandas

from air_web.data_platform import mysql_con, sql_engine
from air_web.web_flask.tools.es_data_mapping import DataMap, init_es_tmpl
import ctypes;

def proc_cons_gb():
    """Build the ganbiao (low-voltage resident) consumer master CSV.

    Filters the raw dump ``gb_11.csv`` down to the metering points listed
    in ``cons_gb_list.csv``, joins the org-number table to add the county
    name ("XIAN"), and writes ``<path>/cons_gb.csv`` with lower-cased
    column names. Reads the module-level ``path``.
    """
    org_path = os.path.join(path, "real_org_no.csv")
    org_df = pd.read_csv(org_path, header=0, dtype=str)

    cons_no_path = os.path.join(path, "cons_gb_list.csv")
    cons_list_df = pd.read_csv(cons_no_path, header=0, dtype=str)
    mp_list = cons_list_df["mp_id"].tolist()

    cons_path = os.path.join(path, "gb_11.csv")
    df = pd.read_csv(cons_path, header=0, dtype=str, index_col=0)
    # .copy() so the in-place renames/assignments below operate on an
    # independent frame, not a view of df (avoids SettingWithCopyWarning).
    filter_df = df.loc[df["EMP_ID"].isin(mp_list)].copy()
    filter_df = filter_df[
        [
            "EMP_ID",
            "CCONS_CONS_NO",
            "CCONS_CONS_NAME",
            "ON5",
            "CITY",
            "ON7",
            "CM_T_FACTOR",
            "RCOLL_T_FACTOR",
        ]
    ]
    filter_df.rename(
        columns={
            "CITY": "SHI",
            "EMP_ID": "ID",
            "CCONS_CONS_NO": "CONS_NO",
            "CCONS_CONS_NAME": "CONS_NAME",
        },
        inplace=True,
    )
    filter_df["TYPE_CODE"] = "低压居民"
    filter_df["TYPE_CODE_SORT"] = 2
    filter_df["SHI"] = filter_df["SHI"].str.replace("供电公司", "市")
    filter_df = filter_df.merge(
        org_df[["ORG_NO", "ORG_NAME"]],
        left_on="ON7",
        right_on="ORG_NO",
        how="left",
    )
    filter_df.rename(columns={"ORG_NAME": "XIAN"}, inplace=True)
    filter_df.drop("ORG_NO", axis=1, inplace=True)

    filter_df.columns = map(str.lower, filter_df.columns)
    # BUG FIX: was `path + "cons_gb.csv"`, which produces a wrong file name
    # when -p is given without a trailing separator; use os.path.join like
    # every read above.
    filter_df.to_csv(os.path.join(path, "cons_gb.csv"), index=False)


def proc_cons_zb():
    """Build the zhuanbian (dedicated-transformer) consumer master CSV.

    Filters the raw association dump down to the consumers listed in
    ``cons_list.csv``, joins the org-number table to add the county name
    ("XIAN"), and writes ``<path>/cons_zb.csv`` with lower-cased column
    names. Reads the module-level ``path``.
    """
    org_path = os.path.join(path, "real_org_no.csv")
    org_df = pd.read_csv(org_path, header=0, dtype=str)

    cons_no_path = os.path.join(path, "cons_list.csv")
    cons_list_df = pd.read_csv(cons_no_path, header=0, dtype=str)
    cons_list = cons_list_df["cons_no"].tolist()

    cons_path = os.path.join(path, "专变用户关联结果.csv")
    df = pd.read_csv(cons_path, header=0, dtype=str, index_col=0)
    # .copy() so the in-place renames/assignments below operate on an
    # independent frame, not a view of df (avoids SettingWithCopyWarning).
    filter_df = df.loc[df["EMP_ID"].isin(cons_list)].copy()
    filter_df = filter_df[
        [
            "EMP_ID",
            "CCONS_CONS_NO",
            "CCONS_CONS_NAME",
            "ON5",
            "CITY",
            "ON7",
            "负控标签分类",
            "CM_T_FACTOR",
            "RCOLL_T_FACTOR",
        ]
    ]
    filter_df.rename(
        columns={
            "CITY": "SHI",
            "负控标签分类": "TYPE_CODE",
            "EMP_ID": "ID",
            "CCONS_CONS_NO": "CONS_NO",
            "CCONS_CONS_NAME": "CONS_NAME",
        },
        inplace=True,
    )
    filter_df["TYPE_CODE_SORT"] = 1
    filter_df["SHI"] = filter_df["SHI"].str.replace("供电公司", "市")
    filter_df = filter_df.merge(
        org_df[["ORG_NO", "ORG_NAME"]],
        left_on="ON7",
        right_on="ORG_NO",
        how="left",
    )
    filter_df.rename(columns={"ORG_NAME": "XIAN"}, inplace=True)
    filter_df.drop("ORG_NO", axis=1, inplace=True)

    filter_df.columns = map(str.lower, filter_df.columns)
    # BUG FIX: was `path + "cons_zb.csv"`, which produces a wrong file name
    # when -p is given without a trailing separator; use os.path.join like
    # every read above.
    filter_df.to_csv(os.path.join(path, "cons_zb.csv"), index=False)


def do_bulk(
    actions,
    parallel=True,
    chunk_size=5000,
    thread=4,
    retry=5,
    raise_on_error=False,
    request_timeout=60,
):
    """Send an iterable of bulk actions to the module-level ``es`` client.

    Consumes the ``(ok, info)`` tuples yielded by the bulk helper, counts
    successes and failures, prints each failure's info, and finally prints
    a ``success/total`` summary.

    :param actions: iterable of bulk action dicts (see test_to_actions).
    :param parallel: use the thread-parallel helper instead of streaming.
    :param chunk_size: docs per bulk request.
    :param thread: worker threads for the parallel helper.
    :param retry: max retries for the streaming helper.
    :param raise_on_error: propagate bulk errors instead of yielding them.
    :param request_timeout: per-request timeout in seconds.
    """
    # NOTE(review): ctypes.CDLL exposes C symbols only; attribute chains like
    # my_lib.helpers.parallel_bulk cannot resolve on a CPython extension .so,
    # so this path will raise before indexing anything. This looks like it
    # should be `from elasticsearch import helpers` — confirm original intent.
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    success = 0
    failed = 0
    try:
        if parallel:
            gen = my_lib.helpers.parallel_bulk(
                es,
                actions,
                thread_count=thread,
                chunk_size=chunk_size,
                raise_on_error=raise_on_error,
                request_timeout=request_timeout,
            )
        else:
            gen = my_lib.helpers.streaming_bulk(
                es,
                actions,
                max_retries=retry,
                raise_on_error=raise_on_error,
                chunk_size=chunk_size,
                request_timeout=request_timeout,
            )
        for res in gen:
            if res[0]:
                success += 1
            else:
                failed += 1
                print(res[1])
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        print("Caught do_bulk exception")
        traceback.print_exc()
    print(f"{success}/{success + failed}")


def test_to_actions(df, index_name):
    """Yield one Elasticsearch bulk-index action per DataFrame row."""
    records = df.to_dict(orient="records")
    for source in records:
        yield {
            "_index": index_name,
            "_type": "_doc",
            "_source": source,
        }


def write_data(res_df, table_name):
    """Write a result frame to Elasticsearch via the module-level ``ep`` handle.

    ``c_cons`` goes to a single index; time-series tables are partitioned
    into one index per month of ``data_date`` (assumes ``data_date`` is
    already datetime64 — set in proc_data).
    """
    # sql_engine.insert_df(res_df, table_name)

    if table_name == "c_cons":
        index_name = f"hbfh_{table_name}_idx"
        ep.to_es(res_df, index_name, request_timeout=60)
    else:
        res_df["month"] = res_df["data_date"].dt.strftime("%Y.%m")
        for month, group_df in res_df.groupby("month"):
            index_name = f"hbfh_{table_name}_idx-{month}"
            group_df.drop("month", axis=1, inplace=True)
            # BUG FIX: the original wrote res_df (the WHOLE frame, with the
            # helper "month" column still attached) once per month group,
            # duplicating every row into every monthly index. Write the
            # group, as the commented do_bulk line below also intended.
            ep.to_es(group_df, index_name, request_timeout=60)
            # do_bulk(test_to_actions(group_df, index_name))


def trans_type(df):
    """Cast every df column that appears in DataMap.MAP to its mapped dtype.

    Columns absent from the mapping are left untouched; cast failures are
    ignored (errors="ignore").
    """
    present = set(df.columns.values)  # columns that exist in df
    dtype = {col: typ for col, typ in DataMap.MAP.items() if col in present}
    return df.astype(dtype, errors="ignore")


def proc_data(cons_df, df, field_list, table_name, date_field, is_gb):
    """Join one raw load frame with the consumer master, normalize it, and
    write the result via write_data.

    :param cons_df: consumer master frame; mutated in place (a ``cons_merge``
        column is added). Must have ``cons_no``, ``mp_id``, ``cons_name``,
        ``t_factor`` columns.
    :param df: raw CSV frame (string dtype); its ``cons_no`` is the merge key.
    :param field_list: columns to keep after the merge.
    :param table_name: target table/index name forwarded to write_data.
    :param date_field: raw timestamp column, renamed to ``data_time``.
    :param is_gb: ganbiao mode — changes merge-key normalization and skips
        the transformer-factor scaling.
    :return: the processed frame (before dropping the columns not written).
    """
    df.columns = map(str.lower, df.columns)
    df.rename(columns={"cons_no": "cons_merge"}, inplace=True)
    if is_gb:
        cons_df["cons_merge"] = cons_df["cons_no"]
    else:
        # int round-trip strips leading zeros so both merge keys agree
        cons_df["cons_merge"] = cons_df["cons_no"].astype(int).astype(str)
    df = df.merge(cons_df, on="cons_merge")[field_list]
    df.rename(columns={date_field: "data_time"}, inplace=True)
    df = trans_type(df)
    if date_field == "datetime":
        # 96-point curve data: snap to the 15-minute grid and deduplicate
        df = proc_data_time(df)
    df["data_date"] = df["data_time"].dt.date.astype("datetime64")
    # Suffix cons_no/cons_name with mp_id so each metering point is unique,
    # keeping the originals in real_* columns.
    df["real_cons_no"] = df["cons_no"]
    df["cons_no"] = df["cons_no"] + "_" + df["mp_id"]
    df["real_cons_name"] = df["cons_name"]
    df["cons_name"] = df["cons_name"] + "_" + df["mp_id"]
    # Boolean flags arrive as the strings "False"/"True"; store as 0/1.
    if "is_day_max" in df:
        df["is_day_max"] = df["is_day_max"].replace({"False": 0, "True": 1})
    if "kt_day" in df:
        df["kt_day"] = df["kt_day"].replace({"False": 0, "True": 1})

    if not is_gb:
        # Scale power columns by the transformer factor (4-decimal rounding);
        # ganbiao data is not scaled.
        for col in ["p_total", "p_base", "p_kt", "p_std_left", "p_std_right"]:
            if col in df:
                df[col] = round(df[col] * df["t_factor"], 4)

    res_df = df.drop(["real_cons_name", "t_factor"], axis=1)
    write_data(res_df, table_name)
    return df


def proc_data_time(df_96):
    """Snap 96-point timestamps to the 15-minute grid and drop duplicate
    (data_time, cons_no) rows, keeping the first occurrence."""
    rounded = df_96["data_time"].dt.round("15min")
    df_96["data_time"] = rounded
    deduped = df_96.drop_duplicates(subset=["data_time", "cons_no"])
    return deduped


def proc_powerload(power_path, mp_id, cons_df, is_gb=False):
    """Process one metering point: load its daily and 96-point CSVs, push
    both series to ES via proc_data, then write its consumer-master rows.

    :param power_path: directory holding the per-meter output CSVs.
    :param mp_id: metering-point id (file-name stem).
    :param cons_df: consumer master frame passed through to proc_data.
    :param is_gb: ganbiao mode — file names carry a ".pickle" infix.
    """
    if is_gb:
        suffix_96, suffix_day = ".pickle_data_out_96", ".pickle_data_out_day"
    else:
        suffix_96, suffix_day = "_data_out_96", "_data_out_day"
    df_day = pd.read_csv(f"{power_path}/{mp_id}{suffix_day}.csv", dtype=str)
    df_96 = pd.read_csv(f"{power_path}/{mp_id}{suffix_96}.csv", dtype=str)

    curve_fields = [
        "on5",
        "shi",
        "on7",
        "xian",
        "type_code",
        "type_code_sort",
        "cons_no",
        "cons_name",
        "mp_id",
        "datetime",
        "p_total",
        "p_base",
        "p_kt",
        "p_std_left",
        "p_std_right",
        "is_day_max",
        "t_factor",
    ]
    proc_data(cons_df, df_96, curve_fields, "powerload", "datetime", is_gb)
    print(f"proc {mp_id} powerload end")

    day_fields = [
        "on5",
        "shi",
        "on7",
        "xian",
        "type_code",
        "type_code_sort",
        "cons_no",
        "cons_name",
        "mp_id",
        "date",
        "p_total",
        "p_base",
        "p_kt",
        "p_max",
        "kt_day",
        "t_factor",
    ]
    df = proc_data(cons_df, df_day, day_fields, "powerload_day", "date", is_gb)
    print(f"proc {mp_id} powerload_day end")

    cons_cols = [
        "on5",
        "shi",
        "on7",
        "xian",
        "type_code",
        "type_code_sort",
        "cons_no",
        "cons_name",
        "real_cons_no",
        "real_cons_name",
        "mp_id",
        "t_factor",
    ]
    res_cons_df = df[cons_cols].drop_duplicates()
    write_data(res_cons_df, "c_cons")


def main():
    """Read the consumer list for the current mode and fan the per-meter
    processing (proc_powerload) out over a 5-worker process pool.

    Reads the module-level globals ``path``, ``is_gb`` and ``t_factor``.
    """
    conc_name = "cons_gb.csv" if is_gb else "cons_zb.csv"
    cons_path = os.path.join(path, conc_name)
    cons_df = pd.read_csv(cons_path, header=0, dtype=str)

    # Normalize to the column names proc_data expects; which factor column
    # becomes t_factor is selected by the -r command-line switch.
    cons_df.rename(columns={"id": "mp_id", t_factor: "t_factor"}, inplace=True)
    cons_df["t_factor"] = cons_df["t_factor"].astype(float)
    if is_gb:
        # Ganbiao data files are keyed by "<cons_no>-<mp_id>".
        cons_df["temp_id"] = cons_df["cons_no"] + "-" + cons_df["mp_id"]
        id_list = cons_df["temp_id"].tolist()
    else:
        id_list = cons_df["mp_id"].tolist()

    power_path = os.path.join(path, "96_data_new")
    pro_pool = Pool(5)
    # BUG FIX: the original called result.get() immediately after each
    # apply_async, blocking on every task in turn and serializing the pool.
    # Submit everything first, then collect.
    pending = []
    # NOTE(review): only the first 10 ids are processed — confirm intentional.
    for mp_id in id_list[0:10]:
        print(f"proc {mp_id}")
        # proc_powerload(power_path, mp_id, cons_df, is_gb)
        pending.append(
            pro_pool.apply_async(
                func=proc_powerload, args=(power_path, mp_id, cons_df, is_gb)
            )
        )
    for result in pending:
        try:
            result.get()
        # Was a bare `except:`; keep the per-task best-effort behavior but
        # let SystemExit/KeyboardInterrupt propagate.
        except Exception:
            print(traceback.format_exc())
    pro_pool.close()
    pro_pool.join()


if __name__ == "__main__":
    # Defaults; each can be overridden by the command-line flags below.
    path = "/home/smxu/hebei/"
    is_gb = False
    t_factor = "cm_t_factor"
    # SECURITY(review): credentials hard-coded in source — move to an
    # environment variable or config file before sharing/deploying.
    es_host = "zxtech:Zxod112_shining10@192.168.80.231:19200"

    # -p <dir>  data directory (default above)
    # -g        ganbiao mode (use cons_gb.csv and ".pickle" file names)
    # -r        use rcoll_t_factor instead of cm_t_factor
    opts, args = getopt.getopt(
        sys.argv[1:],
        "p:gr",
    )
    for opt, val in opts:
        if opt == "-p":
            path = val
        elif opt == "-g":
            is_gb = True
        elif opt == "-r":
            t_factor = "rcoll_t_factor"

    # proc_cons_zb()
    # proc_cons_gb()

    if es_host:
        # NOTE(review): ctypes.CDLL on a CPython extension .so only exposes C
        # symbols; my_lib.zElasticsearch below will not resolve as a Python
        # class — confirm the intended import.
        my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
        ep = es_pandas(es_host)
        es = my_lib.zElasticsearch(es_host)
        # init_es_tmpl(es)

    main()
    print("end")
