import getopt
import sys
import time
import traceback
from datetime import datetime, timedelta

import pandas as pd

from air_web.config.config import config
from air_web.data_platform import sql_engine
import ctypes;

def modify_alias(index_names, alias_name):
    """Point *alias_name* at *index_names*, dropping any previous binding.

    Uses the module-level ``es`` client. If the alias already exists it is
    first removed from every index matching ``{alias_name}*``.
    """
    # Collect every alias currently known to the cluster via _cat/aliases.
    existing = [entry["alias"] for entry in es.cat.aliases(format="json")]

    # Detach the alias from all matching indices before re-adding it.
    if alias_name in existing:
        es.indices.update_aliases(
            body={
                "actions": [
                    {"remove": {"index": f"{alias_name}*", "alias": alias_name}}
                ]
            }
        )
        print(f"remove alias:{alias_name}")

    # Attach the alias to the requested index (or index pattern).
    es.indices.update_aliases(
        body={"actions": [{"add": {"index": index_names, "alias": alias_name}}]}
    )
    print(f"add index:{index_names}, alias:{alias_name}")


def do_bulk(
    actions,
    parallel=True,
    chunk_size=5000,
    thread=4,
    retry=5,
    raise_on_error=False,
    request_timeout=60,
):
    """Bulk-index *actions* into the module-level ``es`` client.

    Uses ``parallel_bulk`` (thread pool of *thread* workers) when *parallel*
    is true, otherwise ``streaming_bulk`` with up to *retry* retries.
    Prints a ``success/total`` summary; never raises — errors are logged.
    """
    success = 0
    failed = 0
    # NOTE(review): ctypes.CDLL loads a shared object for C-style calls and
    # does not expose Python attributes such as ``helpers`` — presumably this
    # compiled extension is meant to be imported, not CDLL-loaded; confirm.
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    try:
        if parallel:
            gen = my_lib.helpers.parallel_bulk(
                es,
                actions,
                thread_count=thread,
                chunk_size=chunk_size,
                raise_on_error=raise_on_error,
                request_timeout=request_timeout,
            )
        else:
            gen = my_lib.helpers.streaming_bulk(
                es,
                actions,
                max_retries=retry,
                raise_on_error=raise_on_error,
                chunk_size=chunk_size,
                request_timeout=request_timeout,
            )
        # Each yielded item is (ok_flag, info_dict); count and log failures.
        for res in gen:
            if res[0]:
                success += 1
            else:
                failed += 1
                print(res[1])
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception, keeping the log.
        print("Caught do_bulk exception")
        traceback.print_exc()
    print(f"import es:{success}/{success + failed}")


def test_to_actions(df, index_name):
    """Yield one ES bulk action per row of *df*, keyed by a composite id.

    The document id concatenates org_no, type_id, type_code_sort and
    data_time with underscores; the whole row becomes ``_source``.
    """
    for row in df.to_dict(orient="records"):
        doc_id = (
            f"{row['org_no']}_{row['type_id']}"
            f"_{row['type_code_sort']}_{row['data_time']}"
        )
        yield {
            "_id": doc_id,
            "_index": index_name,
            "_type": "_doc",
            "_source": row,
        }


def write_to_es(df, index_name):
    """Bulk-write the rows of *df* into the ES index *index_name*."""
    # Earlier es_pandas implementation, kept for reference:
    # ep.to_es(df, index=index_name, thread_count=2, chunk_size=5000,
    #          use_index=False, request_timeout=60)
    actions = test_to_actions(df, index_name)
    do_bulk(actions)


def proc_max_time(df, total_max_df, group_field=None, is_write=False):
    """Keep only each group's peak-``p_kt`` row and flag it as the daily max.

    Groups *df* by ``group_field`` plus the base columns
    (org_no, org_name, data_date) and keeps the row with the highest
    ``p_kt`` per group. When *is_write* is true, the type_id/type_code_sort
    == "99" peak rows are renamed to the ``area_max_time`` schema and
    appended to *total_max_df*.

    Returns ``(peak_rows_df, total_max_df)``; the peak rows carry
    ``is_day_max == 1``.
    """
    # BUG FIX: the default was a mutable ``group_field=[]`` that was then
    # extend()-ed, so the base columns accumulated across calls and any
    # caller-supplied list was mutated in place. Build a fresh list instead.
    keys = list(group_field) if group_field else []
    keys.extend(["org_no", "org_name", "data_date"])

    # Sort descending on p_kt so drop_duplicates keeps each group's maximum.
    temp_df = df.sort_values("p_kt", ascending=False)
    temp_df.drop_duplicates(keys, inplace=True)

    if is_write:
        # Only the "all types / all sorts" rows feed the max-time table.
        res_df = temp_df.loc[
            (temp_df["type_id"] == "99") & (temp_df["type_code_sort"] == "99")
        ]
        res_df = res_df.drop(["type_id", "type_code_sort"], axis=1)
        # Parent-org columns only exist on some inputs (e.g. prefecture rows).
        if "p_org_no" in res_df:
            res_df = res_df.drop(["p_org_no", "p_org_name"], axis=1)
        res_df.rename(
            columns={
                "data_time": "max_time",
                "p_total": "max_p_total",
                "p_kt": "max_p_kt",
                "p_base": "max_p_base",
                "data_date": "compute_date",
            },
            inplace=True,
        )
        total_max_df = pd.concat([total_max_df, res_df])

    temp_df["is_day_max"] = 1
    return temp_df, total_max_df


def stats2(total_df, total_max_df):
    """Roll county-level rows up to prefecture ("shi") and province ("sheng").

    Groups *total_df* by the parent-org columns to build prefecture rows,
    then groups those once more for the province row set. Each level's
    daily peaks are flagged and collected into *total_max_df* via
    ``proc_max_time`` and both levels are appended to *total_df*.

    Depends on the module-level ``res_columns`` list for output columns.
    Returns the extended ``(total_df, total_max_df)`` pair.
    """
    print("proc shi")
    # Prefecture level: sum child-org loads per parent org / type / time.
    result = (
        total_df.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_id",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    # The parent org becomes this level's own org; the new parent is the
    # provincial company ("99").
    result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    result["p_org_no"] = "99"
    result["p_org_name"] = "河北省电力公司"

    group_field = ["type_id", "type_code_sort"]
    # is_write=True: prefecture peak rows are appended to total_max_df.
    max_df, total_max_df = proc_max_time(
        result, total_max_df, group_field, is_write=True
    )
    print("===============shi", len(max_df))
    # Mark rows coinciding with their group's daily peak (is_day_max == 1).
    result = result.merge(
        max_df[
            ["org_no", "type_id", "type_code_sort", "data_time", "is_day_max"]
        ],
        on=["org_no", "type_id", "type_code_sort", "data_time"],
        how="left",
    )
    result["is_day_max"].fillna(0, inplace=True)

    print("proc sheng")
    # Province level: aggregate the prefecture rows once more.
    sheng_result = (
        result.groupby(
            [
                "p_org_no",
                "p_org_name",
                "type_id",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sheng_result.rename(
        columns={"p_org_no": "org_no", "p_org_name": "org_name"}, inplace=True
    )
    # The province itself has no parent org.
    sheng_result["p_org_no"] = ""
    sheng_result["p_org_name"] = ""

    group_field = ["type_id", "type_code_sort"]
    max_df, total_max_df = proc_max_time(
        sheng_result, total_max_df, group_field, is_write=True
    )
    print("===============sheng", len(max_df))
    sheng_result = sheng_result.merge(
        max_df[
            ["org_no", "type_id", "type_code_sort", "data_time", "is_day_max"]
        ],
        on=["org_no", "type_id", "type_code_sort", "data_time"],
        how="left",
    )
    sheng_result["is_day_max"].fillna(0, inplace=True)

    # ``res_columns`` fixes the output column order (module-level global).
    total_df = pd.concat([total_df, result[res_columns]])
    total_df = pd.concat([total_df, sheng_result[res_columns]])

    return total_df, total_max_df


def proc_agg_data(agg_res):
    """Flatten ES terms-aggregation buckets into a DataFrame.

    Each bucket key is ``"type_id|data_time"``; the three sum
    sub-aggregations become the p_total / p_kt / p_base columns.
    """
    rows = []
    for bucket in agg_res:
        parts = bucket["key"].split("|")
        rows.append(
            {
                "type_id": int(parts[0]),
                "data_time": parts[1],
                "p_total": bucket["sum_p_total"]["value"],
                "p_kt": bucket["sum_p_kt"]["value"],
                "p_base": bucket["sum_p_base"]["value"],
            }
        )
    return pd.DataFrame(rows)


def stats1(data_date, end_date, powerload_idx, filter_df):
    """Aggregate raw power-load documents per county ("on7") for one day.

    For each county in *filter_df*, runs a scripted terms aggregation on
    that county's ES index over ``[data_date, end_date)``, then derives two
    roll-ups — type_id == "99" (all types) and type_code_sort == "99"
    (all sorts) — flagging daily peaks via ``proc_max_time``.

    Depends on the module-level ``es`` client and ``res_columns`` list.
    Returns ``(total_df, total_max_df)``; both empty if no county had data.
    """
    total_df = pd.DataFrame()
    total_max_df = pd.DataFrame()

    # Distinct county org numbers plus lookup tables for types and org names.
    on7_list = filter_df["on7"].drop_duplicates().tolist()
    type_df = filter_df[["type_id", "type_code_sort"]].drop_duplicates()
    filter_df = filter_df[["on5", "shi", "on7", "xian"]].drop_duplicates()

    # Build a painless script that joins the group-key fields with '|' so a
    # single terms aggregation groups on (type_id, data_time).
    columns = ["type_id", "data_time"]
    group_source = ""
    for idx, key in enumerate(columns):
        add_source = (
            f"+'|'+doc['{key}'].value" if idx else f"doc['{key}'].value"
        )
        group_source += add_source

    on7_df = pd.DataFrame()
    for on7 in on7_list:
        print("proc:", on7)
        if on7 in [None, "null"]:
            continue
        on7_str = str(on7)

        # Sum p_total / p_kt / p_base per (type_id, data_time) bucket within
        # the window; only docs with p_total < 1000000 are counted
        # (presumably an outlier guard — confirm threshold with data owner).
        query = {
            "size": 0,
            "query": {
                "bool": {
                    "must": [
                        {
                            "range": {
                                "data_time": {"gte": data_date, "lt": end_date}
                            }
                        },
                        {"term": {"on7": on7_str}},
                        {"range": {"p_total": {"lt": 1000000}}},
                    ]
                }
            },
            "aggs": {
                "group_by": {
                    "terms": {
                        "size": 10000,
                        "script": {"source": group_source, "lang": "painless"},
                    },
                    "aggs": {
                        "sum_p_total": {"sum": {"field": "p_total"}},
                        "sum_p_base": {"sum": {"field": "p_base"}},
                        "sum_p_kt": {"sum": {"field": "p_kt"}},
                    },
                }
            },
        }

        # Per-county index name: substitute the 5- and 7-digit org prefixes
        # into the index-name template.
        index_name = powerload_idx.replace("on5", on7_str[0:5]).replace(
            "on7", on7_str[0:7]
        )
        result = es.search(index=index_name, body=query, timeout="60s")
        agg_res = result["aggregations"]["group_by"]["buckets"]
        if len(agg_res) == 0:
            # "county is empty, skipping"
            print(f"{data_date}_{on7}县区为空，跳过")
            continue
        no_all_df = proc_agg_data(agg_res)

        no_all_df = no_all_df.astype({"data_time": "datetime64[ns]"})
        no_all_df["on7"] = on7
        no_all_df["data_date"] = data_date
        # Flag the single highest-p_total row of the day for this county.
        no_all_df["is_day_max"] = 0
        no_all_df = no_all_df.sort_values("p_total", ascending=False)
        no_all_df = no_all_df.reset_index(drop=True)
        no_all_df.loc[0, "is_day_max"] = 1

        # Attach type_code_sort and the org-name / parent-org columns.
        no_all_df = no_all_df.merge(type_df, on="type_id", how="left")
        no_all_df = no_all_df.merge(filter_df, on="on7", how="left")
        no_all_df.rename(
            columns={
                "on7": "org_no",
                "xian": "org_name",
                "on5": "p_org_no",
                "shi": "p_org_name",
            },
            inplace=True,
        )

        on7_df = pd.concat([on7_df, no_all_df[res_columns]])
    if on7_df.empty:
        return pd.DataFrame(), pd.DataFrame()

    # Add an "all types" row set: sum across type_id, marked type_id == "99".
    all_result = (
        on7_df.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "type_code_sort",
                "data_time",
                "data_date",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    all_result["type_id"] = "99"
    group_field = ["type_code_sort"]
    max_df, total_max_df = proc_max_time(all_result, total_max_df, group_field)
    all_result = all_result.merge(
        max_df[["org_no", "is_day_max", "data_time", "type_code_sort"]],
        on=["org_no", "data_time", "type_code_sort"],
        how="left",
    )
    all_result["is_day_max"].fillna(0, inplace=True)

    # Add an "all sorts" row set: sum across type_code_sort, marked "99".
    sort_all_result = (
        all_result.groupby(
            [
                "org_no",
                "org_name",
                "p_org_no",
                "p_org_name",
                "data_time",
                "data_date",
                "type_id",
            ]
        )
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    sort_all_result["type_code_sort"] = "99"
    # is_write=True: these type_id==99/type_code_sort==99 peaks feed MySQL.
    max_df, total_max_df = proc_max_time(
        sort_all_result, total_max_df, is_write=True
    )
    sort_all_result = sort_all_result.merge(
        max_df[["org_no", "is_day_max", "data_time"]],
        on=["org_no", "data_time"],
        how="left",
    )
    sort_all_result["is_day_max"].fillna(0, inplace=True)

    total_df = pd.concat([total_df, on7_df[res_columns]])
    total_df = pd.concat([total_df, all_result[res_columns]])
    total_df = pd.concat([total_df, sort_all_result[res_columns]])

    return total_df, total_max_df


def stats(start_date, end_date, powerload_idx, powerload_area_idx, filter_df):
    """Run the per-day aggregation pipeline and persist the results.

    Aggregates raw loads (stats1), rolls them up (stats2), then writes the
    detail rows to Elasticsearch and the daily maxima to MySQL.
    """
    print("start", start_date)
    started = time.time()

    total_df, total_max_df = stats1(
        start_date, end_date, powerload_idx, filter_df
    )
    # Nothing to persist when no county produced data for this day.
    if total_df.empty:
        return

    total_df, total_max_df = stats2(total_df, total_max_df)

    write_to_es(total_df, powerload_area_idx)
    sql_engine.update_df_by_id(total_max_df, "area_max_time")
    print(f"import mysql:{len(total_max_df)}")

    print("end", start_date, time.time() - started)


def main(start_date, end_date):
    """Load the org/type filter table and run stats for one date window."""
    sql = "select distinct on5, shi, on7, xian, type_id, type_code_sort from c_cons"
    filter_df = sql_engine.query(sql)

    stats(
        start_date,
        end_date,
        config["POWERLOAD"],
        config["POWERLOAD_AREA"],
        filter_df,
    )
    # modify_alias(powerload_area_idx, ori_powerload_area_idx)


def job():
    """Recompute area loads day-by-day from (now - 3h) through tomorrow."""
    now = datetime.now().strftime("%Y.%m.%d %H.%M.%S")
    # NOTE(review): the 3-hour lookback presumably re-covers the previous
    # day when the job runs shortly after midnight — confirm with scheduler.
    start_date = (datetime.now() - timedelta(hours=3)).strftime("%Y-%m-%d")
    end_date = (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d")
    print("开始计算", now, start_date, end_date)

    days = pd.date_range(start=start_date, end=end_date, freq="1D").strftime(
        "%Y-%m-%d"
    )
    # Process each consecutive [day, next_day) window.
    for begin, finish in zip(days, days[1:]):
        main(begin, finish)


if __name__ == "__main__":
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    es = my_lib.zElasticsearch(config["ES_HOST"])

    res_columns = [
        "org_no",
        "org_name",
        "p_org_no",
        "p_org_name",
        "type_id",
        "type_code_sort",
        "data_time",
        "data_date",
        "p_total",
        "p_kt",
        "p_base",
        "is_day_max",
    ]
    job()
