import getopt
import os
import sys
import time
import traceback
from datetime import date, datetime, timedelta
from multiprocessing import Pool

import numpy as np
import pandas as pd
from es_pandas import es_pandas

import ruleng.es
from air_web.config.config import config
from air_web.data_platform import sql_engine
from air_web.dw.data_mapping import ConsType
from air_web.dw.logger import init_log
from air_web.scripts.check_mysql_data_tool import (
    check_predict_data,
    check_virtual_data,
)
from air_web.web_flask.dal.base_dal import EsBaseDal
import ctypes;

# Concrete timeout exception raised by the bundled ES client; the sync
# workers catch it around their search retries.
ConnectionTimeout = ruleng.es.es_mod.exceptions.ConnectionTimeout


# Create the log directory. makedirs(exist_ok=True) replaces the previous
# exists()+mkdir() pair, which raced with concurrent starts and failed when
# the parent directory was missing.
LOG_DIR = "/home/zshield/logs/dw/"
os.makedirs(LOG_DIR, exist_ok=True)
log = init_log(LOG_DIR)


def get_region_df(max_level=2):
    """Return distinct region rows from real_org_no up to *max_level* (inclusive)."""
    sql = (
        "select Distinct org_no, org_name, ad_org_name, p_org_no "
        f"from real_org_no where org_level <= {max_level}"
    )
    return sql_engine.query(sql)


def get_type_df():
    """Return the distinct industry-type hierarchy rows from type_map."""
    sql = "select DISTINCT type_id, p_type_id,type_level,type_code from type_map"
    return sql_engine.query(sql)


def get_agg_days(index):
    """
    Return the distinct days (as "YYYY-MM-DD" strings) that contain documents
    in the ES index *index*, via a 1-day date_histogram on data_time.

    NOTE(review): this definition is shadowed by a byte-identical
    redefinition of get_agg_days later in this module; only the later one is
    effective. One of the two should be deleted.
    """
    agg_query = {
        "aggs": {
            "date_list": {
                "date_histogram": {
                    "field": "data_time",
                    "calendar_interval": "1d",
                }
            }
        },
        "size": 0,  # buckets only, no document hits
    }

    # `es` is a module-level client created in the __main__ block.
    result = es.search(index=index, body=agg_query, request_timeout=60)
    date_list = [
        bucket["key_as_string"]
        for bucket in result["aggregations"]["date_list"]["buckets"]
    ]
    # Assumes ES renders key_as_string like "2023-01-01 00:00:00.000000" —
    # TODO confirm against the index mapping's date format.
    date_format = "%Y-%m-%d %H:%M:%S.%f"
    date_objects = [
        datetime.strptime(date_str, date_format) for date_str in date_list
    ]
    date_strs = [date_obj.strftime("%Y-%m-%d") for date_obj in date_objects]
    return date_strs


def get_cons_df():
    """
    Pull the virtual-user archive (ordinary and line users) from ES and
    upsert it, de-duplicated on the selected columns, into the MySQL
    C_CONS table.
    """
    query_rule = {
        "query": {
            "terms": {
                "cons_type": [ConsType.ORDINARY_USER, ConsType.LINE_USER]
            }
        }
    }
    cons_df = ep.to_pandas(config["C_CONS_IDX"], query_rule=query_rule)
    keep_cols = [
        "on5",
        "shi",
        "on7",
        "xian",
        "type_id",
        "type_code",
        "type_code_sort",
        "cons_no",
        "cons_name",
        "cons_type",
        "pare_type_id",
        "pare_type_code",
        "org_no",
        "org_name",
    ]
    deduped = cons_df[keep_cols].drop_duplicates()
    sql_engine.update_df_by_id(deduped, config["C_CONS"])


def get_virtual_15min(v_power96_df, start_time, end_time):
    """
    Build and persist the virtual-user v_orgno_typeid_15min rows for one
    batch (one district, multiple industries, one day span), then trigger
    the daily max aggregation via get_max_time.

    Parameters
    ----------
    v_power96_df : pd.DataFrame
        Raw rows with on5/on7/type_id/data_time/p_total/p_kt/p_base columns;
        must have a 0-based index (row 0 is read for the org hierarchy).
    start_time, end_time : str
        Day range, used for logging only.
    """
    # Row 0 carries the org hierarchy for the whole batch.
    on5 = int(v_power96_df["on5"][0])
    on7 = int(v_power96_df["on7"][0])

    save_table = "v_orgno_typeid_15min"
    v_power96_df = v_power96_df[
        ["type_id", "data_time", "p_total", "p_kt", "p_base"]
    ]
    v_power96_df = v_power96_df.drop_duplicates(
        subset=["type_id", "data_time"]
    )
    # Resolve the effective org_no: on7 by default; sentinel values promote
    # the batch to the on5 level or to the whole province.
    org_no = on7
    if on5 == 99999:  # whole province
        org_no = get_province_no()
    elif on7 == 9999999:  # on5 level
        org_no = on5
    v_power96_df["org_no"] = org_no
    v_15min_df = pd.merge(v_power96_df, type_df, how="left", on="type_id")
    # Roll level-1 industries up to their parent type and append the
    # aggregated rows alongside the originals.
    v_15min_df["type_level"] = v_15min_df["type_level"].fillna(-1)
    v_15min_df["type_level"] = v_15min_df["type_level"].astype(int)
    level1_df = v_15min_df[v_15min_df["type_level"] == 1]
    level1_df = (
        level1_df.groupby(["org_no", "p_type_id", "data_time"])
        # "sum" instead of builtin sum: builtin callables in .agg are
        # deprecated in modern pandas; the named reduction is equivalent.
        .agg({"p_total": "sum", "p_kt": "sum", "p_base": "sum"})
        .reset_index()
    )
    level1_df = level1_df.rename(columns={"p_type_id": "type_id"})
    level1_df = pd.merge(level1_df, type_df, how="left", on="type_id")
    v_15min_df = pd.concat([v_15min_df, level1_df], ignore_index=True)
    v_15min_df = v_15min_df.drop("type_level", axis=1)
    # Attach region attributes from real_org_no.
    v_15min_df = pd.merge(
        v_15min_df,
        region_df[["org_no", "org_name", "ad_org_name", "p_org_no"]],
        how="left",
        on="org_no",
    )
    # Virtual users carry no sample counts.
    (
        v_15min_df["p_total_count"],
        v_15min_df["p_kt_count"],
        v_15min_df["p_base_count"],
    ) = (0, 0, 0)
    v_15min_df["time_display_name"] = v_15min_df["data_time"].dt.strftime(
        "%Y-%m-%d"
    )

    def get_group_max(group):
        # data_time of the row with the largest p_total_sum in the group.
        sorted_group = group.sort_values("p_total_sum", ascending=False)
        return sorted_group.iloc[0]["data_time"]

    v_15min_df = v_15min_df.rename(
        columns={
            "p_kt": "p_kt_sum",
            "p_total": "p_total_sum",
            "p_base": "p_base_sum",
        }
    )
    # Mark, per (org, type, day), the 15-min moment with the highest load.
    day_max_df = (
        v_15min_df.groupby(["org_no", "type_id", "time_display_name"])
        .apply(get_group_max)
        .reset_index(name="data_time")
    )
    day_max_df["is_day_max"] = 1
    v_15min_df = v_15min_df.merge(
        day_max_df[["org_no", "type_id", "data_time", "is_day_max"]],
        on=["org_no", "type_id", "data_time"],
        how="left",
    )
    # Plain assignment instead of fillna(inplace=True) on a column selection,
    # which is a chained-assignment hazard under pandas copy-on-write.
    v_15min_df["is_day_max"] = v_15min_df["is_day_max"].fillna(0)
    if v_15min_df.empty:
        print(
            f"org_no {org_no},s-e:{start_time,end_time} v_orgno_typeid_15min 写入为空"
        )
    else:
        v_15min_df = v_15min_df.replace({np.nan: None})
        sql_engine.update_df_by_id(v_15min_df, save_table)
        print(
            f"org_no {org_no}, s-e:{start_time,end_time} v_orgno_typeid_15min 写入条数 {v_15min_df.shape[0]}， date:{day_max_df['time_display_name'].drop_duplicates().to_list()}"
        )
        # Derive the virtual-user daily max table from the 15-min rows.
        get_max_time(v_15min_df, org_no, start_time, end_time)


def get_max_time(v_15min_df, org_no, start_time, end_time):
    """
    Build and persist the virtual-user v_area_max_time rows: for each
    (org_no, org_name, day-max moment) sum the loads of type_id 1 and 2.

    Parameters
    ----------
    v_15min_df : pd.DataFrame
        Output of get_virtual_15min; must contain is_day_max, type_id,
        org_no, org_name, data_time and the *_sum load columns.
    org_no : int | str
        Org identifier, used for logging only.
    start_time, end_time : str
        Day range, used for logging only.
    """
    # TODO: the maximum is not taken over the all-industry aggregate.
    save_table = "v_area_max_time"
    # Keep only per-day-maximum rows of type_id 1 and 2 (presumably the
    # top-level industry categories — verify against type_map).
    all_type_df = v_15min_df[v_15min_df["is_day_max"] == 1]
    all_type_df = all_type_df[all_type_df["type_id"].isin([1, 2])]
    res_df = (
        all_type_df.groupby(["org_no", "org_name", "data_time"])
        # "sum" instead of builtin sum: builtin callables in .agg are
        # deprecated in modern pandas; the named reduction is equivalent.
        .agg({"p_total_sum": "sum", "p_base_sum": "sum", "p_kt_sum": "sum"})
        .reset_index()
    )
    res_df = res_df.rename(
        columns={
            "data_time": "max_time",
            "p_total_sum": "max_p_total",
            "p_base_sum": "max_p_base",
            "p_kt_sum": "max_p_kt",
        }
    )
    res_df["compute_date"] = pd.to_datetime(res_df["max_time"]).dt.date

    if res_df.empty:
        print(
            f"org_no {org_no},s-e:{start_time,end_time}, v_area_max_time 写入为空"
        )
    else:
        res_df = res_df.replace({np.nan: None})
        sql_engine.update_df_by_id(res_df, save_table)
        print(
            f"org_no {org_no},s-e:{start_time,end_time}, v_area_max_time 写入条数 {res_df.shape[0]}"
        )


def get_all_index(input_index):
    """Return the names of every ES index matching the pattern *input_index*."""
    matched = es.cat.indices(index=input_index, format="json")
    names = [item["index"] for item in matched]
    print(f"es:{config['ES_HOST']},获取到相关索引数量:{len(names)},退出")
    return names


def get_agg_days(index):
    """
    Return the distinct days (as "YYYY-MM-DD" strings) that contain documents
    in the ES index *index*, via a 1-day date_histogram on data_time.
    """
    body = {
        "size": 0,
        "aggs": {
            "date_list": {
                "date_histogram": {
                    "field": "data_time",
                    "calendar_interval": "1d",
                }
            }
        },
    }
    resp = es.search(index=index, body=body, request_timeout=60)
    buckets = resp["aggregations"]["date_list"]["buckets"]
    fmt_in = "%Y-%m-%d %H:%M:%S.%f"
    return [
        datetime.strptime(bucket["key_as_string"], fmt_in).strftime("%Y-%m-%d")
        for bucket in buckets
    ]


def get_res_power2(search_index, save_tables, start_day, end_day, key_list):
    """
    Worker task: sync one ES index slice [start_day, end_day] into the MySQL
    table *save_tables*, then optionally derive the 15-min virtual tables.

    Runs in a child process: it disposes the inherited SQL engine connection
    pool and builds its own ES client. Returns (search_index, start_day,
    end_day) so the parent can match results back to tasks.
    """
    # One index corresponds to on7 (on5_on7) / on5 (on5_9999999) /
    # province (9999_9999999).
    sql_engine.engine.dispose()
    rules = [
        ("data_time", "query", ">=", start_day),
        ("data_time", "query", "<=", end_day),
    ]
    es_dal = EsBaseDal(config["ES_HOST"])  # create the ES client inside this process
    count = es_dal.ruleng_query_count(
        rules, search_index, doc_time_field="data_time"
    )
    print(
        f"index_name:{search_index}, count:{count} start_day:{start_day} end_day:{end_day}"
    )
    t1 = time.time()
    result_df = pd.DataFrame()
    if count > 10000:
        # Large result: page through with scroll queries of 10k docs each.
        per = 10000
        cur = count // per + 1
        for i in range(cur):
            res_df = pd.DataFrame()
            for j in range(10):  # up to 10 retries per page on timeout
                try:
                    res_df = es_dal.query_dataframe_scroll(
                        rules,
                        search_index,
                        scroll_size=per,
                        doc_time_field="data_time",
                        start_index=i * per,
                    )
                    break
                except ConnectionTimeout:
                    # time.sleep(5)
                    print(
                        "step:{} search失败次数:{},index_name:{},star_day:{} end_day:{}".format(
                            i + 1, j + 1, search_index, start_day, end_day
                        )
                    )
            # NOTE(review): if all 10 retries time out, this page is silently
            # dropped (res_df stays empty) and the slice is synced partially.
            result_df = pd.concat([result_df, res_df])
    else:
        for j in range(10):  # up to 10 retries on timeout
            try:
                result_df = es_dal.query_dataframe(
                    rules, search_index, doc_time_field="data_time"
                )
                break
            except ConnectionTimeout:
                # time.sleep(5)
                print(
                    "search失败次数:{},index_name:{},star_day:{} end_day:{}".format(
                        j + 1, search_index, start_day, end_day
                    )
                )
    t2 = time.time()
    print(
        f"index_name:{search_index} start_day:{start_day} end_day:{end_day} 查询es耗时 {t2-t1} s"
    )
    if not result_df.empty:
        # Push the computed result into MySQL.
        t3 = time.time()
        result_df = result_df[key_list]
        result_df = result_df.drop_duplicates(subset=["data_time", "cons_no"])
        result_df = result_df.reset_index(drop=True)
        true_flag = False
        re_try = 3  # retry budget for the MySQL upsert
        while re_try > 0:
            true_flag = sql_engine.update_df_by_id_with_session(
                result_df, save_tables
            )
            if true_flag:  # success — stop retrying
                break
            time.sleep(1)
            re_try -= 1
            print(
                f"更新重试次数还剩{re_try}次，{search_index}, 开始日期 {start_day} 结束日期 {end_day} "
            )
        # Derive the 15-min result, and from it the max_time result.
        t4 = time.time()
        print(
            f"剩余重试次数{re_try} {search_index}, 开始日期 {start_day} 结束日期 {end_day} "
            f"执行mysql更新插入{'成功' if true_flag else '失败'}，数据条数{result_df.shape[0]}, 耗时：{t4-t3} "
        )
        if gen_15min_bool:
            get_virtual_15min(result_df, start_day, end_day)
    else:
        print("get_power empty")

    return search_index, start_day, end_day


def get_province_no():
    """Return the org_no of the province-level organization (org_level == 0)."""
    df = sql_engine.query("select org_no from real_org_no where org_level=0")
    return df["org_no"].tolist()[0]


def main(
    process_num,
    input_index,
    out_table,
    start_date,
    end_date,
    key_list,
    ck_mode,
    is_predict=False,
):
    """
    Fan out ES→MySQL sync jobs over a process pool.

    Every index matching *input_index* is split into 10-day sub-tasks
    (newest first) handled by get_res_power2; on5-level indexes (name ending
    in "9999999") are scheduled first. When *ck_mode* is set, on5-level
    results are verified after the pool drains.

    Parameters
    ----------
    process_num : int
        Worker pool size.
    input_index : str
        ES index pattern expanded via get_all_index.
    out_table : str
        Target MySQL table, passed through to get_res_power2.
    start_date, end_date : str
        Inclusive "YYYY-MM-DD" bounds of the sync window.
    key_list : list[str]
        Columns kept from the ES result.
    ck_mode : bool
        Run post-sync consistency checks on on5-level indexes.
    is_predict : bool
        Check prediction data instead of virtual-user data.
    """
    print(f"执行多索引导入mysql， 索引前缀：{input_index}，写入表：{out_table}")
    index_names = get_all_index(input_index)  # every index matching the pattern
    # Schedule on5-level indexes (suffix 9999999) before the others.
    sort_index_names = sorted(
        index_names, key=lambda x: not str(x).endswith("9999999")
    )
    pro_pool = Pool(process_num)
    data_list = pd.date_range(
        start=start_date, end=end_date, freq="1D"
    ).strftime("%Y-%m-%d")
    data_list = sorted(data_list, reverse=True)
    results = []
    for search_index in sort_index_names:  # per index, chunk the date range
        for i in range(0, len(data_list), 10):
            # Smallest unit of work: 10 days of one on7/on5/province index.
            end_day = data_list[i]
            start_day = data_list[min(len(data_list) - 1, i + 9)]
            # With -o set, skip everything that is not on5 level.
            if only_on5 and not str(search_index).endswith("9999999"):
                continue
            results.append(
                pro_pool.apply_async(
                    func=get_res_power2,
                    args=(
                        search_index,
                        out_table,
                        start_day,
                        end_day,
                        key_list,
                    ),
                )
            )
    pro_pool.close()
    pro_pool.join()
    try:
        for res in results:
            index_acrl, start_str, end_str = res.get()
            # Only on5-level indexes get their output checked.
            if ck_mode and str(index_acrl).endswith("9999999"):
                org_no = str(index_acrl).split("-")[-2]
                try:
                    if is_predict:
                        check_predict_data(es_dal, org_no, start_str, end_str)
                    else:
                        if org_no == "99999":  # whole province
                            org_no = get_province_no()
                        check_virtual_data(ep, org_no, start_str, end_str)
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; checks stay best-effort.
                except Exception:
                    print(f"{index_acrl} {start_date} {end_date} 核查失败")
                    print(traceback.format_exc())
        print(len(results))
    except Exception:  # narrowed from a bare `except:` — log and continue
        print(traceback.format_exc())


if __name__ == "__main__":
    # cd /home/zshield/bin/ruleng & python air_web.dw.virtual_res_es_to_mysql_v2 -s 2023-01-01 -e 2023-02-01 -o -g
    # -s -e 指定同步的开始结束时间， 如果不指定就同步索引内的所有数据， 按日期倒序同步 一个子进程同步10天
    # -o 只同步on5级别的 默认false
    # -g 同步完成功后去调用函数生成 虚拟用户的15min表和max_time表  默认false
    # -p 进程数， 默认5
    # -c 不同步虚拟用户的可算档案到c_cons
    # -v 仅同步虚拟用户的结算结果
    # -t 仅同步预测结果
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    es = my_lib.zElasticsearch(config["ES_HOST"])
    es_dal = EsBaseDal(config["ES_HOST"])

    ep = es_pandas(config["ES_HOST"])
    save_table1 = config["POWERLOAD_VIRTUAL_TABLE"]  # 同步es并写入的表
    index_name1 = config["POWERLOAD_VIRTUAL_IDX"]  # 要同步的es
    predict_index_name = config["POWERLOAD_PREDICT_VIRTUAL_IDX"]
    predict_save_table = config["POWERLOAD_PREDICT_VIRTUAL_TABLE"]
    type_df = get_type_df()  # 行业表
    region_df = get_region_df()  # 区县表

    # 是否执行虚拟用户的orgno_typeid_15min 和 max_time表生成 指定参数-g 为True
    gen_15min_bool = False
    # 仅同步on5级别的即 on7 == 9999999
    only_on5 = False
    # 是否同步虚拟用户档案到c_cons表， 指定-t 为False, 后续增量可以指定为False
    trans_cons = True
    trans_vcons = True
    trans_predict = True
    # 检查模式: False 不做检查 True 同步完做检查（仅虚拟用户) (默认False)
    check_mode = False
    # 开始时间或结束时间不指定时，全时间同步
    start_date = (date.today() - timedelta(days=5)).strftime("%Y-%m-%d")
    end_date = date.today().strftime("%Y-%m-%d")
    pre_end_date = (date.today() + timedelta(days=3)).strftime("%Y-%m-%d")
    pro_num = 1  # 进程数
    opts, args = getopt.getopt(sys.argv[1:], "s:e:p:mgocvt")
    for opt, val in opts:
        if opt == "-s":
            start_date = val
        elif opt == "-e":
            end_date = val
            pre_end_date = (
                datetime.strptime(end_date, "%Y-%m-%d") + timedelta(days=3)
            ).strftime("%Y-%m-%d")
        elif opt == "-p":
            pro_num = int(val)
        elif opt == "-m":
            check_mode = True
        elif opt == "-g":
            gen_15min_bool = True
        elif opt == "-o":
            only_on5 = True
        elif opt == "-c":
            trans_cons = False
        elif opt == "-v":
            trans_predict = False  # 仅传输虚拟用户计算结构
        elif opt == "-t":
            trans_vcons = False  # 仅传输预测结果
    if trans_cons:
        print(
            f"执行虚拟用户档案导入mysql， 索引前缀：{config['C_CONS_IDX']}，写入表：{config['C_CONS']}"
        )
        get_cons_df()
    if trans_vcons:
        key_list = [
            "on5",
            "on7",
            "org_no",
            "type_id",
            "cons_no",
            "kt_ratio",
            "data_time",
            "p_total",
            "p_kt",
            "p_base",
            "p_std_left",
            "p_std_right",
            "is_day_max",
        ]
        main(
            pro_num,
            index_name1,
            save_table1,
            start_date,
            end_date,
            key_list,
            ck_mode=check_mode,
        )
    if trans_predict:
        key_list = [
            "on5",
            "on7",
            "type_id",
            "cons_no",
            "data_time",
            "p_total",
            "p_kt",
            "is_day_max",
        ]
        main(
            pro_num,
            predict_index_name,
            predict_save_table,
            start_date,
            pre_end_date,
            key_list,
            ck_mode=check_mode,
            is_predict=True,
        )
