import time
import traceback
from multiprocessing import Pool

import pandas as pd

from air_web.data_platform import sql_engine


def proc_max_time(result, group_field=None, is_write=False):
    """Pick, for each group, the row with the largest ``p_total`` of the day.

    Parameters
    ----------
    result : pandas.DataFrame
        Must contain at least ``org_no``, ``org_name``, ``data_date`` and
        ``p_total`` columns (plus any columns named in *group_field*).
    group_field : list[str] | None
        Extra columns to group by, on top of the implicit
        ``org_no``/``org_name``/``data_date``.
    is_write : bool
        When True, also persist the province-level max rows (where both
        ``type_code`` and ``type_code_sort`` are ``'all'``) to the
        ``area_max_time`` table.

    Returns
    -------
    pandas.DataFrame
        One row per group (the daily maximum), with ``is_day_max`` set to 1.
    """
    # Copy into a fresh list: the original signature used a mutable default
    # argument (`group_field=[]`) and then extended it, so the default — and
    # any caller-supplied list — accumulated entries across calls.
    group_field = list(group_field) if group_field else []
    group_field.extend(["org_no", "org_name", "data_date"])

    # Sort descending so drop_duplicates (keep='first') retains the max row.
    df = result.sort_values("p_total", ascending=False)
    df = df.drop_duplicates(group_field)

    if is_write:
        res_df = df.loc[
            (df["type_code"] == "all") & (df["type_code_sort"] == "all")
        ]
        res_df = res_df.drop(["type_code", "type_code_sort"], axis=1)
        # Parent-org columns are present only for some callers.
        if "p_org_no" in res_df:
            res_df = res_df.drop(["p_org_no", "p_org_name"], axis=1)
        res_df = res_df.rename(
            columns={
                "data_time": "max_time",
                "p_total": "max_p_total",
                "p_kt": "max_p_kt",
                "p_base": "max_p_base",
                "data_date": "compute_date",
            }
        )
        sql_engine.insert_df(res_df, "area_max_time")
    df["is_day_max"] = 1
    return df


def stats2(
    data_time, select_sql, group_sql="", where_sql="", sleep_seconds=900
):
    """Aggregate one day of ``powerload_area`` rows using caller-supplied
    SQL fragments, flag each group's daily-max row, and write the result
    back to ``powerload_area``.

    Parameters
    ----------
    data_time : str | date
        The ``data_date`` to aggregate; converted with ``str()``.
    select_sql, group_sql, where_sql : str
        Raw SQL fragments spliced into the query.
        NOTE(review): these are interpolated with ``str.format`` — callers
        must pass only trusted, hard-coded fragments (never user input).
    sleep_seconds : int
        Back-off delay when the day has no data yet.
    """
    sql = """select {select_sql} type_code, type_code_sort, data_time, 
            data_date, sum(p_total) p_total, sum(p_kt) p_kt, sum(p_base) p_base
            from powerload_area
            where data_date = %s {where_sql}
            {group_sql}
            """.format(
        select_sql=select_sql, group_sql=group_sql, where_sql=where_sql
    )
    value = [str(data_time)]
    result = sql_engine.query(sql, params=value)
    if result.empty:
        # No data for this date yet: log (message says "empty, skipping"),
        # wait, and bail out.
        print(f"{data_time}为空，跳过")
        time.sleep(sleep_seconds)
        return
    group_field = ["type_code", "type_code_sort"]
    # Daily-max rows per group; is_write=True also persists them to
    # area_max_time inside proc_max_time.
    max_df = proc_max_time(result, group_field, is_write=True)
    result = result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    # Plain assignment instead of chained `inplace=True` fillna: the chained
    # form is deprecated in pandas 2.x and is a no-op under copy-on-write.
    result["is_day_max"] = result["is_day_max"].fillna(0)
    sql_engine.insert_df(result, "powerload_area")


def stats1(data_date, sleep_seconds=900):
    """Aggregate one day of raw ``powerload`` rows to county level, write
    them to ``powerload_area``, then add two rollup passes with
    ``type_code='all'`` and ``type_code_sort='all'``.

    Parameters
    ----------
    data_date : str | date
        The ``data_date`` to aggregate; converted with ``str()``.
    sleep_seconds : int
        Back-off delay when the day has no data yet.
    """
    sql = """select on7 org_no, xian org_name, on5 p_org_no, shi p_org_name, type_code, type_code_sort, data_time,
            data_date, sum(p_total) p_total, sum(p_kt) p_kt, sum(p_base) p_base
            from powerload
            where data_date = %s
            group by on5, shi, on7, xian, type_code, type_code_sort, data_time, data_date
            """
    value = [str(data_date)]
    result = sql_engine.query(sql, params=value)
    if result.empty:
        # No data for this date yet: log (message says "empty, skipping"),
        # wait, and bail out.
        print(f"{data_date}为空，跳过")
        time.sleep(sleep_seconds)
        return
    group_field = ["type_code", "type_code_sort"]
    max_df = proc_max_time(result, group_field)
    result = result.merge(
        max_df[
            [
                "org_no",
                "type_code",
                "type_code_sort",
                "data_time",
                "is_day_max",
            ]
        ],
        on=["org_no", "type_code", "type_code_sort", "data_time"],
        how="left",
    )
    # Plain assignment instead of chained `inplace=True` fillna: the chained
    # form is deprecated in pandas 2.x and is a no-op under copy-on-write.
    result["is_day_max"] = result["is_day_max"].fillna(0)
    sql_engine.insert_df(result, "powerload_area")

    # Add one more row set with type_code='all' (rollup across type_code).
    all_sql = """select org_no, org_name, p_org_no, p_org_name, 'all' type_code, type_code_sort, data_time,
            data_date, sum(p_total) p_total, sum(p_kt) p_kt, sum(p_base) p_base
            from powerload_area
            where data_date = %s
            group by org_no, org_name, p_org_no, p_org_name, type_code_sort, data_time, data_date
            """
    all_value = [str(data_date)]
    all_result = sql_engine.query(all_sql, params=all_value)
    group_field = ["type_code_sort"]
    max_df = proc_max_time(all_result, group_field)
    all_result = all_result.merge(
        max_df[["org_no", "is_day_max", "type_code_sort", "data_time"]],
        on=["org_no", "type_code_sort", "data_time"],
        how="left",
    )
    all_result["is_day_max"] = all_result["is_day_max"].fillna(0)
    sql_engine.insert_df(all_result, "powerload_area")

    # Add one more row set with type_code_sort='all' (rollup across
    # type_code_sort, built on top of the type_code='all' rows).
    all_sql = """select org_no, org_name, p_org_no, p_org_name, type_code, 'all' type_code_sort, data_time,
                data_date, sum(p_total) p_total, sum(p_kt) p_kt, sum(p_base) p_base
                from powerload_area
                where data_date = %s and type_code='all'
                group by org_no, org_name, p_org_no, p_org_name, data_time, data_date
                """
    all_value = [str(data_date)]
    all_result = sql_engine.query(all_sql, params=all_value)
    # is_write=True: these grand-total maxima also go to area_max_time.
    max_df = proc_max_time(all_result, is_write=True)
    all_result = all_result.merge(
        max_df[["org_no", "is_day_max", "data_time"]],
        on=["org_no", "data_time"],
        how="left",
    )
    all_result["is_day_max"] = all_result["is_day_max"].fillna(0)
    sql_engine.insert_df(all_result, "powerload_area")


def stats(data_date):
    """Run the full aggregation pipeline for one date: county level first,
    then the city-level and province-level rollups."""
    # County level (also seeds the 'all' rollup rows in powerload_area).
    stats1(data_date)

    # City level: promote each parent org to org, with province 99 as parent.
    city_select = "p_org_no org_no, p_org_name org_name, 99 p_org_no, '河北省电力公司' p_org_name,"
    city_group = "group by p_org_no, p_org_name, type_code, type_code_sort, data_time, data_date"
    stats2(data_date, city_select, city_group)

    # Province level: aggregate only the rows whose parent is org 99.
    province_select = "p_org_no org_no, p_org_name org_name,"
    province_group = "group by p_org_no, p_org_name, type_code, type_code_sort, data_time, data_date"
    province_where = "and p_org_no = 99"
    stats2(data_date, province_select, province_group, province_where)


def main():
    """Run `stats` for every date in the range on a 5-worker process pool.

    Failures for one date are logged and do not stop the other dates.
    """
    date_list = pd.date_range(
        start="2023-02-02", end="2023-02-03", freq="1D"
    ).strftime("%Y-%m-%d")
    pro_pool = Pool(5)
    # Submit every date before collecting any result. The original called
    # .get() immediately after each apply_async, which blocked on each task
    # in turn and serialized the whole pool.
    pending = []
    for data_date in date_list:
        print(f"proc {data_date}")
        pending.append(pro_pool.apply_async(func=stats, args=(data_date,)))
    pro_pool.close()
    for result in pending:
        try:
            result.get()
        except Exception:
            # Narrowed from a bare `except:`; log and continue with the rest.
            print(traceback.format_exc())
    pro_pool.join()


def proc_area_type_map():
    """Build the org/type mapping table from c_cons at county, city and
    province granularity and persist it to ``area_type_map``."""
    raw = sql_engine.query(
        """select on5,shi,on7,xian,type_code,type_code_sort from c_cons """
    )

    # County level: on7/xian become org_no/org_name.
    county = raw[["on7", "xian", "type_code", "type_code_sort"]].drop_duplicates()
    county = county.rename(columns={"on7": "org_no", "xian": "org_name"})

    # City level: on5/shi become org_no/org_name.
    city = raw[["on5", "shi", "type_code", "type_code_sort"]].drop_duplicates()
    city = city.rename(columns={"on5": "org_no", "shi": "org_name"})

    # Province level: one fixed org covering all type combinations.
    province = raw[["type_code", "type_code_sort"]].drop_duplicates()
    province["org_no"] = "99"
    province["org_name"] = "河北省电力公司"

    mapping = pd.concat([county, city, province])
    sql_engine.insert_df(mapping, "area_type_map")


if __name__ == "__main__":
    # Run the per-date aggregation first, then rebuild the org/type map.
    main()
    proc_area_type_map()
