import getopt
import sys
import time
import traceback
from multiprocessing import Pool

import numpy as np
import pandas as pd

from air_web.data_platform import sql_engine


def agg_org_type_day_max(start_time, end_time):
    """Aggregate one day of AC load stats per (org_no, type_id).

    Reads 15-minute rows from ``orgno_typeid_15min`` over the half-open
    interval [start_time, end_time), finds the rows holding the maximum and
    minimum ``p_kt_sum`` for every (org_no, type_id) group, joins in the
    period means of ``p_kt_sum`` and ``p_total_sum``, and writes the merged
    frame to the module-level ``save_table``.

    :param start_time: inclusive lower bound, ``'YYYY-MM-DD'`` string.
    :param end_time: exclusive upper bound, ``'YYYY-MM-DD'`` string.
    :return: ``start_time`` on success; ``None`` when the interval has no
        data (after one retry) or when aggregation fails (traceback printed).

    NOTE(review): relies on the module global ``save_table`` being defined
    (set in the ``__main__`` guard; inherited by fork-started workers) —
    confirm before running under the 'spawn' start method.
    """
    # NOTE(review): values are interpolated straight into the SQL text. Safe
    # for the internally generated date strings from main(), but switch to
    # bound parameters if these inputs ever become untrusted.
    sql = f"select org_no,org_name,ad_org_name,p_org_no,type_code,type_id,data_time,p_kt_sum,p_total_sum,time_display_name,p_type_id from orgno_typeid_15min where data_time >= '{start_time}' and data_time <'{end_time}'"
    day_org_type_df = sql_engine.query(sql)
    if day_org_type_df.empty:
        # An empty result may be transient (e.g. upstream load lag): wait
        # briefly and retry once before giving up on this day.
        print(start_time, len(day_org_type_df))
        time.sleep(5)
        day_org_type_df_new = sql_engine.query(sql)
        if day_org_type_df_new.empty:
            return None
        day_org_type_df = day_org_type_df_new
    try:
        group_keys = ["org_no", "type_id"]
        # One row per (org, type, timestamp); duplicates would skew ranks.
        day_org_type_df = day_org_type_df.drop_duplicates(
            group_keys + ["data_time"]
        )
        print(start_time, len(day_org_type_df))

        # Row carrying the maximum p_kt_sum of each group (rank == 1);
        # ties are broken by keeping the first occurrence.
        max_rank = day_org_type_df.groupby(group_keys)["p_kt_sum"].rank(
            method="min", ascending=False
        )
        max_rows = day_org_type_df.loc[max_rank == 1].drop_duplicates(group_keys)
        max_rows["p_kt_rate_max"] = round(
            max_rows["p_kt_sum"] / max_rows["p_total_sum"], 4
        )

        # Symmetric selection for the minimum p_kt_sum row per group.
        min_rank = day_org_type_df.groupby(group_keys)["p_kt_sum"].rank(
            method="min", ascending=True
        )
        min_rows = day_org_type_df.loc[min_rank == 1].drop_duplicates(group_keys)
        min_rows["p_kt_rate_min"] = round(
            min_rows["p_kt_sum"] / min_rows["p_total_sum"], 4
        )

        # Period means per group, as flat frames keyed by (org_no, type_id).
        mean_kt_rows = (
            day_org_type_df.groupby(group_keys)["p_kt_sum"]
            .mean()
            .reset_index(name="avg_p_kt")
        )
        mean_total_rows = (
            day_org_type_df.groupby(group_keys)["p_total_sum"]
            .mean()
            .reset_index(name="avg_p_total")
        )

        # Merge max/min rows into one record per group; descriptive columns
        # are identical on both sides and therefore used as join keys so
        # they are not duplicated with suffixes.
        result = pd.merge(
            max_rows,
            min_rows,
            on=[
                "org_no",
                "type_id",
                "time_display_name",
                "org_name",
                "ad_org_name",
                "p_org_no",
                "type_code",
                "p_type_id",
            ],
            suffixes=("_max", "_min"),
        )
        result = pd.merge(result, mean_kt_rows, on=group_keys)
        result = pd.merge(result, mean_total_rows, on=group_keys)

        # Map suffixed source columns to the destination-table schema, e.g.
        # data_time_max -> max_p_kt_time, p_kt_sum_min -> min_p_kt.
        map_dict = {
            "data_time": "p_kt_time",
            "p_kt_sum": "p_kt",
            "p_kt_rate": "p_kt_rate",
            "p_total_sum": "p_kt_total",
        }
        rename_dict = {
            f"{name}_{key}": f"{key}_{map_dict[name]}"
            for name in map_dict
            for key in ("max", "min")
        }
        rename_dict["time_display_name"] = "data_time"
        result = result.rename(columns=rename_dict)

        # Normalize non-values before the DB write: NaN from missing data,
        # and +/-inf produced by dividing by a zero p_total_sum above.
        res_result = result.replace({np.nan: None, np.inf: None, -np.inf: None})
        sql_engine.update_df_by_id(res_result, save_table)
        return start_time

    except Exception:
        # Best-effort per-day job: log and keep the other days running.
        print(traceback.format_exc())
        print(f"{start_time} error")
        return None


def main(
    process_num,
    start_date,
    end_date,
):
    """Fan out one aggregation task per day in [start_date, end_date).

    Builds the daily boundaries, then schedules agg_org_type_day_max for
    each consecutive (day, next_day) pair on a process pool, most recent
    day first.

    :param process_num: number of worker processes; coerced to ``int`` so
        string values parsed from the command line also work.
    :param start_date: first day to aggregate (inclusive), 'YYYY-MM-DD'.
    :param end_date: day after the last one to aggregate (exclusive).
    """
    date_list = pd.date_range(
        start=start_date, end=end_date, freq="1D"
    ).strftime("%Y-%m-%d")
    # Newest day first, so recent data is refreshed earliest.
    date_list = sorted(date_list, reverse=True)
    pro_pool = Pool(int(process_num))  # getopt delivers strings; Pool needs int
    results = []
    for i in range(len(date_list) - 1):
        # date_list is descending: the next element is the day's start,
        # the current one its exclusive end.
        day_start = date_list[i + 1]
        day_end = date_list[i]
        results.append(
            pro_pool.apply_async(
                func=agg_org_type_day_max, args=(day_start, day_end)
            )
        )
    try:
        # Surface worker exceptions instead of dropping them silently.
        for res in results:
            res.get()
    except Exception:
        print(traceback.format_exc())

    pro_pool.close()
    pro_pool.join()
    print(len(results))


if __name__ == "__main__":
    # Aggregate daily AC max/min/mean load and ratios per district/industry
    # from orgno_typeid_15min and write them into the daily stats table.

    t1 = time.time()
    process_num = 1  # worker process count
    start_time = "2019-01-01"  # start date (inclusive)
    end_time = "2019-01-05"  # end date (exclusive)
    save_table = "orgno_typeid_day_stat"  # destination table (read as a
    # module global by agg_org_type_day_max in fork-started workers)
    opts, args = getopt.getopt(sys.argv[1:], "p:s:e:t:")
    for opt, val in opts:
        if opt == "-p":
            # getopt yields strings; multiprocessing.Pool requires an int.
            process_num = int(val)
        elif opt == "-s":
            start_time = val
        elif opt == "-e":
            end_time = val
        elif opt == "-t":
            save_table = val
    main(process_num, start_time, end_time)
    t2 = time.time()
    print(t2 - t1)
