import json
import logging
import random
from datetime import datetime, timedelta

import numpy as np
import pandas as pd

import ctypes;
from air_web.config.config import config
from air_web.data_platform import init_db, mysql_con



# Load the prebuilt rule-engine extension (compiled for CPython 3.8 / linux-x86_64).
# NOTE(review): ctypes.CDLL normally exposes C symbols, yet `es_mod.exceptions`
# and `zElasticsearch` below are accessed like Python attributes — this appears
# to rely on project-specific wrapper behavior; confirm the .so really is meant
# to be loaded via ctypes rather than imported as an extension module.
my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
logging.basicConfig(level=logging.DEBUG)
# Timeout exception raised by the embedded Elasticsearch client; used by cons_agg's retry loop.
ConnectionTimeout = my_lib.es_mod.exceptions.ConnectionTimeout
sql_engine = init_db()  # project DB engine: .query(sql) -> DataFrame, .update_df_by_id(df, table)
es = my_lib.zElasticsearch(config["ES_HOST"])  # Elasticsearch client wrapper
requir_user_file = "./resp_user.csv"  # demand-response consumer list (one cons_no per line)
order_user_file = "./order_user.csv"  # orderly-power-use consumer list (one cons_no per line)

before_days = 0 # replay offset: use data from this many days ago
multiply_coef_down = 1  # lower bound of the deterministic scaling coefficient
multiply_coef_up = 1  # upper bound of the deterministic scaling coefficient

# Industry type_id codes (10xxx):
# {'Transport, storage & postal services': 10101, 'Information transmission, software & IT services': 10102,
#  'Wholesale & retail': 10103, 'Accommodation & catering': 10104, 'Finance': 10105,
#  'Real estate': 10106, 'Leasing & business services': 10107,
#  'Agriculture, forestry, husbandry & fishery': 10108, 'Industry': 10109,
#  'Construction': 10110, 'Public services & management organizations': 10201}
# Display-level level_sort codes (16xx):
# 1601: Construction
# 1602: Transport, storage & postal services
# 1603: Information transmission, software & IT services
# 1604: Wholesale & retail
# 1605: Accommodation & catering
# 1606: Finance
# 1607: Real estate
# 1608: Leasing & business services
# 1609: Public services & management organizations
# level_sort (16xx) -> industry type_id (10xxx)
level_type_map = {
    1601: 10110,
    1602: 10101,
    1603: 10102,
    1604: 10103,
    1605: 10104,
    1606: 10105,
    1607: 10106,
    1608: 10107,
    1609: 10201,
}

# Inverse mapping: industry type_id (10xxx) -> display level_sort (16xx).
type_level_map = dict(map(reversed, level_type_map.items()))


# Global accumulator keyed "<cons_no>_<level>_<date>" or "<level>_<date>";
# reset by handle_data()/get_es_data() before each run, filled by data_handle().
result_dict = {}


def get_sum_df():
    """Build one province-wide summary record by summing today's stored curves.

    Sums every quarter-hour column (point1..point96) of curve_type 302 rows
    with data_type 1/2 for today's compute_date, and returns the result as a
    single record with data_type 0, level_sort 1501 and curve_type 303.

    Returns:
        A one-element list containing the summary record dict.
    """
    # "sum(point1) as point1, ..., sum(point96) as point96"
    sum_sql = ','.join(f'sum(point{str(i)}) as point{str(i)}' for i in range(1, 97))
    sql = f"""select {sum_sql}
              from aircondition_curve
              where compute_date = '{today}'
                and data_type in (1,2)
                and curve_type=302
           """
    summed_row = sql_engine.query(sql).to_dict("records")[0]
    record = {
        "cons_no": None,
        "cons_name": None,
        "data_type": 0,
        "level_sort": 1501,
        "curve_type": 303,
        "date": str(today),
    }
    record.update(summed_row)
    return [record]


def random_multiplier(seed, lower=None, upper=None):
    """Return a deterministic pseudo-random coefficient derived from a timestamp.

    The timestamp is collapsed to an integer (YYYYMMDDHHMMSS) and used to seed
    a private PRNG, so the same instant always yields the same multiplier.

    Args:
        seed: datetime-like object supporting ``strftime``.
        lower: lower bound of the coefficient; defaults to the module-level
            ``multiply_coef_down``.
        upper: upper bound; defaults to the module-level ``multiply_coef_up``.

    Returns:
        float in [lower, upper], rounded to 2 decimals.
    """
    if lower is None:
        lower = multiply_coef_down
    if upper is None:
        upper = multiply_coef_up
    # A dedicated Random instance reproduces exactly the values the previous
    # random.seed()/random.uniform() pair produced, without mutating the
    # process-global PRNG state as a side effect.
    rng = random.Random(int(seed.strftime('%Y%m%d%H%M%S')))
    return round(rng.uniform(lower, upper), 2)


def proc_df_to_list(df, curve_type, time_df, is_order, data_date):
    """Convert an aggregated load DataFrame into 96-point records and persist them.

    For each industry (type_id) group: align onto the full "HH:MM" grid,
    smooth with a rolling mean, and emit one record whose point1..point96
    keys carry the quarter-hour values. Records are also written to the
    aircondition_curve table.

    Args:
        df: DataFrame with columns type_id / data_time ("HH:MM") / p_kt.
        curve_type: curve code stored on each record (e.g. "301"/"302").
        time_df: single-column DataFrame with all 96 "HH:MM" slots.
        is_order: True for orderly-power-use consumers (data_type 2),
            False for demand-response consumers (data_type 1).
        data_date: 'YYYY-MM-DD' business date.

    Returns:
        list of record dicts (one per industry).
    """
    data_type = 2 if is_order else 1
    res_list = []
    # Shift the label date when the script replays data from `before_days` ago.
    data_date = (datetime.strptime(data_date, '%Y-%m-%d') + timedelta(days=before_days)).strftime(
        '%Y-%m-%d')
    for type_id, group_df in df.groupby("type_id"):
        # BUG FIX: this used to sort (and rebind) the whole outer `df`, which
        # left each group unsorted; the rolling smoothing below requires the
        # group to be in time order.
        group_df = group_df.sort_values("data_time")
        # Left-join onto the full 96-slot grid so missing slots become NaN.
        group_df = pd.merge(time_df, group_df, on="data_time", how="left")
        # Smooth with a 4-point rolling mean; bfill the leading NaNs it creates.
        if len(group_df) > 5:
            group_df["p_kt"] = (
                group_df["p_kt"].rolling(window=4).mean().bfill()
            )
        group_df["p_kt"] = group_df["p_kt"].round(2)
        data_list = group_df["p_kt"].tolist()
        data_dict = {
            "point" + str(i + 1): None if np.isnan(p_kt) else p_kt
            for i, p_kt in enumerate(data_list)
        }
        final_dict = {
            "cons_no": None,
            "cons_name": None,
            "data_type": data_type,
            "curve_type": curve_type,
            "date": data_date,
        }
        final_dict.update({"level_sort": type_level_map.get(type_id)})
        final_dict.update(data_dict)
        res_list.append(final_dict)
    res_df = pd.DataFrame(res_list)
    res_df.drop(['cons_no', 'cons_name'], axis=1, inplace=True)
    res_df['compute_date'] = today
    res_df = res_df.replace({np.nan: None})
    sql_engine.update_df_by_id(res_df, 'aircondition_curve')
    return res_list


def proc_agg_data(agg_res):
    """Flatten ES terms-aggregation buckets into a DataFrame.

    Each bucket key is "<type_id>|<data_time>"; every nested metric dict
    (e.g. ``{"p_kt": {"value": ...}}``) becomes a scalar column.

    Args:
        agg_res: list of bucket dicts from an ES aggregation response.

    Returns:
        DataFrame with columns type_id / data_time / one per metric.
    """
    records = []
    for bucket in agg_res:
        parts = bucket["key"].split("|")
        record = {"type_id": int(parts[0]), "data_time": parts[1]}
        record.update(
            {
                name: metric["value"]
                for name, metric in bucket.items()
                if name not in ("key", "doc_count")
            }
        )
        records.append(record)
    return pd.DataFrame(records)


def cons_agg(start_time, end_time, cid_df):
    """Aggregate 15-min load per industry over a consumer set via Elasticsearch.

    Queries each (on5, on7) org shard's power-load index, sums p_kt per
    (type_id, data_time) bucket, merges the shards, drops excluded industries,
    applies the deterministic per-timestamp coefficient and reduces the
    timestamps to "HH:MM".

    Args:
        start_time: 'YYYY-MM-DD' day to aggregate.
        end_time: exclusive 'YYYY-MM-DD' upper bound.
        cid_df: DataFrame with columns cons_no / on5 / on7.

    Returns:
        DataFrame with columns type_id / data_time ("HH:MM") / p_kt.

    Raises:
        RuntimeError: when one shard's search times out 10 times in a row.
    """
    # If the requested day is "today", cap end_time at the current instant
    # (the per-consumer computed results store predicted points for the whole day).
    if start_time == (datetime.now()-timedelta(days=before_days)).strftime('%Y-%m-%d'):
        end_time = (datetime.now()-timedelta(days=before_days)).strftime("%Y-%m-%d %H:%M:%S")

    # on7 -> on5 lookup, and on7 -> [cons_no, ...] grouping.
    on7_dict = (
        cid_df[["on7", "on5"]]
        .drop_duplicates()
        .set_index("on7")["on5"]
        .to_dict()
    )
    cid_dict = (
        cid_df[["on7", "cons_no"]]
        .groupby("on7")["cons_no"]
        .agg(list)
        .to_dict()
    )

    # Painless script concatenating the aggregation-dimension fields with '|'.
    agg_columns = ["type_id", "data_time"]
    group_source = ""
    for idx, key in enumerate(agg_columns):
        add_source = (
            f"+'|'+doc['{key}'].value" if idx else f"doc['{key}'].value"
        )
        group_source += add_source

    res_df = pd.DataFrame()
    for on7, on5 in on7_dict.items():
        cid_list = cid_dict[on7]

        query = {
            "size": 0,
            "query": {
                "bool": {
                    "must": [
                        {
                            "range": {
                                "data_time": {
                                    "gte": start_time,
                                    "lt": end_time,
                                }
                            }
                        },
                        {"terms": {"cons_no": cid_list}},
                    ]
                }
            },
            "aggs": {
                "group_by": {
                    "terms": {
                        "size": 10000,
                        "script": {"source": group_source, "lang": "painless"},
                    },
                    "aggs": {"p_kt": {"sum": {"field": "p_kt"}}},
                }
            },
        }

        # The index name template encodes on5/on7 and the is_cal filter flag.
        is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
        index_name = (
            config["POWERLOAD"]
            .replace("on5", str(on5))
            .replace("on7", str(on7))
            .replace("cal01", is_cal)
        )
        # Retry transient ES timeouts up to 10 times.
        for i in range(10):
            try:
                result = es.search(
                    index=index_name, body=query, request_timeout=300
                )
                break
            except ConnectionTimeout:
                print(f"search失败次数:{i+1},date:{start_time},on7:{on7}")
        else:
            # BUG FIX: previously `result` stayed unbound (first shard) or
            # stale from the previous shard after 10 straight timeouts,
            # causing a NameError or silently duplicated data. Fail loudly.
            raise RuntimeError(
                f"ES search failed 10 times, date:{start_time},on7:{on7}"
            )
        agg_res = result["aggregations"]["group_by"]["buckets"]
        if len(agg_res) == 0:
            print(f"数据为空，跳过date:{start_time},on7:{on7}")
            continue
        on7_df = proc_agg_data(agg_res)
        res_df = pd.concat([res_df, on7_df])

    # Sum across shards, drop agriculture (10108) and industry (10109),
    # apply the deterministic coefficient, reduce timestamps to "HH:MM".
    res_df = (
        res_df.groupby(["type_id", "data_time"])
        .agg({"p_kt": "sum"})
        .reset_index()
    )
    res_df = res_df.loc[~res_df['type_id'].isin([10108,10109])]
    res_df["data_time"] = pd.to_datetime(res_df["data_time"])
    res_df['p_kt'] = res_df.apply(lambda row: row['p_kt'] * random_multiplier(row['data_time']), axis=1)
    res_df["data_time"] = res_df["data_time"].dt.strftime("%H:%M")
    return res_df


def get_cons_info(is_order):
    """Load the target consumer list from CSV and resolve their on5/on7 orgs.

    Args:
        is_order: True -> orderly-power-use list, False -> demand-response list.

    Returns:
        DataFrame with columns cons_no / on5 / on7 (one row per matched consumer).
    """
    user_file = order_user_file if is_order else requir_user_file
    cons_list = (
        pd.read_csv(user_file, header=None, dtype=str)[0].drop_duplicates().tolist()
    )

    sql = f"""select distinct cons_no,on5,on7
              from c_cons
              where cons_no in ({",".join(cons_list)})
              order by on5,on7"""
    cid_df = sql_engine.query(sql)

    print(f"从{user_file}读取用户数：{len(cons_list)}，从c_cons匹配用户数：{len(cid_df)}")

    return cid_df


def get_spec_cons_agg(is_order=False):
    """Aggregate today's and yesterday's curves for one special consumer group.

    Runs the ES aggregation for both days, converts each result into stored
    96-point records (302 = today/real-time, 301 = yesterday/baseline) and
    returns the combined record list.

    Args:
        is_order: True for orderly-power-use consumers, False for
            demand-response consumers.
    """
    after_day = (today + timedelta(days=1)).strftime("%Y-%m-%d")
    yesterday_after_day = (yesterday + timedelta(days=1)).strftime("%Y-%m-%d")

    # Full 96-slot "HH:MM" grid used to expose missing slots as NaN.
    time_df = pd.DataFrame(
        {"data_time": pd.date_range(today, periods=96, freq="15min").strftime("%H:%M")}
    )

    cid_df = get_cons_info(is_order)
    today_df = cons_agg(str(today), after_day, cid_df)
    yesterday_df = cons_agg(str(yesterday), yesterday_after_day, cid_df)

    today_rows = proc_df_to_list(today_df, "302", time_df, is_order, str(today))
    yesterday_rows = proc_df_to_list(yesterday_df, "301", time_df, is_order, str(yesterday))

    return today_rows + yesterday_rows


def filter_row(row: dict) -> bool:
    """Return True when the row's type_id is one of the tracked industry codes.

    ``type_id`` here is the industry code (a value of ``level_type_map``,
    e.g. 10101), as set by the caller before filtering.
    """
    # Direct membership test on the dict values view; building a throwaway
    # list on every call (as before) was wasted work.
    return row["type_id"] in level_type_map.values()


def data_handle(row: dict, data_date, curve_type):
    """Fold one source row into the module-level ``result_dict`` accumulator.

    Rows sharing the same consumer/industry/date collapse into a single record
    whose ``pointN`` keys (N = 1..96) carry the quarter-hour values.

    Args:
        row: source record with keys "type_id", "data_time", "p_kt" and
            optionally "cons_no"/"cons_name" (present for per-consumer ES data).
        data_date: 'YYYY-MM-DD' business date stored on the record.
        curve_type: curve code stored on the record (e.g. "301"/"302"/"303").
    """
    # There can still be missing data points in the source.
    data_type = 0 if row.get("cons_no") is None else 1  # 0 = aggregate row, 1 = per-consumer row
    cons_no = row.get("cons_no")
    type_id = type_level_map.get(row.get("type_id"))  # industry 10xxx -> display level_sort 16xx
    data_time = str(row.get("data_time"))
    date = data_time[:10]  # 'YYYY-MM-DD' prefix of the timestamp
    # Shift the label date when the script replays data from `before_days` ago.
    data_date = (datetime.strptime(data_date, '%Y-%m-%d')+timedelta(days=before_days)).strftime('%Y-%m-%d')
    final_data = {
        "cons_no": cons_no,
        "cons_name": row.get("cons_name"),
        "data_type": data_type,
        "level_sort": type_id,
        "curve_type": curve_type,
        "date": data_date,
    }
    key = f"{cons_no}_{type_id}_{date}" if data_type else f"{type_id}_{date}"
    # Keep the first header seen for this key; later rows only add pointN values.
    result_dict.setdefault(key, final_data)
    hms = data_time[10:]
    # Quarter-hour slot index, 1-based: 00:00 -> point1, 23:45 -> point96.
    time_key = f"point{int(int(hms.split(':')[0]) * 4 + ((int(hms.split(':')[1]) / 15) + 1))}"
    # print(f'处理该行后的结果: {row}')
    # NOTE(review): np.isnan raises TypeError if p_kt is None rather than NaN —
    # assumes upstream never delivers None; confirm against the ES/SQL sources.
    p_kt = row.get("p_kt", np.nan)
    result_dict[key][time_key] = None if np.isnan(p_kt) else round(p_kt,2)


def get_sql_data(table_name, data_date):
    """Fetch one day's 15-min industry load rows for org 51101 from MySQL.

    Args:
        table_name: source table; when empty/None the function returns None.
        data_date: 'YYYY-MM-DD' day to query.

    Returns:
        Cursor rows (sequence of dict-like records) or None when table_name
        is empty.
    """
    if table_name in ["", None]:
        return

    # Connect to MySQL.
    conn, cursor = mysql_con.get_cursor()
    try:
        start_time = f"{data_date} 00:00:00"
        end_time = f"{data_date} 23:45:00"
        # For "today" only query up to the current instant — the rest of the
        # day has no data yet.
        if data_date == (datetime.now()-timedelta(days=before_days)).strftime('%Y-%m-%d'):
            end_time = (datetime.now()-timedelta(days=before_days)).strftime("%Y-%m-%d %H:%M:%S")
        sql = f"""SELECT org_no, type_id, data_time, p_kt_sum as p_kt FROM {table_name} 
            where type_id in {tuple(level_type_map.values())} and org_no = 51101
            and data_time between '{start_time}' and '{end_time}' 
            """
        print(f"查询语句为: {sql}")
        cursor.execute(sql)
        rows = cursor.fetchall()
        print(f"获取到{len(rows)}行记录")
    finally:
        # BUG FIX: close the cursor/connection even when execute/fetch raises;
        # previously both leaked on any query error.
        cursor.close()
        conn.close()
    return rows


def handle_data(rows, data_date, curve_type):
    """Reset the global accumulator, fold every row into it, and return it.

    Args:
        rows: iterable of source records (see ``data_handle``).
        data_date: 'YYYY-MM-DD' business date.
        curve_type: curve code for the produced records.
    """
    global result_dict
    result_dict = {}
    for record in rows:
        data_handle(record, data_date, curve_type)
    return result_dict


def save_json(data, curve_type):
    """Dump the accumulated records (the dict's values) to /tmp as a JSON array."""
    path = f"/tmp/power_curve-{curve_type}.json"
    payload = list(data.values())
    with open(path, "w") as fh:
        json.dump(payload, fh)


def merge_cons():
    """Map cons_no -> consumer record for every consumer in a tracked industry.

    Returns:
        dict of cons_no to its c_cons row (cons_name, type_id, type_code,
        pare_type_id, pare_type_code).
    """
    sql = f"""select cons_no, cons_name, type_id, type_code, pare_type_id, pare_type_code from c_cons
    where pare_type_id in {tuple(level_type_map.values())}"""
    rows = mysql_con.get(sql)
    return {row["cons_no"]: row for row in rows}


def get_es_data(index, data_date, curve_type):
    """Scroll one day's per-consumer load points from Elasticsearch.

    Enriches every hit with cons_name/pare_type_id from c_cons, filters to the
    tracked industries and folds each row into the global ``result_dict``.

    Args:
        index: ES index name; when empty/None the function returns None.
        data_date: 'YYYY-MM-DD' day to query (inclusive bounds).
        curve_type: curve code for the produced records.

    Returns:
        The populated ``result_dict`` (or None when index is empty).
    """
    start_time = f"{data_date} 00:00:00"
    end_time = f"{data_date} 23:45:00"
    if index in ["", None]:
        return
    global result_dict
    cons_dict = merge_cons()

    # BUG FIX: the accumulator used to be cleared inside the scroll loop, so
    # every batch wiped the previous ones and only the last page survived.
    result_dict = {}

    # The range query is loop-invariant; build it once.
    query = {
        "query": {
            "bool": {
                "must": [
                    {
                        "range": {
                            "data_time": {
                                "gte": start_time,
                                "lte": end_time,
                            }
                        }
                    }
                ]
            }
        }
    }

    # Initial search, then follow the scroll cursor until a page comes back empty.
    scroll_id = None
    while True:
        if scroll_id is None:
            results = es.search(
                index=index, body=query, scroll="5m", size=10000
            )
        else:
            results = es.scroll(scroll_id=scroll_id, scroll="5m")
        scroll_id = results["_scroll_id"]
        hits = results["hits"]["hits"]
        if not hits:
            # No more results — stop scrolling.
            break

        for hit in hits:
            row = hit["_source"]
            tmp_cons_no = row["cons_no"]
            if cons_dict.get(tmp_cons_no) is None:
                # Consumer is not in a tracked industry — skip.
                continue
            row["cons_name"] = cons_dict[tmp_cons_no].get("cons_name")
            row["type_id"] = cons_dict[tmp_cons_no].get("pare_type_id")
            if filter_row(row):
                data_handle(row, data_date, curve_type)
    return result_dict


def send_cons_power(data_date, curve_type):
    """Fetch one day's per-consumer curves from the fixed ES index.

    The fetched data currently goes nowhere (the Kafka insert is disabled);
    it only populates the global ``result_dict`` via ``get_es_data``.
    """
    # data_date = str(datetime(2022,4,30).date())
    es_index = "aclr_res_power_96_sichuan_spark_test-51401-5140137"
    data = get_es_data(es_index, data_date, curve_type)
    # insert_into_kafka(data)
    # insert_into_kafka(data)


def fill_sql_data(sql_data, data_date):
    """Scale and smooth one day's SQL rows, expanded onto the 96-point grid.

    Per industry: apply the deterministic per-timestamp coefficient, smooth
    with a rolling mean, and emit one dict per quarter-hour slot (p_kt is NaN
    where the source had no data).

    Args:
        sql_data: rows from ``get_sql_data`` (keys: type_id, data_time, p_kt, ...).
        data_date: 'YYYY-MM-DD' day the grid is built for.

    Returns:
        Flat list of {"type_id", "data_time", "p_kt"} dicts, 96 per industry.
    """
    new_data = []
    df = pd.DataFrame(sql_data).sort_values(["type_id", "data_time"])
    df['p_kt'] = df.apply(lambda row: row['p_kt'] * random_multiplier(row['data_time']), axis=1)
    # Agriculture (10108) and Industry (10109) are excluded from the output.
    df = df.loc[~df['type_id'].isin([10108,10109])]
    # 96 slots covering [data_date 00:00, next day 00:00): periods=97 with
    # closed="left" drops the end point.
    # NOTE(review): `closed=` was removed in pandas 2.0 (renamed `inclusive`);
    # this code targets an older pandas.
    day_range = pd.DataFrame(
        pd.date_range(
            data_date,
            str(datetime.strptime(data_date, "%Y-%m-%d") + timedelta(days=1)),
            periods=97,
            closed="left",
        ),
        columns=["data_time"],
    )
    for type_id in df["type_id"].drop_duplicates():
        type_data = [
            {
                "type_id": type_id,
                "data_time": slot.strftime("%Y-%m-%d %H:%M:%S"),
            }
            for slot in day_range["data_time"].tolist()
        ]
        # .copy() so the rolling assignment below never writes into a view of df.
        type_df = df[df["type_id"] == type_id].copy()
        if len(type_df) > 5:
            # 4-point rolling mean; bfill the leading NaNs it introduces.
            type_df["p_kt"] = type_df["p_kt"].rolling(window=4).mean().bfill()
        type_df = pd.merge(day_range, type_df, on="data_time", how="left")
        for slot_dict, p_kt in zip(type_data, type_df["p_kt"].tolist()):
            slot_dict["p_kt"] = p_kt
        # BUG FIX: this extend used to sit inside the zip loop above (compare
        # the correct placement in diff_data), appending the whole 96-slot
        # list once per slot and inflating the output ~96x.
        new_data.extend(type_data)
    return new_data


def send_trade_power(data_date, curve_type):
    """Build one day's province-level curve records from the MySQL source.

    Args:
        data_date: 'YYYY-MM-DD' day to process.
        curve_type: curve code for the produced records.

    Returns:
        list of record dicts (one per industry/consumer key).
    """
    # data_date = str(datetime(2022, 1, 1).date())
    table_name = config.get("ORGNO_TYPEID_15MIN", "orgno_typeid_15min")

    raw_rows = get_sql_data(table_name, data_date)
    filled_rows = fill_sql_data(raw_rows, data_date)
    handled = handle_data(filled_rows, data_date, curve_type)
    # save_json(handled, curve_type)
    # insert_into_kafka(handled)
    return list(handled.values())


def send_base_line():
    """Baseline load.

    Re-labels yesterday's data as the 301 baseline curve and returns the
    resulting record list.
    """
    # send_cons_power(yesterday, "301")
    return send_trade_power(str(yesterday), "301")


def send_power_curve():
    """Real-time load.

    Processes today's data as the 302 real-time curve and returns the
    resulting record list.
    """
    # send_cons_power(str(today), "302")
    print(f"开始执行实时负荷导入")
    return send_trade_power(str(today), "302")


def diff_data(today_data, yesterday_data):
    """Per-industry difference (today minus yesterday) on the 96-point grid.

    Both inputs are row sequences with keys type_id / data_time / p_kt (e.g.
    from ``get_sql_data``). Each day's rows are left-joined onto a full
    96-slot quarter-hour grid before subtracting, so missing slots yield NaN.
    Uses the module globals ``today`` and ``yesterday`` (date objects).

    Returns:
        list of {"type_id", "data_time", "p_kt"} dicts keyed on today's
        timestamps, where p_kt = today - yesterday.
    """
    new_data = []
    # 96 slots per day: periods=97 with closed="left" drops the end point.
    # NOTE(review): `closed=` was removed in pandas 2.0 — this targets older pandas.
    yesterday_day_range = pd.DataFrame(
        pd.date_range(yesterday, today, periods=97, closed="left"),
        columns=["data_time"],
    )
    today_day_range = pd.DataFrame(
        pd.date_range(
            today, str(today + timedelta(days=1)), periods=97, closed="left"
        ),
        columns=["data_time"],
    )

    today_df = pd.DataFrame(today_data).sort_values(["type_id", "data_time"])
    yesterday_df = pd.DataFrame(yesterday_data).sort_values(
        ["type_id", "data_time"]
    )

    # Iterate the industries present yesterday; today-only industries are skipped.
    type_ids = yesterday_df["type_id"].drop_duplicates()
    for type_id in type_ids:
        type_data = []
        for date in today_day_range["data_time"].tolist():
            type_data.append(
                {
                    "type_id": type_id,
                    "data_time": date.strftime("%Y-%m-%d %H:%M:%S"),
                }
            )

        today_type_df = today_df[today_df["type_id"] == type_id]
        if len(today_type_df) > 3:  # drop the last two (likely incomplete) points
            today_type_df = today_type_df.sort_values("data_time").iloc[:-2]
        today_type_df = pd.merge(
            today_day_range, today_type_df, on="data_time", how="left"
        )

        yesterday_type_df = yesterday_df[yesterday_df["type_id"] == type_id]
        yesterday_type_df = pd.merge(
            yesterday_day_range, yesterday_type_df, on="data_time", how="left"
        )
        # Positional subtraction: both merged frames share the same 0..95 index.
        diff_kt = today_type_df["p_kt"] - yesterday_type_df["p_kt"]
        for d, p_kt in zip(type_data, diff_kt.tolist()):
            d["p_kt"] = p_kt
        new_data.extend(type_data)

    return new_data


def send_tk_power_curve():
    """Regulation load.

    Builds the 303 regulation curve as today's data minus yesterday's
    baseline, per industry, and returns the resulting record list.
    """
    table_name = config.get("ORGNO_TYPEID_15MIN", "orgno_typeid_15min")

    yesterday_rows = get_sql_data(table_name, str(yesterday))
    today_rows = get_sql_data(table_name, str(today))
    delta_rows = diff_data(today_rows, yesterday_rows)

    handled = handle_data(delta_rows, str(today), "303")
    # save_json(handled, "303")
    # insert_into_kafka(handled)
    return list(handled.values())


def get_baseline_data(day_time, baseline_day, is_delete, is_predict, baseline_coef):
    """Entry point: compute and collect all curve records for one business day.

    Args:
        day_time: 'YYYY-MM-DD' display day (becomes the module global ``today``,
            shifted back by ``before_days``).
        baseline_day: 'YYYY-MM-DD' baseline day (becomes the global ``yesterday``).
        is_delete: unused in this implementation — kept for caller compatibility;
            TODO confirm against callers.
        is_predict: unused — same note as above.
        baseline_coef: unused — same note as above.

    Returns:
        Combined list of record dicts: province real-time + baseline, the two
        special consumer groups (real-time + baseline each), and the summed
        303 curve.
    """
    global today
    global yesterday
    today = datetime.strptime(day_time, "%Y-%m-%d").date()
    yesterday = datetime.strptime(baseline_day, "%Y-%m-%d").date()

    today = today - timedelta(days=before_days)
    print("date==",today,baseline_day,before_days)

    # Quarter-hour time points from day_time up to "now" floored to 15 min.
    # NOTE(review): the original comment said "45 minutes before now", but the
    # code only floors to the quarter hour — confirm the intent. The list is
    # currently only printed, never used.
    now_time = datetime.now()
    now_time = now_time.replace(minute=(now_time.minute // 15) * 15)
    filter_time_list = pd.date_range(
        start=day_time, end=now_time, freq="15min"
    ).strftime("%H:%M")
    print("filter_time_list:", filter_time_list)

    power_data = send_power_curve()  # province-wide real-time curve
    baseline_data = send_base_line()  # province-wide baseline curve
    # tk_power_data = send_tk_power_curve()  # regulation curve — not needed for the new requirement
    requir_cons_data = get_spec_cons_agg()  # demand-response consumers: real-time + baseline
    order_cons_data = get_spec_cons_agg(is_order=True)  # orderly-power-use consumers: real-time + baseline
    sum_data = get_sum_df()
    print('requir_cons_data==',requir_cons_data[0])
    print('sum===', sum_data)

    power_data.extend(baseline_data)
    # power_data.extend(tk_power_data)  # not needed for the new requirement
    power_data.extend(requir_cons_data)
    power_data.extend(order_cons_data)
    power_data.extend(sum_data)
    return power_data


if __name__ == "__main__":
    # General configuration.
    # Curve types — 301: air-conditioning baseline load; 302: computed (real-time) load.
    # The data query window is inclusive on both ends.

    today = "2024-04-26"  # datetime.today()
    yesterday = "2024-04-25"  # str((today - timedelta(days=1)))
    # NOTE(review): the trailing args (is_delete, is_predict, baseline_coef)
    # are currently ignored by get_baseline_data.
    get_baseline_data(today, yesterday, False,False,1)
