import argparse
import datetime
import json

import numpy as np
import pandas as pd
from kafka import KafkaProducer

from air_web.config.config import config
from air_web.data_platform import mysql_con
import ctypes;

# Build a Kafka producer.
def get_kp():
    """Return a KafkaProducer wired to the configured bootstrap servers."""
    return KafkaProducer(bootstrap_servers=kafka_config["bootstrap_servers"])


# Mapping from internal industry level ids (16xx) to the downstream type ids:
#   1602 transport, storage & postal services       -> 10101
#   1603 information transmission, software & IT    -> 10102
#   1604 wholesale & retail                         -> 10103
#   1605 accommodation & catering                   -> 10104
#   1606 finance                                    -> 10105
#   1607 real estate                                -> 10106
#   1608 leasing & business services                -> 10107
#   1609 public services & administration           -> 10201
# (1601, construction, is intentionally unmapped.)
level_type_map = {
    1602: 10101,
    1603: 10102,
    1604: 10103,
    1605: 10104,
    1606: 10105,
    1607: 10106,
    1608: 10107,
    1609: 10201,
}

# Reverse lookup: downstream type id -> industry level id.
type_level_map = {type_id: level_id for level_id, type_id in level_type_map.items()}


def filter_row(row: dict) -> bool:
    """Return True if the row's type_id is one of the mapped target type ids.

    The original rebuilt ``list(level_type_map.values())`` on every call;
    testing membership on the dict's values view directly avoids that.
    """
    return row["type_id"] in level_type_map.values()


result_dict = {}


def data_handle(row: dict, data_date, curve_type):
    """Fold a single 15-minute reading into the module-level result_dict.

    Rows carrying a cons_no are per-consumer records (data_type 1); rows
    without one are per-industry aggregates (data_type 0). Each dict entry
    accumulates one day's curve as POINT1..POINT96 columns.
    """
    # some 15-minute points may be missing from the source data
    cons_no = row.get("cons_no")
    data_type = 0 if cons_no is None else 1
    level_sort = type_level_map.get(row.get("type_id"))
    stamp = str(row.get("data_time"))
    day = stamp[:10]
    key = f"{cons_no}_{level_sort}_{day}" if data_type else f"{level_sort}_{day}"
    result_dict.setdefault(
        key,
        {
            "cons_no": cons_no,
            "cons_name": row.get("cons_name"),
            "data_type": data_type,
            "level_sort": level_sort,
            "curve_type": curve_type,
            "date": data_date,
        },
    )
    # slot number 1..96: four slots per hour plus the 15-minute offset
    hour_txt, minute_txt = stamp[10:].split(":")[:2]
    slot = int(hour_txt) * 4 + int(minute_txt) // 15 + 1
    result_dict[key][f"POINT{slot}"] = row.get("p_kt", np.nan)


def get_sql_data(table_name, data_date):
    """Fetch one day of 15-minute aggregate load rows for org 51101.

    Args:
        table_name: MySQL table to query; empty/None short-circuits to None.
        data_date: 'YYYY-MM-DD' day; window is 00:00:00 .. 23:45:00 inclusive.

    Returns:
        cursor.fetchall() rows, or None when table_name is empty.
    """
    if table_name in ("", None):
        return None

    # Connect to MySQL.
    conn, cursor = mysql_con.get_cursor()
    try:
        start_time = f"{data_date} 00:00:00"
        end_time = f"{data_date} 23:45:00"
        # NOTE(review): table_name/data_date are interpolated into the SQL.
        # Both are internal constants at current call sites, but switch to
        # parameterized queries if they ever become user-supplied.
        sql = f"""SELECT org_no, type_id, data_time, p_kt_sum as p_kt FROM {table_name} 
		    where type_id in {tuple(level_type_map.values())} and org_no = 51101
		    and data_time between '{start_time}' and '{end_time}' 
		    """
        cursor.execute(sql)
        return cursor.fetchall()
    finally:
        # Always release the DB handles, even when execute/fetch raises
        # (the original leaked them on error).
        cursor.close()
        conn.close()


def handle_data(rows, data_date, curve_type):
    """Rebuild result_dict from scratch, fold every row in, and return it."""
    global result_dict
    result_dict = {}
    for record in rows:
        data_handle(record, data_date, curve_type)
    return result_dict


def insert_into_kafka(data):
    """Serialize each value of *data* to JSON and publish it to the Kafka topic.

    Args:
        data: dict whose values are JSON-serializable message dicts.
    """
    producer = get_kp()
    try:
        for message in data.values():
            # NOTE(review): values may contain float NaN (np.nan from
            # data_handle); json.dumps emits the non-standard token `NaN`,
            # which strict JSON consumers reject — confirm the downstream
            # consumer accepts it.
            value = json.dumps(message).encode("utf-8")
            producer.send(kafka_config["topic"], value=value)
        producer.flush()
    finally:
        # Always close the producer, even when a send fails
        # (the original leaked it on error).
        producer.close()

    print("Data successfully pushed to Kafka!")


def merge_cons():
    """Fetch consumers whose parent type id is in the mapped set, keyed by c_id."""
    sql = f"""select c_id, cons_no, cons_name, type_id, type_code, pare_type_id, pare_type_code from c_cons
    where pare_type_id in {tuple(level_type_map.values())}"""
    rows = mysql_con.get(sql)
    return {row["c_id"]: row for row in rows}


def get_es_data(index, data_date, curve_type):
    """Scroll one day of readings out of Elasticsearch into result_dict.

    Rows are joined with c_cons metadata (merge_cons) and folded into the
    module-level result_dict via data_handle.

    NOTE(review): result_dict is re-initialised at the top of EVERY scroll
    iteration, so only the hits of the last non-empty batch survive in the
    returned dict — earlier batches are discarded. Presumably the reset was
    meant to happen once before the loop; confirm before relying on this.
    """
    start_time = f"{data_date} 00:00:00"
    end_time = f"{data_date} 23:45:00"
    if index in ["", None]:
        return
    global result_dict
    # NOTE(review): ctypes.CDLL returns a raw shared-library handle; calling
    # zElasticsearch on it and then .search()/.scroll() only works if this
    # .so is actually a compiled Python extension exposing an ES-client-like
    # object — confirm against the air_web.ruleng.es build.
    my_lib = ctypes.CDLL('air_web.ruleng.es.cpython-38-x86_64-linux-gnu.so')
    # Connect to Elasticsearch.
    es = my_lib.zElasticsearch(config["ES_HOST"])
    cons_dict = merge_cons()

    # Run the initial scroll query, then keep pulling batches.
    scroll_id = None
    while True:
        result_dict = {}  # see NOTE in the docstring: resets per batch
        query = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "range": {
                                "data_time": {
                                    "gte": start_time,
                                    "lte": end_time,
                                }
                            }
                        }
                    ]
                }
            }
        }

        # Execute the Elasticsearch query (first page) or continue the scroll.
        if scroll_id is None:
            results = es.search(
                index=index, body=query, scroll="5m", size=10000
            )
        else:
            results = es.scroll(scroll_id=scroll_id, scroll="5m")
        scroll_id = results["_scroll_id"]
        hits = results["hits"]["hits"]
        if not hits:
            # No more results: exit the loop.
            break

        # Enrich each hit with consumer metadata and fold it into result_dict
        # (the caller pushes result_dict to Kafka).
        for hit in hits:
            row = hit["_source"]
            tmp_c_id = row["c_id"]
            if cons_dict.get(tmp_c_id) is None:
                continue
            row["cons_no"] = cons_dict[tmp_c_id].get("cons_no")
            row["cons_name"] = cons_dict[tmp_c_id].get("cons_name")
            row["type_id"] = cons_dict[tmp_c_id].get("pare_type_id")
            if filter_row(row):
                data_handle(row, data_date, curve_type)
    return result_dict


def send_cons_power(data_date, curve_type):
    """Pull per-consumer 96-point curves from ES and push them to Kafka."""
    es_index = "aclr_res_power_96_sichuan_spark_test-51401-5140137"
    curves = get_es_data(es_index, data_date, curve_type)
    insert_into_kafka(curves)


def fill_sql_data(sql_data, data_date):
    """Expand raw rows into a complete 96-point day per type_id.

    Slots with no source row get p_kt = NaN (from the left merge).

    Fixes two defects in the original:
    * ``new_data.extend(type_data)`` ran inside the inner per-point loop,
      duplicating every type's rows ~96 times;
    * ``pd.date_range(..., closed="left")`` uses a keyword deprecated in
      pandas 1.4 and removed in 2.0.

    Args:
        sql_data: iterable of dicts with type_id, data_time, p_kt.
        data_date: 'YYYY-MM-DD' day to fill.

    Returns:
        list of dicts {type_id, data_time (str), p_kt}, exactly 96 per type_id.
    """
    new_data = []
    df = pd.DataFrame(sql_data).sort_values(["type_id", "data_time"])
    # 96 slots: 00:00 .. 23:45 at 15-minute steps.
    day_range = pd.DataFrame(
        pd.date_range(data_date, periods=96, freq="15min"),
        columns=["data_time"],
    )
    for type_id in df["type_id"].drop_duplicates():
        # Left-merge onto the full grid so missing slots become NaN.
        type_df = pd.merge(
            day_range, df[df["type_id"] == type_id], on="data_time", how="left"
        )
        type_data = [
            {
                "type_id": type_id,
                "data_time": ts.strftime("%Y-%m-%d %H:%M:%S"),
                "p_kt": p_kt,
            }
            for ts, p_kt in zip(day_range["data_time"], type_df["p_kt"].tolist())
        ]
        # Extend once per type_id, outside the per-point loop.
        new_data.extend(type_data)
    return new_data


def send_trade_power(data_date, curve_type):
    """Push the per-industry 15-minute curve for *data_date* to Kafka."""
    table_name = "orgno_typeid_15min"

    raw_rows = get_sql_data(table_name, data_date)
    filled_rows = fill_sql_data(raw_rows, data_date)
    insert_into_kafka(handle_data(filled_rows, data_date, curve_type))


def send_base_line():
    """Baseline load.

    Take yesterday's curve as the baseline and push it straight to Kafka
    (curve_type 301).
    """
    send_trade_power(yesterday, "301")


def send_power_curve():
    """Realtime load: push today's curve to Kafka (curve_type 302)."""
    send_trade_power(str(today), "302")


def diff_data(today_data, yesterday_data):
    """Return today's 96-point curve minus yesterday's, per type_id.

    Both days are aligned slot-by-slot on the 15-minute grid; a slot missing
    on either day yields NaN in the difference. Output rows carry today's
    timestamps. Uses the module-level ``today``/``yesterday`` globals.

    Replaces ``pd.date_range(..., periods=97, closed="left")`` — a keyword
    deprecated in pandas 1.4 and removed in 2.0 — with an equivalent
    96-period, 15-minute-frequency range.
    """
    new_data = []
    # 96 slots per day: 00:00 .. 23:45 at 15-minute steps.
    yesterday_day_range = pd.DataFrame(
        pd.date_range(yesterday, periods=96, freq="15min"),
        columns=["data_time"],
    )
    today_day_range = pd.DataFrame(
        pd.date_range(today, periods=96, freq="15min"),
        columns=["data_time"],
    )

    today_df = pd.DataFrame(today_data).sort_values(["type_id", "data_time"])
    yesterday_df = pd.DataFrame(yesterday_data).sort_values(
        ["type_id", "data_time"]
    )

    for type_id in yesterday_df["type_id"].drop_duplicates():
        # Left-merge each day onto its full grid so missing slots become NaN.
        today_type_df = pd.merge(
            today_day_range,
            today_df[today_df["type_id"] == type_id],
            on="data_time",
            how="left",
        )
        yesterday_type_df = pd.merge(
            yesterday_day_range,
            yesterday_df[yesterday_df["type_id"] == type_id],
            on="data_time",
            how="left",
        )
        diff_kt = today_type_df["p_kt"] - yesterday_type_df["p_kt"]
        new_data.extend(
            {
                "type_id": type_id,
                "data_time": ts.strftime("%Y-%m-%d %H:%M:%S"),
                "p_kt": p_kt,
            }
            for ts, p_kt in zip(today_day_range["data_time"], diff_kt.tolist())
        )

    return new_data


def send_tk_power_curve():
    """Regulated load: push (today - yesterday) curves to Kafka (type 303)."""
    table_name = "orgno_typeid_15min"

    yesterday_rows = get_sql_data(table_name, yesterday)
    today_rows = get_sql_data(table_name, str(today))
    diffed = diff_data(today_rows, yesterday_rows)

    insert_into_kafka(handle_data(diffed, str(today), "303"))


if __name__ == "__main__":
    # Common settings.
    # Curve types — 301: A/C baseline load; 302: A/C measured (realtime) load.
    # Data query window is inclusive on both ends.

    def _str2bool(value):
        """Interpret a CLI flag: only the literal 'true' (any case) is True.

        argparse passes option values through as strings, so the original
        code treated ANY non-empty value — including "false" — as truthy,
        meaning `-realtime false` still enabled the realtime push.
        """
        return str(value).strip().lower() == "true"

    parser = argparse.ArgumentParser(prog="send data to kafka")
    parser.add_argument(
        "-realtime",
        dest="realtime",
        type=_str2bool,
        default=False,
        help="是否实时推送，是为 true 否为 false",
    )
    parser.add_argument(
        "-is_baseline",
        dest="is_baseline",
        type=_str2bool,
        default=False,
        help="是否推送基线负荷，是为 true 否为false",
    )
    args = parser.parse_args()
    today = datetime.date.today()
    yesterday = str((today - datetime.timedelta(days=1)))

    # Kafka connection settings.
    kafka_config = {
        "bootstrap_servers": "25.214.12.132:21007,25.214.13.227:21007,25.214.13.5:21007,25.214.13.57:21007,25.214.13.156:21007,25.214.12.21:21007,25.214.12.71:21007,25.214.12.59:21007,25.214.13.194:21007,25.214.13.38:21007,25.214.13.221:21007,25.214.13.116:21007,25.214.13.80:21007,25.214.13.140:21007,25.214.13.225:21007,25.214.13.224:21007",  # Kafka broker addresses
        "topic": "dws_aircondition_zhx_df",  # Kafka topic name
    }
    print(args)
    if args.realtime:
        send_power_curve()
        send_tk_power_curve()
    if args.is_baseline:
        send_base_line()
