# encoding = utf-8
import datetime

import pandas as pd

from application.logging import logger
from application.utils.CodeTimingUtil import CodeTimingUtil
from application.utils.TimeSleepUtil import TimeSleepUtil


@CodeTimingUtil(name="每天累热数据计算[process_data_each_day_calculate]")
def process_data_each_day_calculate(data: pd.DataFrame) -> pd.DataFrame:
    """
    Compute per-day accumulated-heat deltas (daily heat) per hour-of-day group.

    For each hour-of-day group: sort by ``collect_time``, keep one row per
    calendar day, re-index onto the full date range covered by the data, and
    take the day-over-day difference of ``total_accum_heat`` as ``day_heat``.
    Rows whose ``day_heat`` is not a positive number are dropped, and all
    groups are concatenated into the returned frame.

    TODO: improve the daily-heat logic — incomplete data and large value
    variance make the downstream model inaccurate.

    :param data: DataFrame assumed to contain at least ``collect_time``,
        ``total_accum_heat``, ``station_id`` and the temperature columns
        copied below (schema inferred from usage — confirm with caller).
    :return: concatenated date-indexed DataFrame with a ``day_heat`` column;
        an empty DataFrame when no row has a positive ``total_accum_heat``.
    """
    # Pre-filter diagnostics.
    logger.info(f"数据过滤[data[data['total_accum_heat'] > 0]]前: {len(data)}")
    logger.info(f"{data['total_accum_heat']}")

    # Keep only rows with a positive accumulated-heat reading.
    data = data[data["total_accum_heat"] > 0]
    logger.info(f"数据过滤[data[data['total_accum_heat'] > 0]]后: {len(data)}")

    # No usable rows: return an empty frame (caller's contract for "no data").
    if data.empty:
        logger.info(f"没有符合条件的数据")
        return pd.DataFrame([])

    # Normalize collect_time to datetime64 and keep a pristine copy; the copy
    # is written back into the per-day frames after collect_time itself is
    # truncated to a date and consumed as the index.
    data["collect_time_save"] = data["collect_time"] = pd.to_datetime(data["collect_time"])
    logger.info(f"日期格式转换后日期列数据:data['collect_time_save']: \n {data['collect_time_save']}")

    # Calendar components via vectorized .dt accessors (same values as the
    # original per-row apply(lambda ...), just without the Python-level loop).
    data["year"] = data["collect_time"].dt.year
    data["month"] = data["collect_time"].dt.month
    data["day"] = data["collect_time"].dt.day
    data["hour"] = data["collect_time"].dt.hour

    # Chronologically sorted timestamps; only the bounds are used below.
    collect_time_sorted = sorted(data["collect_time"])
    logger.info(f"时间排序[collect_time_sorted][len]: {len(collect_time_sorted)}")

    collect_time_sorted_sta = collect_time_sorted[0]
    collect_time_sorted_end = collect_time_sorted[-1]
    logger.info(
        f"时间排序[collect_time_sorted_sta, collect_time_sorted_end]: {collect_time_sorted_sta}, {collect_time_sorted_end}"
    )

    # Convert the range bounds straight to datetime.date.  The original code
    # round-tripped through str()/strptime("%Y-%m-%d %H:%M:%S"), which raises
    # ValueError whenever a timestamp carries sub-second precision.
    start = collect_time_sorted_sta.date()
    end = collect_time_sorted_end.date()
    logger.info(f"时间转换[datetime.date][start,end]:{start},{end}")

    # Group by hour-of-day so each group holds one reading per day at a fixed hour.
    group = data.groupby(["hour"])
    logger.info(f"数据分组[group]: {group}")

    list_all_data = []
    for a in group:
        # a is a (key, frame) tuple.  NOTE(review): with a list grouper,
        # newer pandas yields the key as a 1-tuple rather than a scalar.
        logger.info(f"分组数据[group][a]:{type(a)}:\n {a}")

        a_0 = a[0]
        logger.info(f"分组数据[group][a_0]:{type(a_0)}: {a_0}")

        a_1: pd.DataFrame = a[1]
        logger.info(f"分组数据[group][a_1]:{type(a_1)}\n {a_1}")
        logger.info(f"分组数据[group][a_1]:[hour]:\n {a_1['hour']}")
        logger.info(f"分组数据[group][a_1]:[hour][to_numpy()]:\n {a_1['hour'].to_numpy()}")

        # Sort the group chronologically (ascending).
        b: pd.DataFrame = a_1.sort_values(by="collect_time", axis=0, ascending=True)
        logger.info(f"采集时间排序[b][collect_time]:\n {b}\n")

        # De-duplicate: one row per calendar day, keeping the earliest.
        b = b.drop_duplicates(subset=["year", "month", "day"], keep="first")
        logger.info(f"数据去重: 按年月日,保留第一条[b]: \n{b}\n")

        # Truncate collect_time to datetime.date.  The original converted the
        # WHOLE `data` frame here every iteration and relied on index
        # alignment to slice out the group's rows; using `b` directly is the
        # intended (and equivalent) per-group operation.
        b["collect_time"] = b["collect_time"].dt.date
        logger.info(f"采集时间格式转换[b['collect_time']][yyyy-MM-dd]:\n {b['collect_time']} \n")

        # Promote collect_time to the index so assignments below align by date.
        b.set_index(["collect_time"], inplace=True)
        logger.info(f"设置索引[collect_time][b]:\n {b}")
        logger.info(f"设置索引[collect_time][b.index]:\n {b.index}")

        # Fresh frame indexed by the full date range (last day excluded);
        # every cell starts as NaN.  NOTE(review): 'outside_temper' is
        # declared but never filled below, so it stays 0 in the output —
        # kept as-is to preserve the output schema.
        df_new: pd.DataFrame = pd.DataFrame(
            index=pd.date_range(start=start, end=end)[:-1],
            columns=[
                'station_id',
                'collect_time',
                'second_supply_water_temp',
                'second_back_water_temp',
                'outside_temper',
                'max_inside_temper',
                'min_inside_temper',
                'average_inside_temper',
                'total_accum_heat']
        )
        logger.info(f"新数据集[df_new]: \n {df_new}")
        logger.info(f"新数据集[df_new.index]\n {df_new.index}")

        # Zero-fill so dates missing from `b` read as 0 instead of NaN.
        df_new = df_new.fillna(0).astype(int)
        logger.info(f"新数据集[df_new][fillna]: \n {df_new}")

        # Copy the group's values in by date index; dates absent from `b`
        # become NaN again in the assigned columns.  The three *_outside_temper
        # columns are added here on top of the declared column list.
        df_new["station_id"] = b["station_id"]
        df_new["collect_time"] = b["collect_time_save"]
        df_new["second_supply_water_temp"] = b["second_supply_water_temp"]
        df_new["second_back_water_temp"] = b["second_back_water_temp"]
        df_new["average_outside_temper"] = b["average_outside_temper"]
        df_new["min_outside_temper"] = b["min_outside_temper"]
        df_new["max_outside_temper"] = b["max_outside_temper"]
        df_new["max_inside_temper"] = b["max_inside_temper"]
        df_new["min_inside_temper"] = b["min_inside_temper"]
        df_new["average_inside_temper"] = b["average_inside_temper"]
        df_new["total_accum_heat"] = b["total_accum_heat"]
        logger.info(f"新数据集[df_new][拷贝填充]: \n {df_new}")

        # Heat-only frame over the same date range, zero-filled the same way.
        df_heat = pd.DataFrame(
            index=pd.date_range(start, end)[:-1],
            columns=["total_accum_heat"]
        )
        logger.info(f"新数据集[df_heat]\n {df_heat}")
        df_heat = df_heat.fillna(0).astype(int)
        logger.info(f"新数据集[df_heat][fillna]\n {df_heat}")

        df_heat["total_accum_heat"] = b["total_accum_heat"]
        logger.info(f"新数据集[df_heat][填充数据]\n {df_heat}")
        logger.info(f"新数据集[df_heat][数据列表][total_accum_heat][to_numpy]\n {df_heat['total_accum_heat'].to_numpy()}")

        # Day-over-day difference of the accumulated counter.
        # FIXME: negative when a reading is smaller than the previous one.
        # FIXME: NaN when either neighbour (or both) is NaN.
        cal_heat = df_heat.diff()
        logger.info(f"逐位相减[cal_heat][df_heat.diff()]\n {cal_heat}")
        logger.info(f"逐位相减[数据列表][to_numpy()]:\n {cal_heat['total_accum_heat'].to_numpy()}")

        # Daily heat column.
        df_new["day_heat"] = cal_heat['total_accum_heat']
        logger.info(f"每日累热[day_heat]: \n {df_new['day_heat']}")
        logger.info(f"每日累热[数据列表][to_numpy()]:\n {df_new['day_heat'].to_numpy()}")

        # Drop non-positive daily heat (negatives, zeros and NaN all fail `> 0`).
        df_new = df_new[df_new["day_heat"] > 0]
        logger.info(f"排除非正值[df_new][day_heat>0]:\n {df_new}")
        logger.info(f"排除非正值[数据列表][df_new][day_heat][to_numpy()]:\n {df_new['day_heat'].to_numpy()}")

        # Collect for the final concat after the loop.
        list_all_data.append(df_new)

        # Deliberate pacing between groups (kept from original behavior).
        TimeSleepUtil.sleep(seconds=5)

    logger.info(f"合并数据前[list_all_data]: \n {list_all_data}")
    TimeSleepUtil.sleep(seconds=10)

    # Concatenate all per-hour frames; date indexes may repeat across groups.
    total_data = pd.concat(list_all_data)

    logger.info(f"合并数据后[total_data]: \n {total_data}")
    logger.info(f"合并数据后[total_data][keys]: \n {total_data.keys()}")
    logger.info(f"合并数据后[total_data][index]: \n {total_data.index}")
    TimeSleepUtil.sleep(seconds=10)
    return total_data


if __name__ == "__main__":
    # Intentionally a no-op entry point; invoke the pipeline manually, e.g.:
    # process_data_each_day_calculate(data=[])
    pass
