"""
Author: 'silencesoup' 'silencesoup@outlook.com'
Date: 2024-12-03 13:42:31
LastEditors: 'silencesoup' 'silencesoup@outlook.com'
LastEditTime: 2024-12-25 15:34:23
FilePath: \neimeng_2024\data_source.py
Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
"""

import pandas as pd
from hydroevaluate.dataloader.data_source import CustomDataSourceForTorchHydro
import xarray as xr


class PastDataSource(CustomDataSourceForTorchHydro):
    """
    Custom data source feeding in-memory precipitation (``tp_data``) plus
    basin attributes read from ``data/attributes.nc`` to TorchHydro.
    """

    # Number of aggregation periods in a non-leap year (8760 hours), keyed by
    # precipitation unit.  Used to convert annual precipitation sums into
    # per-period mean values.
    _PERIODS_PER_YEAR = {
        "mm/3h": 8760 / 3,
        "mm/h": 8760.0,
        "mm/d": 365.0,
        "mm/day": 365.0,
    }

    def __init__(self, tp_data=None):
        super().__init__()
        # Raw precipitation records; consumed by read_ts_xrdataset.
        self.tp_data = tp_data
        # NOTE(review): some behavior should be driven by data_cfgs;
        # this class is just an example implementation.

    def read_ts_xrdataset(self, gage_id_lst, t_range, var_lst, time_units):
        """Read the precipitation time series for one basin as an xr.Dataset.

        Parameters
        ----------
        gage_id_lst : list
            Basin ids; only the first entry is used.
        t_range : list
            Start/end strings in "YYYY-MM-DD HH" style; only the first two
            are used (passed straight to ``agg_rain_data``).
        var_lst : list
            Variable names (currently unused; only "tp" is produced).
        time_units : list
            Time units; only the first entry is used ("3h" or "1D").

        Returns
        -------
        dict
            ``{time_units[0]: xr.Dataset}`` where the dataset has one "tp"
            variable with ("time", "basin") dimensions and a single basin.

        Raises
        ------
        ValueError
            If the time unit is neither 3-hourly nor daily.
        """
        time_unit = time_units[0]
        basin = gage_id_lst[0]
        # Both supported units went through the exact same (duplicated)
        # aggregation code before; anything else fell through with `result`
        # unbound and raised an obscure NameError.  Fail loudly instead.
        if "3h" not in time_unit and "1D" not in time_unit:
            raise ValueError(
                f"Unsupported time unit {time_unit!r}; expected '3h' or '1D'"
            )
        aggregated_df = agg_rain_data(self.tp_data, t_range, time_unit)
        aggregated_ds = xr.Dataset(
            {
                # "tp" indexed by (time, basin); the basin axis has length 1,
                # hence the [:, None] column expansion.
                "tp": (["time", "basin"], aggregated_df["tp"].values[:, None])
            },
            coords={
                "time": aggregated_df["time"],
                "basin": [basin],
            },
        )
        return {time_unit: aggregated_ds}

    def read_attr_xrdataset(self, gage_id_lst, var_lst):
        """Read static basin attributes for the requested basins/variables."""
        return xr.open_dataset("data/attributes.nc").sel(basin=gage_id_lst)[var_lst]

    def read_mean_prcp(self, gage_id_lst=None, unit="mm/3h"):
        """read mean precipitation of each basin

        The annual precipitation ``pre_mm_syr`` is converted to the requested
        per-period unit (mm/3h, mm/h, mm/d).  Unknown units fall back to the
        3-hourly conversion for backward compatibility with the old behavior,
        which always divided by 8760/3 regardless of ``unit``.

        Parameters
        ----------
        gage_id_lst : list, optional
            the list of gage ids, by default None
        unit : str, optional
            the unit of precipitation, by default "mm/3h"

        Returns
        -------
        xr.Dataset
            the mean precipitation of each basin
        """
        ds = xr.open_dataset("data/attributes.nc").sel(basin=gage_id_lst)
        # Bug fix: divisor now follows `unit` instead of always assuming
        # 3-hourly periods while still labelling the result with `unit`.
        periods = self._PERIODS_PER_YEAR.get(unit, 8760 / 3)
        converted_data = ds["pre_mm_syr"] / periods
        converted_data.attrs["units"] = unit
        ds["pre_mm_syr"] = converted_data
        return ds[["pre_mm_syr"]]


def agg_rain_data(tp_data, t_range, time_unit):
    """Aggregate precipitation records onto a regular grid of `time_unit` bins.

    Builds a zero-filled series spanning ``t_range`` (hourly for "3h",
    daily otherwise), overlays the observed values from ``tp_data`` where
    timestamps match, then sums the series into consecutive ``time_unit``
    windows anchored at the range start.

    Parameters
    ----------
    tp_data : dict or DataFrame-like
        Must provide "time" (parseable timestamps) and "tp" columns.
    t_range : list
        Strings shaped "YYYY-MM-DD HH" (character 10 is replaced by "T"
        before parsing); entries 0 and 1 bound the output grid.
    time_unit : str
        A pandas offset/timedelta string such as "3h" or "1D".

    Returns
    -------
    pd.DataFrame
        Columns "time" (first timestamp of each window) and "tp" (window sum).
    """
    # Normalize the bound strings to "YYYY-MM-DDTHH" and parse them.
    stamps = [
        pd.to_datetime(s[:10] + "T" + s[11:], format="%Y-%m-%dT%H") for s in t_range
    ]
    base_freq = "h" if time_unit == "3h" else "D"
    grid = pd.date_range(start=stamps[0], end=stamps[1], freq=base_freq)

    # Zero baseline over the full grid; observations overwrite matching times.
    rain = pd.DataFrame({"time": grid, "tp": 0})
    obs = pd.DataFrame(tp_data)
    obs["time"] = pd.to_datetime(obs["time"])
    rain["tp"] = rain["tp"].astype(float)
    obs["tp"] = obs["tp"].astype(float)
    rain = rain.set_index("time")
    obs = obs.set_index("time")
    if base_freq == "D":
        # Collapse sub-daily observations to daily totals before overlaying.
        obs = obs.resample("D").sum(numeric_only=True)
    rain.update(obs["tp"])
    rain = rain.reset_index()

    # Assign each grid point to a window counted from the grid start.
    rain["time_group"] = (rain["time"] - rain["time"].min()) // pd.Timedelta(time_unit)

    # One row per window: first timestamp labels the window, tp is summed.
    per_window = rain.groupby("time_group").agg({"time": "first", "tp": "sum"})
    return per_window.reset_index(drop=True)
