# -*- coding:utf-8 -*-
import json
import os
import math
from math import sqrt
import itertools
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import torch
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from torch.utils.data import Dataset, DataLoader
from scipy.spatial import distance
from gwlsa_settings import net_params
from utils.landslide_utils import load_df_fromfile

r"""
The package of `datasets` includes the following functions:
    1. init_dataset: initialize the dataset for training, validation and testing
    2. BasicDistance: calculate the distance matrix of spatial/spatio-temporal data
    3. baseDataset: the base class of dataset
"""

def calc_max_neighbours(buffer_distance, resolution):
    '''
    Count the maximum number of neighbour cells within a buffer distance.

    The neighbourhood is the set of integer grid offsets (dx, dy) whose
    Euclidean distance from the origin is at most ``buffer_distance /
    resolution``. A small epsilon (0.01) is added to the radius so that
    points lying exactly on the boundary are counted, matching the intent
    of the original buffered-circle test.

    This replaces the previous implementation, which built a GeoDataFrame
    of candidate points and tested containment against a buffered polygon:
    the buffer polygon is only an inscribed approximation of the true
    circle, while the arithmetic test below is exact — and it drops the
    geopandas/shapely dependency for this computation entirely.

    Parameters
    ----------
    buffer_distance : float
        Buffer radius in map units (e.g. metres).
    resolution : float
        Spatial resolution (cell size) in the same units.

    Returns
    -------
    int
        Number of grid cells (including the centre cell) inside the buffer.
    '''
    # Radius expressed in cell units; may be fractional.
    radius = buffer_distance / resolution
    # Enlarge slightly so boundary points are included (same epsilon as before).
    limit_sq = (radius + 0.01) ** 2
    # A search window of ceil(radius) cells in each direction is sufficient.
    magnification = math.ceil(buffer_distance / resolution)
    offsets = range(-magnification, magnification + 1)
    # Exact lattice-point count: compare squared distances to avoid sqrt.
    return sum(1 for dx in offsets for dy in offsets
               if dx * dx + dy * dy <= limit_sq)


def find_neighbours(target_x_utm, target_y_utm, gdf, radius_utm=127.28):
    '''
    Find all neighbours of a query point within a given radius.

    Parameters
    ----------
    target_x_utm: float
        X coordinate of the query point (UTM CRS).
    target_y_utm: float
        Y coordinate of the query point (UTM CRS).
    gdf: GeoDataFrame
        Source data; its CRS is assumed to already be UTM.
    radius_utm: float
        Default buffer radius: sqrt(2) * 90 m ~= 127.28 m; adjust as needed.
        A larger buffer captures more neighbour points.

    Returns
    -------
    DataFrame
        All rows of ``gdf`` whose geometry intersects the buffer.
    '''
    # gdf = gdf.set_index('id', drop=False)
    # Build a circular buffer around the query point (in UTM coordinates),
    # padded slightly so geometries exactly on the boundary are still kept.
    query_point = Point(target_x_utm, target_y_utm)
    query_buffer = query_point.buffer(radius_utm + 0.01)

    # Spatial predicate: select every feature that touches the buffer.
    # NOTE: this assumes gdf is already in a UTM coordinate reference system.
    mask = gdf.intersects(query_buffer)
    return gdf[mask]

def split_train_val_test(gdf, test_ratio=0.2, val_ratio=0.2, sample_seed=42):
    '''
    Split a GeoDataFrame into stratified train / validation / test sets.

    Both splits are stratified on the label column configured in
    ``net_params['y_column_name']`` and seeded by ``sample_seed`` so the
    partition is reproducible.
    '''
    label_col = net_params['y_column_name']

    # First split: carve the test set off the full data, stratified on labels.
    outer_splitter = StratifiedShuffleSplit(n_splits=1, test_size=test_ratio,
                                            random_state=sample_seed)
    train_val_idx, test_idx = next(outer_splitter.split(gdf, gdf[label_col]))
    train_val_df = gdf.iloc[train_val_idx].reset_index(drop=True)
    test_df = gdf.iloc[test_idx].reset_index(drop=True)

    # Second split: take the validation set out of the remaining data.
    # val_ratio is expressed w.r.t. the whole dataset, hence the rescale.
    inner_splitter = StratifiedShuffleSplit(n_splits=1,
                                            test_size=val_ratio / (1 - test_ratio),
                                            random_state=sample_seed)
    train_idx, val_idx = next(inner_splitter.split(train_val_df,
                                                   train_val_df[label_col]))
    train_df = train_val_df.iloc[train_idx].reset_index(drop=True)
    val_df = train_val_df.iloc[val_idx].reset_index(drop=True)

    return train_df, val_df, test_df


class baseDataset(Dataset):
    r"""
    baseDataset is the base class of dataset, which is used to store the data and other information.
    it also provides the function of data scaling, data saving and data loading.

    Parameters
    ----------
    data: geopandas.GeoDataFrame
        geodataframe holding the training, validation or test split.
        NOTE: the data passed in here is expected to be already scaled.
    x_columns: list
        independent variable column names
    y_column: list
        dependent variable column name
    id_column: str or list
        id column name; if a list, only its first element is used
    spatial_columns: list
        column names of the spatial coordinates (x, y); must not be None
    buffer_distance: float
        buffer radius used to derive the maximum neighbour count
    resolution: float
        spatial resolution used to derive the maximum neighbour count
    """

    def __init__(self, data=None, x_columns: list = None, y_column: list = None, id_column=None,
                 spatial_columns=None, buffer_distance=127.28, resolution=90):
        self.x_columns = x_columns
        self.y_column = y_column
        self.spatial_columns = spatial_columns

        # If a list is given for the id column, only its first element is used.
        self.id = id_column[0] if type(id_column)==list else id_column
        self.buffer_distance = buffer_distance
        # Derive the maximum possible number of neighbours from the buffer
        # distance and the spatial resolution.
        self.max_num_neighbours = calc_max_neighbours(buffer_distance, resolution)
        # print(f'self.max_num_neighbours:{self.max_num_neighbours}')
        # Keep the id column in the data area (rather than only as an index).
        self.dataframe = data    # NOTE: this data is expected to be scaled already

        if data is None:
            self.x_data = None
            self.datasize = -1
            self.coefsize = -1
            self.y_data = None
            self.id_data = None
        else:
            self.x_data = data[x_columns].astype(np.float32).values  # x_data is independent variables data
            self.datasize = self.x_data.shape[0]  # datasize is the number of samples
            self.coefsize = len(x_columns) + 1  # coefsize is the number of coefficients; +1 accounts for the bias term
            self.y_data = data[y_column].astype(np.float32).values  # y_data is dependent variables data
            if id_column is not None:
                self.id_data = data[self.id].astype(np.int64).values
            else:
                raise ValueError("id_column is None!")
            if spatial_columns is None:
                raise ValueError("spatial_columns is None!")

        self.batch_size = None
        self.shuffle = None

    def __len__(self):
        """
        :return: the number of samples
        """
        return self.dataframe.shape[0]

    def __getitem__(self, index):
        """
        :param index: the index of sample
        :return: a 5-tuple of tensors: (distances to neighbours,
            neighbour X matrix, current X with bias column, current y, current id)
        """
        # Each row is expected to carry nested 'neighbours' and 'distances'
        # frames — presumably built upstream in load_df_fromfile; TODO confirm.
        df_neighbours_X = self.dataframe.loc[index]['neighbours'][self.x_columns+['xb']]
        arr_neighbours_X = df_neighbours_X.values
        arr_distances = self.dataframe.loc[index]['distances']['distance'].values

        # Only the sample's own y value is returned; neighbour y values are not needed.
        cur_y = self.dataframe.loc[index][self.y_column]
        cur_id = self.dataframe.loc[index][self.id]
        arr_cur_x = self.dataframe.loc[index][self.x_columns+['xb']].values.astype(float) # 'xb' is the bias column appended to the sample's X; its value is 1.0

        return (torch.tensor(arr_distances, dtype=torch.float),
                torch.tensor(arr_neighbours_X, dtype=torch.float),
                torch.tensor(arr_cur_x, dtype=torch.float),
                torch.tensor(cur_y, dtype=torch.float),
                torch.tensor(cur_id, dtype=torch.float))
    def scale(self, scale_fn=None, scale_params=None):
        """
        scale the data by MinMaxScaler or StandardScaler
        | the scale function will scale the independent variable data and add a column of 1 to the data

        NOTE(review): this method reads ``self.x``, which is only assigned in
        ``read()``; ``__init__`` stores the column list as ``self.x_columns``
        instead, so calling ``scale()`` on a freshly constructed instance will
        raise AttributeError — confirm the intended call order.

        Parameters
        ----------
        scale_fn: str
            scale function name
            | if ``minmax_scale``, use MinMaxScaler
            | if ``standard_scale``, use StandardScaler
        scale_params: list
            scaler with scale parameters
            | if ``minmax_scale``, scale_params is a list of MinMaxScaler
            | if ``standard_scale``, scale_params is a list of StandardScaler

        """
        if scale_fn == "minmax_scale":
            self.scale_fn = "minmax_scale"
            x_scale_params = scale_params[0]
            y_scale_params = scale_params[1]
            self.x_scale_info = {"min": x_scale_params.data_min_, "max": x_scale_params.data_max_}
            self.x_data = x_scale_params.transform(pd.DataFrame(self.x_data, columns=self.x))
            self.y_scale_info = {"min": y_scale_params.data_min_, "max": y_scale_params.data_max_}
        elif scale_fn == "standard_scale":
            self.scale_fn = "standard_scale"
            x_scale_params = scale_params[0]
            y_scale_params = scale_params[1]
            self.x_scale_info = {"mean": x_scale_params.mean_, "var": x_scale_params.var_}
            self.x_data = x_scale_params.transform(pd.DataFrame(self.x_data, columns=self.x))
            self.y_scale_info = {"mean": y_scale_params.mean_, "var": y_scale_params.var_}

        self.getScaledDataframe()

        # Append the bias column of ones to the scaled attributes.
        self.x_data = np.concatenate((self.x_data, np.ones(
            (self.datasize, 1))), axis=1)

    def scale2(self, scale_fn, scale_params):
        """
        scale the data with the scale function and scale parameters

        Parameters
        ----------
        scale_fn: str
            scale function name
            | if ``minmax_scale``, use MinMaxScaler
            | if ``standard_scale``, use StandardScaler
        scale_params: list
            scaler with scale parameters
            | if ``minmax_scale``, scale_params is a list of dict with ``min`` and ``max``
            | if ``standard_scale``, scale_params is a list of dict with ``mean`` and ``var``
        """
        if scale_fn == "minmax_scale":
            self.scale_fn = "minmax_scale"
            x_scale_params = scale_params[0]
            y_scale_params = scale_params[1]  # NOTE: unpacked but unused in this branch
            self.x_data = (self.x_data - x_scale_params["min"]) / (x_scale_params["max"] - x_scale_params["min"])
        elif scale_fn == "standard_scale":
            self.scale_fn = "standard_scale"
            x_scale_params = scale_params[0]
            y_scale_params = scale_params[1]  # NOTE: unpacked but unused in this branch
            self.x_data = (self.x_data - x_scale_params['mean']) / np.sqrt(x_scale_params["var"])

        self.getScaledDataframe()

        # Append the bias column of ones to the scaled attributes.
        self.x_data = np.concatenate((self.x_data, np.ones((self.datasize, 1))), axis=1)

    def getScaledDataframe(self):
        """
        get the scaled dataframe and save it in ``scaledDataframe``

        NOTE(review): reads ``self.x`` and ``self.y``, which are only set in
        ``read()`` (``__init__`` uses ``x_columns``/``y_column``) — confirm
        this is only called after ``read()`` or after those attributes are
        assigned externally.
        """
        columns = np.concatenate((self.x, self.y), axis=0)
        scaledData = np.concatenate((self.x_data, self.y_data), axis=1)
        self.scaledDataframe = pd.DataFrame(scaledData, columns=columns)

    def rescale(self, x):
        """
        rescale (inverse-transform) the independent variable data with the
        stored scale function and scale parameters

        Parameters
        ----------
        x: numpy.ndarray
            scaled independent variable data

        Returns
        -------
        x: numpy.ndarray
            rescaled independent variable data

        Raises
        ------
        ValueError
            if ``self.scale_fn`` is neither "minmax_scale" nor "standard_scale"
        """
        if self.scale_fn == "minmax_scale":
            x = np.multiply(x, self.x_scale_info["max"] - self.x_scale_info["min"]) + self.x_scale_info["min"]
        elif self.scale_fn == "standard_scale":
            x = np.multiply(x, np.sqrt(self.x_scale_info["var"])) + self.x_scale_info["mean"]
        else:
            raise ValueError("invalid process_fn")
        return x


    def save(self, dirname, prefix, save_distance=True):
        """
        save the dataset: metadata JSON, distance matrix and dataframes

        :param dirname: save directory (created if missing)
        :param prefix: filename prefix for the saved .npy/.csv files
        :param save_distance: whether to save the distance matrix

        NOTE(review): requires ``self.x``, ``self.y``, ``self.is_need_STNN``,
        ``self.simple_distance``, ``self.distances_scale_param`` and
        ``self.distances``, none of which are set by ``__init__`` — they must
        be assigned externally or via ``read()`` first.
        """
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if self.dataframe is None:
            raise ValueError("dataframe is None")
        # Convert numpy arrays in the scale info to plain lists for JSON.
        x_scale_info = {}
        y_scale_info = {}
        for key, value in self.x_scale_info.items():
            x_scale_info[key] = value.tolist()
        for key, value in self.y_scale_info.items():
            y_scale_info[key] = value.tolist()
        with open(os.path.join(dirname, "dataset_info.json"), "w") as f:
            distance_scale_info = {}
            for key in self.distances_scale_param.keys():
                distance_scale_info[key] = self.distances_scale_param[key].tolist()
            json.dump({"x": self.x,
                       "y": self.y,
                       "id": self.id,
                       "batch_size": self.batch_size,
                       "shuffle": self.shuffle,
                       "is_need_STNN": self.is_need_STNN,
                       "scale_fn": self.scale_fn,
                       "x_scale_info": json.dumps(x_scale_info),
                       "y_scale_info": json.dumps(y_scale_info),
                       "distance_scale_info": json.dumps(distance_scale_info),
                       'simple_distance': self.simple_distance
                       }, f)
        # save the distance matrix
        if save_distance:
            np.save(os.path.join(dirname, "{0}_distances.npy".format(prefix)), self.distances)
        # save dataframe
        self.dataframe.to_csv(os.path.join(dirname, "{0}_dataframe.csv".format(prefix)), index=False)
        self.scaledDataframe.to_csv(os.path.join(dirname, "{0}_scaledDataframe.csv".format(prefix)), index=False)

    def read(self, dirname):
        """
        read the dataset by the directory

        :param dirname: read directory

        NOTE(review): ``save()`` writes "{prefix}_distances.npy" and
        "{prefix}_dataframe.csv", but this method loads the un-prefixed
        "distances.npy" / "dataframe.csv" — confirm the expected filenames.
        """
        if not os.path.exists(dirname):
            raise ValueError("dir is not exists")
        # read the information of dataset
        with open(os.path.join(dirname, "dataset_info.json"), "r") as f:
            dataset_info = json.load(f)
        self.x = dataset_info["x"]
        self.y = dataset_info["y"]
        self.id = dataset_info["id"]
        self.batch_size = dataset_info["batch_size"]
        self.shuffle = dataset_info["shuffle"]
        self.is_need_STNN = dataset_info["is_need_STNN"]
        self.scale_fn = dataset_info["scale_fn"]
        self.simple_distance = dataset_info["simple_distance"]
        self.x_scale_info = json.loads(dataset_info["x_scale_info"])
        self.y_scale_info = json.loads(dataset_info["y_scale_info"])
        self.distances_scale_param = json.loads(dataset_info["distance_scale_info"])
        # Convert the JSON lists back to numpy arrays in place.
        x_scale_info = self.x_scale_info
        y_scale_info = self.y_scale_info
        for key, value in x_scale_info.items():
            x_scale_info[key] = np.array(value)
        for key, value in y_scale_info.items():
            y_scale_info[key] = np.array(value)
        # read the distance matrix
        self.distances = np.load(os.path.join(dirname, "distances.npy")).astype(np.float32)
        # read dataframe
        self.dataframe = pd.read_csv(os.path.join(dirname, "dataframe.csv"))
        self.x_data = self.dataframe[self.x].astype(np.float32).values
        self.datasize = self.x_data.shape[0]
        self.y_data = self.dataframe[self.y].astype(np.float32).values
        self.id_data = self.dataframe[self.id].astype(np.int64).values
        self.coefsize = len(self.x) + 1
        # Re-apply scaling (and bias column) using the restored parameters.
        self.scale2(self.scale_fn, [self.x_scale_info, self.y_scale_info])

class predictDataset(Dataset):
    """
    Predict dataset is used to predict the dependent variable of the data.

    :param data: dataframe with the raw input data
    :param x_column: independent variable column names
    :param process_fn: scaling function name: "minmax_scale" or "standard_scale"
    :param scale_info: scaling parameters; a list whose first element holds
        the x-data parameters ([min, max] or [mean, std-like])
    :param is_need_STNN: whether STNN is needed
    :param spatial_columns: column names of the spatial coordinates (x, y)
    """

    def __init__(self, data, x_column, process_fn="minmax_scale", scale_info=None, is_need_STNN=False, spatial_columns=None):

        # data = data.astype(np.float32)
        if scale_info is None:
            scale_info = []
        self.dataframe = data
        self.x = x_column
        self.spatial_columns = spatial_columns
        if spatial_columns is not None:
            self.pos_data = data[spatial_columns].astype(np.float32).values
        else:
            raise ValueError("spatial_columns is None")
        # NOTE(review): if data were None, the data[spatial_columns] access
        # above would already have raised, so this branch looks unreachable.
        if data is None:
            self.x_data = None
            self.datasize = -1
            self.coefsize = -1
        else:
            self.x_data = data[x_column].astype(np.float32).values  # x_data is independent variables data
            self.datasize = self.x_data.shape[0]  # datasize is the number of samples
            self.coefsize = len(x_column) + 1  # coefsize is the number of coefficients
        self.is_need_STNN = is_need_STNN
        self.process_fn = process_fn
        if len(scale_info):
            self.scale_info_x = scale_info[0]  # scale information of x_data
            self.use_scale_info = True
        else:
            self.use_scale_info = False
        # Data pre-processing: scale x_data, optionally reusing externally
        # supplied scale parameters (e.g. those fitted on the training set).
        if process_fn == "minmax_scale":
            self.scale_fn = "minmax_scale"
            # stander = MinMaxScaler()
            # self.x_data = stander.fit_transform(self.x_data)
            if self.use_scale_info:
                self.x_data = self.minmax_scaler(self.x_data, self.scale_info_x[0], self.scale_info_x[1])
            else:
                self.x_data = self.minmax_scaler(self.x_data)
        elif process_fn == "standard_scale":
            self.scale_fn = "standard_scale"
            # stander = StandardScaler()
            # self.x_data = stander.fit_transform(self.x_data)
            if self.use_scale_info:
                self.x_data = self.standard_scaler(self.x_data, self.scale_info_x[0], self.scale_info_x[1])
            else:
                self.x_data = self.standard_scaler(self.x_data)

        else:
            raise ValueError("invalid process_fn")

        # Append the bias column (all ones) to the scaled attributes.
        self.x_data = np.concatenate((self.x_data, np.ones(
            (self.datasize, 1))), axis=1)

        self.distances = None
        self.temporal = None

    def __len__(self):
        """
        :return: the number of samples
        """
        return len(self.x_data)

    def __getitem__(self, index):
        """
        :param index: sample index
        :return: (normalized distances from this sample to all samples,
                  this sample's independent variable data with bias column)
        """
        # if self.is_need_STNN:
        #     return torch.cat((torch.tensor(self.distances[index], dtype=torch.float),
        #                       torch.tensor(self.temporal[index], dtype=torch.float)), dim=-1), torch.tensor(
        #         self.x_data[index], dtype=torch.float)
        # Distances from the current point to every other point.
        pt = self.pos_data[index]
        pt = pt.reshape((-1, pt.shape[0]))  # pt.shape becomes (1, 2)
        all_pts = self.pos_data  # all_pts.shape: (batch_size, 2)
        pt_to_other_distances = BasicDistance(pt, all_pts)
        # Min-max normalize the distance matrix (proximity).
        # NOTE(review): fit_transform receives a (1, n) array, i.e. each
        # column is a feature with a single sample — confirm a transpose is
        # not missing here, since a one-sample fit maps every value to the
        # scaler's lower bound.
        distance_scale = MinMaxScaler()
        pt_to_other_distances = distance_scale.fit_transform(
            pt_to_other_distances.reshape(-1, pt_to_other_distances.shape[-1])).reshape(
            pt_to_other_distances.shape)  # shape: (1, samples)
        pt_to_other_distances = np.squeeze(pt_to_other_distances, axis=0)  # shape is now (samples,)
        return (torch.tensor(pt_to_other_distances, dtype=torch.float),
                torch.tensor(self.x_data[index], dtype=torch.float))

    def rescale(self, x):
        """
        rescale the attribute data

        :param x: Input attribute data
        :return: rescaled attribute data

        NOTE(review): the standard_scale branch treats ``scale_info_x[1]`` as
        a variance (takes its sqrt) — confirm callers pass variance, not std.
        """
        if self.scale_fn == "minmax_scale":
            x = x * (self.scale_info_x[1] - self.scale_info_x[0]) + self.scale_info_x[0]
        elif self.scale_fn == "standard_scale":
            x = x * np.sqrt(self.scale_info_x[1]) + self.scale_info_x[0]
        else:
            raise ValueError("invalid process_fn")

        return x

    def minmax_scaler(self, x, min=None, max=None):
        """
        function of minmax scaler; fits on x itself when no min/max is given

        :param x: Input attribute data
        :param min: minimum value of each attribute
        :param max: maximum value of each attribute
        :return: Output attribute data
        """
        if max is None:
            max = []
        if min is None:
            min = []
        if len(min) == 0:
            x = (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))
        else:
            x = (x - min) / (max - min)
        return x

    def standard_scaler(self, x, mean=None, std=None):
        """
        function of standard scaler; fits on x itself when no mean/std is given

        :param x: Input attribute data
        :param mean: mean value of each attribute
        :param std: standard deviation of each attribute
        :return: Output attribute data
        """
        if std is None:
            std = []
        if mean is None:
            mean = []
        if len(mean) == 0:
            x = (x - x.mean(axis=0)) / x.std(axis=0)
        else:
            x = (x - mean) / std
        return x


def BasicDistance(x, y):
    """
    Compute the pairwise Euclidean distance matrix between two point sets.

    :param x: source point coordinates, shape (m, d)
    :param y: target point coordinates, shape (n, d)
    :return: (m, n) Euclidean distance matrix
    """
    # Coerce both inputs to float32 arrays before handing them to cdist.
    src = np.asarray(x, dtype=np.float32)
    dst = np.asarray(y, dtype=np.float32)
    return distance.cdist(src, dst, 'euclidean')

def LocalDistance(p1_arr, p2_arr, max_distance=60, resolution=30, ):
    """
    Build an inverse-square proximity-weight matrix between two point sets.

    Despite the name, each entry is a *weight*, not a distance:
      - 0.0                  when the x-offset exceeds the cut-off
      - 1.0                  when the two points coincide
      - 1 / squared_distance otherwise

    :param p1_arr: source point coordinates, shape (m, 2)
    :param p2_arr: target point coordinates, shape (n, 2)
    :param max_distance: cut-off distance (same units as resolution)
    :param resolution: spatial resolution
    :return: (m, n) weight matrix
    """
    p1_arr = np.float32(p1_arr)
    p2_arr = np.float32(p2_arr)
    # Fix: the original allocated an (m, m) matrix from len(p1_arr), which
    # raised IndexError whenever len(p2_arr) > len(p1_arr); the result is (m, n).
    dist = np.zeros((len(p1_arr), len(p2_arr)), dtype=float)
    # Hoisted loop invariant. Note only the x-offset is tested, as in the
    # original, and the resolution factors cancel algebraically.
    cutoff = (max_distance / resolution) * sqrt(2) * resolution
    for idx1, pt1 in enumerate(p1_arr):
        x1, y1 = pt1[0], pt1[1]
        for idx2, pt2 in enumerate(p2_arr):
            x2, y2 = pt2[0], pt2[1]
            if abs(x1 - x2) > cutoff:
                dist[idx1, idx2] = 0
            else:
                # Renamed from `sum`, which shadowed the builtin.
                sq_dist = (x1 - x2) ** 2 + (y1 - y2) ** 2
                # Coincident points get the maximum weight of 1.0.
                dist[idx1, idx2] = 1.0 / sq_dist if sq_dist != 0.0 else 1.0
    return dist


def Manhattan_distance(x, y):
    """
    Compute the pairwise Manhattan (L1) distance matrix between two point sets.

    :param x: source point coordinates, shape (m, d)
    :param y: target point coordinates, shape (n, d)
    :return: (m, n) float32 distance matrix
    """
    # Broadcast x against y, then sum absolute coordinate differences per pair.
    abs_diffs = np.abs(x[:, np.newaxis, :] - y)
    return np.float32(abs_diffs.sum(axis=2))


def init_dataset(data_dir, x_column, y_column, spatial_column=None,
                 id_column=None, sample_seed=42,
                 batch_size=32, shuffle=True,
                 use_class=baseDataset,
                 max_val_size=-1, max_test_size=-1,
                 buffer_distance=127.28, resolution=90, num_works=0,print_scale_info=False):
    """
    Initialize the dataset and return the training set, validation set and test set for the model

    :param data_dir: directory containing the train, val and test data files
    :param x_column: input attribute column names
    :param y_column: output attribute column name(s)
    :param spatial_column: spatial coordinate column names; must not be None
    :param id_column: id column name
    :param sample_seed: random seed (seeds numpy here; the actual splitting
        happens inside ``load_df_fromfile``)
    :param batch_size: batch size of the training DataLoader
    :param shuffle: whether to shuffle the training data
    :param use_class: dataset class used to wrap each split
    :param max_val_size: max validation batch size in one injection (-1 means the whole validation set)
    :param max_test_size: max test batch size in one injection (-1 means the whole test set)
    :param buffer_distance: neighbour buffer distance forwarded to the dataset class
    :param resolution: spatial resolution forwarded to the dataset class
    :param num_works: number of DataLoader worker processes, default 0
    :param print_scale_info: whether to print data scaling info, default False
        (currently not used inside this function)
    :return: train dataset, valid dataset, test dataset
    """
    # if not isinstance(data, gpd.GeoDataFrame):
    #     raise ValueError("whole data must be a geoPandas.GeoDataFrame")


    if spatial_column is None:
        # if dist_column is None, raise error
        raise ValueError(
            "dist_column must be a column name in data")

    np.random.seed(sample_seed)

    train_df, val_df, test_df = load_df_fromfile(data_dir, buffer_distance, resolution, file_format=net_params['data_load_format'])

    # Use the parameters of the dataset to normalize the train_dataset, val_dataset, and test_dataset
    train_dataset = use_class(train_df, x_column, y_column, id_column,
                              spatial_columns=spatial_column, buffer_distance=buffer_distance, resolution=resolution)
    val_dataset = use_class(val_df, x_column, y_column, id_column,
                            spatial_columns=spatial_column, buffer_distance=buffer_distance, resolution=resolution)
    test_dataset = use_class(test_df, x_column, y_column, id_column,
                             spatial_columns=spatial_column, buffer_distance=buffer_distance, resolution=resolution)


    train_dataset.spatial_column = val_dataset.spatial_column = test_dataset.spatial_column = spatial_column
    train_dataset.x_column = val_dataset.x_column = test_dataset.x_column = x_column
    train_dataset.y_column = val_dataset.y_column = test_dataset.y_column = y_column

    # initialize dataloader for train/val/test dataset
    if max_val_size < 0:
        max_val_size = len(val_dataset)
    if max_test_size < 0:
        max_test_size = len(test_dataset)

    if num_works>0:
        is_pin_memory = True
        prefet_factor = 1
        pers_workers = True
    else:
        is_pin_memory = False
        prefet_factor = 2   # NOTE(review): original comment claimed torch 2.8 defaults prefetch_factor to 2 and None raises; some torch versions reject a non-None prefetch_factor when num_workers == 0 — verify against the installed torch
        pers_workers = False    # persistent_workers must be False when num_workers == 0
    train_dataset.dataloader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_works, pin_memory=is_pin_memory, persistent_workers=pers_workers, prefetch_factor=prefet_factor)
    val_dataset.dataloader = DataLoader(
        val_dataset, batch_size=max_val_size, shuffle=False, num_workers=num_works, pin_memory=is_pin_memory, persistent_workers=pers_workers, prefetch_factor=prefet_factor)
    test_dataset.dataloader = DataLoader(
        test_dataset, batch_size=max_test_size, shuffle=False, num_workers=num_works, pin_memory=is_pin_memory, persistent_workers=pers_workers, prefetch_factor=prefet_factor)
    train_dataset.batch_size, train_dataset.shuffle = batch_size, shuffle
    val_dataset.batch_size, val_dataset.shuffle = max_val_size, False
    test_dataset.batch_size, test_dataset.shuffle = max_test_size, False
    return train_dataset, val_dataset, test_dataset


def init_dataset_cv(data, test_ratio, k_fold, x_column, y_column, spatial_column=None, temp_column=None,
                    id_column=None,
                    sample_seed=100,
                    process_fn="minmax_scale", batch_size=32, shuffle=True, use_class=baseDataset,
                    spatial_fun=BasicDistance, temporal_fun=Manhattan_distance, max_val_size=-1, max_test_size=-1,
                    is_need_STNN=False, Reference=None, simple_distance=True):
    """
    initialize dataset for cross validation

    NOTE(review): the ``init_dataset`` call below still uses a legacy
    positional signature (data, test_ratio, valid_ratio, ..., process_fn,
    spatial_fun, temporal_fun, ...) that no longer matches the current
    ``init_dataset(data_dir, x_column, y_column, ...)`` definition in this
    file — invoking this function will raise a TypeError until the call is
    updated.

    :param data: input data
    :param test_ratio: test set ratio
    :param k_fold:  k of k-fold
    :param x_column: attribute column name
    :param y_column: label column name
    :param spatial_column: spatial distance column name
    :param temp_column: temporal distance column name
    :param id_column: id column name
    :param sample_seed: random seed
    :param process_fn: data process function
    :param batch_size: batch size
    :param shuffle: shuffle or not
    :param use_class: dataset class
    :param spatial_fun: spatial distance calculate function
    :param temporal_fun: temporal distance calculate function
    :param max_val_size: validation set size
    :param max_test_size: test set size
    :param is_need_STNN: whether need STNN
    :param Reference: reference data
    :param simple_distance: is simple distance
    :return: cv_data_set, test_dataset
    """
    cv_data_set = []
    # Each fold's validation share, relative to the whole dataset.
    valid_ratio = (1 - test_ratio) / k_fold
    test_dataset = None
    for i in range(k_fold):
        train_dataset, val_dataset, test_dataset = init_dataset(data, test_ratio, valid_ratio, x_column, y_column,
                                                                spatial_column,
                                                                temp_column,
                                                                id_column,
                                                                sample_seed,
                                                                process_fn, batch_size, shuffle, use_class,
                                                                spatial_fun, temporal_fun, max_val_size, max_test_size,
                                                                i, is_need_STNN, Reference, simple_distance)
        cv_data_set.append((train_dataset, val_dataset))
    return cv_data_set, test_dataset


def _pairwise_point_matrix(query_points, reference_points, query_size):
    """
    Tile query and reference coordinate rows into a pairwise matrix.

    Produces an array of shape (query_size, n_reference, 2 * dim) where entry
    [i, j] is the concatenation of query point i's coordinates with reference
    point j's coordinates.

    :param query_points: 2-D array (query_size, dim) of query coordinates
    :param reference_points: 2-D array (n_reference, dim) of reference coordinates
    :param query_size: number of query rows (== len(query_points))
    :return: 3-D numpy array pairing every query point with every reference point
    """
    tiled_query = np.repeat(query_points[:, np.newaxis, :],
                            len(reference_points), axis=1)
    tiled_reference = np.repeat(reference_points[:, np.newaxis, :],
                                query_size, axis=1)
    # Transpose so the reference axis lines up with axis 1 of the query tiling.
    return np.concatenate(
        (tiled_query, np.transpose(tiled_reference, (1, 0, 2))), axis=2)


def init_predict_dataset(data, train_dataset, x_column, spatial_column=None, temp_column=None,
                         process_fn="minmax_scale", scale_sync=True, use_class=predictDataset,
                         spatial_fun=BasicDistance, temporal_fun=Manhattan_distance, max_size=-1, is_need_STNN=False):
    """
    Initialize a prediction dataset, reusing the scaling parameters fitted on
    the training dataset, and attach its spatial/temporal distance matrices
    and a DataLoader.

    :param data: input data (DataFrame-like, columns indexed by name)
    :param train_dataset: fitted train dataset supplying scale params and reference points
    :param x_column: attribute column name(s)
    :param spatial_column: spatial coordinate column name(s); required
    :param temp_column: temporal coordinate column name(s); optional
    :param process_fn: data process function ("minmax_scale" or standard scaling otherwise)
    :param scale_sync: if True, reuse the train set's attribute scale parameters
    :param use_class: dataset class to instantiate
    :param spatial_fun: spatial distance calculate function (simple-distance mode)
    :param temporal_fun: temporal distance calculate function (simple-distance mode)
    :param max_size: dataloader batch size; negative means one batch of the whole set
    :param is_need_STNN: if True, build raw coordinate-pair matrices for an STNN
    :return: predict_dataset with .distances (/.temporal) and .dataloader populated
    :raises ValueError: if spatial_fun/spatial_column is missing or scale_fn is unknown
    """
    if spatial_fun is None:
        # if dist_fun is None, raise error
        raise ValueError(
            "dist_fun must be a function that can process the data")

    if spatial_column is None:
        # if dist_column is None, raise error
        raise ValueError(
            "dist_column must be a column name in data")

    # Reuse the attribute-scaling parameters fitted on the training set so the
    # prediction inputs are transformed identically to the training inputs.
    if train_dataset.scale_fn == "minmax_scale":
        process_params = [[train_dataset.x_scale_info['min'], train_dataset.x_scale_info['max']]]
    elif train_dataset.scale_fn == "standard_scale":
        process_params = [[train_dataset.x_scale_info['mean'], train_dataset.x_scale_info['std']]]
    else:
        raise ValueError("scale_fn must be minmax_scale or standard_scale")

    if scale_sync:
        predict_dataset = use_class(data=data, x_column=x_column, process_fn=process_fn, scale_info=process_params,
                                    is_need_STNN=is_need_STNN)
    else:
        predict_dataset = use_class(data=data, x_column=x_column, process_fn=process_fn, is_need_STNN=is_need_STNN)

    reference_data = train_dataset.reference

    if not is_need_STNN:
        # Without STNN the model consumes precomputed distances.
        if train_dataset.simple_distance:
            # Scalar distance from every predict point to every reference point.
            predict_dataset.distances = spatial_fun(
                data[spatial_column].values, reference_data[spatial_column].values)

            if temp_column is not None:
                # Temporal scalar distances, stacked with the spatial ones
                # along a new trailing axis.
                predict_dataset.temporal = temporal_fun(
                    data[temp_column].values, reference_data[temp_column].values)

                predict_dataset.distances = np.concatenate(
                    (predict_dataset.distances[:, :, np.newaxis], predict_dataset.temporal[:, :, np.newaxis]),
                    axis=2)  # concatenate spatial and temporal distance matrix
        else:
            # Non-simple mode: pair raw coordinates instead of scalar distances.
            predict_dataset.distances = _pairwise_point_matrix(
                data[spatial_column].values, reference_data[spatial_column].values,
                predict_dataset.datasize)

            if temp_column is not None:
                predict_dataset.temporal = _pairwise_point_matrix(
                    data[temp_column].values, reference_data[temp_column].values,
                    predict_dataset.datasize)
            # NOTE(review): this concatenate runs even when temp_column is None,
            # relying on predict_dataset.temporal already existing — confirm
            # use_class initializes it, otherwise this raises AttributeError.
            predict_dataset.distances = np.concatenate(
                (predict_dataset.distances, predict_dataset.temporal), axis=2)

    else:
        # STNN consumes raw coordinate pairs; keep spatial and temporal
        # matrices separate instead of concatenating them.
        predict_dataset.distances = _pairwise_point_matrix(
            data[spatial_column].values, reference_data[spatial_column].values,
            predict_dataset.datasize)

        if temp_column is not None:
            predict_dataset.temporal = _pairwise_point_matrix(
                data[temp_column].values, reference_data[temp_column].values,
                predict_dataset.datasize)

    # Scale the distance matrix with parameters fitted on the training set.
    if process_fn == "minmax_scale":
        predict_dataset.distances = predict_dataset.minmax_scaler(predict_dataset.distances,
                                                                  train_dataset.distances_scale_param['min'],
                                                                  train_dataset.distances_scale_param['max'])
    else:
        predict_dataset.distances = predict_dataset.standard_scaler(predict_dataset.distances,
                                                                    train_dataset.distances_scale_param['mean'],
                                                                    train_dataset.distances_scale_param['var'])

    # A negative max_size means "serve the whole dataset as a single batch".
    if max_size < 0:
        max_size = len(predict_dataset)
    predict_dataset.dataloader = DataLoader(
        predict_dataset, batch_size=max_size, shuffle=False)

    return predict_dataset


def load_dataset(directory, use_class=baseDataset):
    """
    Restore a previously saved dataset from disk and rebuild its dataloader.

    :param directory: directory the dataset was written to
    :param use_class: dataset class to instantiate (defaults to baseDataset)
    :return: the restored dataset with its .dataloader attached
    """
    restored = use_class()
    restored.read(directory)
    restored.dataloader = DataLoader(
        restored, batch_size=restored.batch_size, shuffle=restored.shuffle)
    return restored