import os
import numpy as np
import mat73
import torch
import scipy.io
import glob
from torch.utils.data import Dataset, DataLoader, DistributedSampler
from sklearn.model_selection import train_test_split


def zscore_norm(data):
    """Z-score normalize a batch of multi-channel sequences.

    Args:
        data: tensor of shape (batch, channels, time) — assumed 3-D, since the
            statistics are reduced over dims (1, 2) per batch element.

    Returns:
        Tensor of the same shape, normalized to zero mean and unit (sample)
        standard deviation per batch element. A constant batch element no
        longer yields NaN: the std is clamped to a tiny epsilon before
        dividing.
    """
    # keepdim=True avoids the manual [:, None, None] broadcasting of the
    # original; torch.std defaults to the unbiased (n-1) estimator.
    mean = torch.mean(data, dim=(1, 2), keepdim=True)
    std = torch.std(data, dim=(1, 2), keepdim=True)

    # Guard against division by zero for constant inputs (previously NaN).
    return (data - mean) / std.clamp_min(1e-8)



def minmax_norm(data):
    """Min-max normalize each channel's sequence to the range [0, 1].

    Args:
        data: tensor whose last dim is the sequence/time axis; min and max
            are computed independently per channel along that axis.

    Returns:
        Tensor of the same shape scaled into [0, 1]. A constant sequence
        (max == min) no longer yields NaN: the denominator is clamped to a
        tiny epsilon, so such sequences map to all zeros.
    """
    # keepdim=True replaces the original's unsqueeze(-1) broadcasting.
    min_vals = torch.min(data, dim=-1, keepdim=True)[0]
    max_vals = torch.max(data, dim=-1, keepdim=True)[0]

    # Guard against division by zero for flat sequences (previously NaN).
    span = (max_vals - min_vals).clamp_min(1e-8)
    return (data - min_vals) / span


class EEGDataset(Dataset):
    """PyTorch map-style dataset pairing EEG samples with their labels.

    Args:
        X: indexable collection of samples.
        Y: indexable collection of labels, aligned with X.
        transform: optional callable applied to each sample on access.
    """

    def __init__(self, X, Y, transform=None):
        self.X = X
        self.Y = Y
        self.transform = transform

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.X)

    def __getitem__(self, index):
        """Return the (sample, label) pair at *index*, transformed if set."""
        sample, label = self.X[index], self.Y[index]
        if self.transform:
            sample = self.transform(sample)
        return sample, label





# def load_data(root_dir):
#     import scipy.io
#     import glob
#     import numpy as np
#
#     mat_files = glob.glob(f"{root_dir}/*.mat")  # 获取所有 .mat 文件路径
#     data_list = []
#     label_list = []
#
#     for mat_file in mat_files:
#         mat_data = scipy.io.loadmat(mat_file)
#
#         # 找到以 'a020' 开头的字段名（EEG 数据字段）
#         eeg_key = [key for key in mat_data.keys() if key.startswith('a020')][0]
#         eeg_data = mat_data[eeg_key][:-1, :]  # 去掉最后一行（参考电极）
#
#         # 强制转换字节序为本地系统支持的格式
#         if not eeg_data.dtype.isnative:
#             eeg_data = eeg_data.byteswap().newbyteorder()
#
#         # 根据文件名确定标签
#         label = 0 if "0201" in mat_file else 1  # 抑郁患者为 0，健康为 1
#
#         # 添加到列表
#         data_list.append(torch.tensor(eeg_data, dtype=torch.float32))
#         label_list.append(label)
#
#     # 拼接为完整的数据集
#     X = torch.stack(data_list, dim=0)  # Shape: (N_samples, N_channels, N_points)
#     Y = torch.tensor(label_list, dtype=torch.int64)  # Shape: (N_samples,)
#
#     return X, Y

def load_data(root_dir):
    """Load all .mat EEG recordings under *root_dir* into stacked tensors.

    Each .mat file must contain one variable whose name starts with 'a020';
    its last row (the reference electrode) is dropped. Recordings are
    zero-padded or truncated along the time axis to a fixed length so they
    can be stacked into one tensor.

    Args:
        root_dir: directory containing the .mat files.

    Returns:
        X: float32 tensor of shape (n_files, n_channels, 83872).
        Y: int64 label tensor — 0 when the filename contains "0201"
           (depressed patients), 1 otherwise (healthy controls).
    """
    # Fixed time dimension so all recordings stack into one tensor.
    target_length = 83872

    data_list = []
    label_list = []

    # Note: glob order is filesystem-dependent, so sample order is not stable
    # across runs/platforms.
    for mat_file in glob.glob(f"{root_dir}/*.mat"):
        mat_data = scipy.io.loadmat(mat_file)

        # The EEG variable is the key starting with 'a020' (IndexError if absent).
        eeg_key = [key for key in mat_data.keys() if key.startswith('a020')][0]
        eeg_data = mat_data[eeg_key][:-1, :]  # drop last row (reference electrode)

        # Normalize byte order to native. NumPy 2.0 removed
        # ndarray.newbyteorder(), so go through the dtype instead.
        if not eeg_data.dtype.isnative:
            eeg_data = eeg_data.byteswap().view(eeg_data.dtype.newbyteorder('='))

        # Zero-pad or truncate the time axis to the target length.
        n_time = eeg_data.shape[1]
        if n_time < target_length:
            eeg_data = np.pad(
                eeg_data, ((0, 0), (0, target_length - n_time)), mode='constant'
            )
        elif n_time > target_length:
            eeg_data = eeg_data[:, :target_length]

        # Label from the filename: depressed patients -> 0, healthy -> 1.
        label_list.append(0 if "0201" in mat_file else 1)
        data_list.append(eeg_data)

    # Stack into the full dataset.
    X = torch.stack([torch.tensor(d, dtype=torch.float32) for d in data_list],
                    dim=0)  # Shape: (n_samples, n_channels, target_length)
    Y = torch.tensor(label_list, dtype=torch.int64)  # Shape: (n_samples,)

    return X, Y



# for-DataParallel
def get_dataloader(X, Y, batch_size, batch_size2, seed, shuffle=True):
    """Build train/test DataLoaders from a stratified 80/20 split of (X, Y).

    Args:
        X: samples.
        Y: labels (used for stratification).
        batch_size: training loader batch size.
        batch_size2: test loader batch size.
        seed: random_state for the split, for reproducibility.
        shuffle: whether to shuffle both the split and the training loader.

    Returns:
        (training_loader, test_loader) tuple.
    """
    split = train_test_split(
        X, Y, test_size=0.2, shuffle=shuffle, stratify=Y, random_state=seed
    )
    X_train, X_test, Y_train, Y_test = split

    train_loader = DataLoader(
        EEGDataset(X_train, Y_train), batch_size=batch_size, shuffle=shuffle
    )
    # The evaluation loader is never shuffled.
    eval_loader = DataLoader(
        EEGDataset(X_test, Y_test), batch_size=batch_size2, shuffle=False
    )

    return train_loader, eval_loader

# for-DistributedDataParallel
# def get_dataloader(X, Y, batch_size, batch_size2, seed, shuffle=True, distributed=False, rank=0, world_size=1):
#     X_train, X_test, Y_train, Y_test = train_test_split(
#         X, Y, test_size=0.2, shuffle=shuffle, stratify=Y, random_state=seed
#     )
#
#     training_set = EEGDataset(X_train, Y_train)
#     test_set = EEGDataset(X_test, Y_test)
#
#     if distributed:
#         # 分布式采样器，确保每个 GPU 处理不同的数据
#         train_sampler = DistributedSampler(training_set, num_replicas=world_size, rank=rank, shuffle=shuffle)
#         test_sampler = DistributedSampler(test_set, num_replicas=world_size, rank=rank, shuffle=False)
#         shuffle = False  # 使用分布式采样器时禁用 shuffle
#     else:
#         train_sampler = None
#         test_sampler = None
#
#     training_loader = DataLoader(training_set, batch_size=batch_size, shuffle=shuffle, sampler=train_sampler)
#     test_loader = DataLoader(test_set, batch_size=batch_size2, shuffle=False, sampler=test_sampler)  # was train_sampler (copy-paste bug)
#
#     return training_loader, test_loader
