import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np


class PumpDataset(Dataset):
    """PyTorch dataset for pump condition-monitoring maintenance data.

    Parameters
    ----------
    X : np.ndarray of shape (num_samples, num_channels, sequence_length)
        Sensor measurement sequences.
    y : np.ndarray of shape (num_samples,) or (num_samples, num_labels)
        Integer system-state labels (class indices for classification).
    """

    def __init__(self, X, y):
        # Cast once up front instead of per item: float32 for the sensor
        # data, int64 for the labels (the dtype CrossEntropyLoss expects).
        self.X = X.astype(np.float32)
        self.y = y.astype(np.int64)

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.X)

    def __getitem__(self, idx):
        """Return the sample at ``idx`` as a dict with keys 'sequence' and 'label'.

        The sequence is transposed from (channels, sequence_length) to
        (sequence_length, channels) — the time-major layout expected by
        sequence models using batch_first semantics.
        """
        # from_numpy avoids the extra copy torch.tensor would make; the
        # array is already float32, and .transpose() returns a view.
        sequence = torch.from_numpy(self.X[idx]).transpose(0, 1)
        label = torch.tensor(self.y[idx], dtype=torch.long)

        return {
            'sequence': sequence,  # (sequence_length, channels)
            'label': label,
        }

def make_loaders(X_train, y_train, X_test, y_test, batch_size):
    """Build train and test DataLoaders over PumpDataset.

    Parameters
    ----------
    X_train, y_train : np.ndarray
        Training sensor data and labels.
    X_test, y_test : np.ndarray
        Test sensor data and labels.
    batch_size : int
        Number of samples per batch.

    Returns
    -------
    (train_loader, test_loader) : tuple of DataLoader
        Training loader (shuffled, partial final batch dropped) and test
        loader (ordered, every sample kept).
    """
    train_dataset = PumpDataset(X_train, y_train)
    test_dataset = PumpDataset(X_test, y_test)

    # drop_last=True on the *training* loader keeps batch statistics stable
    # (e.g. for BatchNorm). The *test* loader must keep every sample:
    # drop_last=True there would silently discard up to batch_size - 1
    # test samples and skew evaluation metrics.
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              shuffle=True, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=False, drop_last=False)

    return train_loader, test_loader


# # Example usage — assumes the arrays are already saved to disk:
# X_train = np.load('X_train.npy')
# y_train = np.load('y_train.npy')
# X_test = np.load('X_test.npy')
# y_test = np.load('y_test.npy')
#
# # Build the DataLoaders
# train_loader, test_loader = make_loaders(X_train, y_train, X_test, y_test, batch_size=32)
#
# # Sanity-check that the DataLoader works
# for batch_idx, sample in enumerate(train_loader):
#     print(f"Batch {batch_idx}:")
#     print(f"Sequence shape: {sample['sequence'].shape}")  # (batch_size, sequence_length, channels) — note the per-sample transpose in __getitem__
#     print(f"Label shape: {sample['label'].shape}")  # (batch_size,) — scalar class index per sample
#     if batch_idx >= 1:  # only print the first two batches
#         break