import torch
from torch.utils.data import Dataset
from torch.utils.data import random_split
from datamaker import OriginData


class MyDataSet(Dataset):
    """自定义DataLoader"""

    def __init__(self):
        super(MyDataSet, self).__init__()
        self.data = OriginData()
        self.data.load_data()

    def __len__(self):
        # print(len(od))
        return len(self.data.features)

    def __getitem__(self, idx):
        # print(self.data[idx])
        # print("torch.squeeze(self.data[idx][1],dim=0)",torch.squeeze(self.data[idx][1],dim=0))
        return self.data[idx][0], torch.squeeze(self.data[idx][1], dim=0)


# Build the dataset and carve it into train / validation / test subsets.
m_data = MyDataSet()
total = len(m_data)

# 70% train, 20% validation; the test split absorbs the rounding remainder
# so the three sizes always sum to the dataset length.
train_size = int(0.7 * total)
val_size = int(0.2 * total)
test_size = total - train_size - val_size

train_dataset, val_dataset, test_dataset = random_split(
    m_data, [train_size, val_size, test_size]
)