"""
dataset: define how data records are loaded
dataloader: handle dataset shuffling, assembling the data record into batches, and distributing the batches to the GPU
                                    Dataset                 Dataloader
                      -------->training dataset  ---> train dataloader
custom dataset class
                      -------->test dataset      ---> test dataloader
"""
import torch
from torch.utils.data import Dataset, DataLoader

# Five training examples, each with two features.
x_train = torch.tensor(
    [
        [-1.2, 3.1],
        [-0.9, 2.9],
        [-0.5, 2.6],
        [2.3, -1.1],
        [2.7, -1.5],
    ]
)

# Class labels for the training examples (two classes: 0 and 1).
y_train = torch.tensor([0, 0, 0, 1, 1])

# Two held-out test examples with the same feature layout.
x_test = torch.tensor(
    [
        [-0.8, 2.8],
        [2.6, -1.6],
    ]
)

# Class labels for the test examples.
y_test = torch.tensor([0, 1])


class ToyDataset(Dataset):
    """Minimal map-style dataset wrapping a feature tensor and a label tensor."""

    def __init__(self, x, y):
        # Store the data source. For real datasets these attributes could
        # instead hold file paths, open file handles, database connections, etc.
        self.features = x
        self.labels = y

    def __getitem__(self, index):
        # Return a single (feature, label) pair for the given index.
        return self.features[index], self.labels[index]

    def __len__(self):
        # The number of samples equals the number of labels.
        return self.labels.shape[0]


# Wrap the raw tensors in map-style dataset objects.
train_dataset, test_dataset = (
    ToyDataset(x_train, y_train),
    ToyDataset(x_test, y_test),
)


# Fix the RNG seed so the shuffling order is reproducible across runs.
torch.manual_seed(123)

# Settings shared by both loaders:
#   batch_size: number of samples per batch
#   num_workers: subprocesses used for loading (0 = load in the main process)
_loader_kwargs = dict(batch_size=2, num_workers=0)

# Training loader: reshuffle every epoch and drop a final batch that has
# fewer than batch_size samples.
train_loader = DataLoader(
    dataset=train_dataset,
    shuffle=True,
    drop_last=True,
    **_loader_kwargs,
)

# Test loader: keep the sample order fixed and keep every batch.
test_loader = DataLoader(
    dataset=test_dataset,
    shuffle=False,
    **_loader_kwargs,
)


def a6_test():
    """Print every training batch along with its 1-based batch number."""
    idx = 0
    for x, y in train_loader:
        print(f"Batch {idx + 1}", x, y)
        idx += 1
