from torch.utils.data import Dataset
import torchvision.transforms as transforms
from PIL import Image
import os

class MyDataset(Dataset):
    """Image-classification dataset pairing image files with integer labels.

    Images are taken from a directory; labels come from a text file in which
    each line is ``<filename> <int_label>`` (whitespace-separated). Every file
    in the image directory must have a matching entry in the label file.
    """

    def __init__(self, data_path, label_path):
        """Index the dataset.

        Args:
            data_path: Directory containing the image files.
            label_path: Text file mapping each image filename to an int label.

        Raises:
            KeyError: If a file in ``data_path`` has no entry in the label file.
        """
        # Standard ImageNet-style eval preprocessing: resize short side to 256,
        # center-crop to 224, convert to a float tensor (no normalization here).
        self.test_transforms = transforms.Compose(
            [
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
            ]
        )

        self.path2label = self.get_labels(label_path=label_path)

        # Sort for a deterministic sample order: os.listdir() returns entries
        # in arbitrary, platform-dependent order, which would make indices
        # differ between runs/machines.
        self.data = []
        for img in sorted(os.listdir(data_path)):
            if img not in self.path2label:
                # Fail fast with context rather than a bare KeyError.
                raise KeyError(
                    f"no label for {img!r} in label file {label_path!r}"
                )
            self.data.append((os.path.join(data_path, img), self.path2label[img]))
        print('len(data) is ', len(self.data))

    def __len__(self):
        """Return the number of samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Load the image at ``index`` and return ``(tensor, label)``."""
        img_path, label = self.data[index]

        # Force 3-channel RGB so grayscale/RGBA files yield consistent tensors.
        img = Image.open(img_path).convert("RGB")
        img = self.test_transforms(img)
        return img, label

    def get_labels(self, label_path):
        """Parse the label file into a ``{filename: int_label}`` dict.

        Streams the file line by line; blank/whitespace-only lines (e.g. a
        trailing newline) are skipped instead of crashing.
        """
        path2label = {}
        with open(label_path, 'r') as f:
            for line in f:
                arrs = line.split()
                if not arrs:
                    continue
                path2label[arrs[0]] = int(arrs[1])
        return path2label