import torchvision.transforms as T
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from tqdm import tqdm
import os

class Caltech(Dataset):
    """Caltech256 dataset backed by a whitespace-separated label file.

    Each line of the file at ``root`` is ``<relative\path> <int_label>``;
    backslashes in paths are normalized to forward slashes and paths are
    resolved relative to the ``Caltech256`` directory.
    """

    def __init__(self, root, transforms=None):
        """
        Args:
            root: Path to the label file (one "path label" pair per line).
            transforms: Optional callable applied to each PIL image.
        """
        self.transforms = transforms
        self.img_path = []
        self.label = []
        with open(root, "r") as f:
            for line in f:
                path, label = line.strip().split()
                # Label files use Windows-style separators; normalize them.
                self.img_path.append(os.path.join('Caltech256', path.replace("\\", "/")))
                self.label.append(int(label))

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        """Return ``(image, label)`` for sample ``idx``.

        The image is loaded as RGB; if ``transforms`` was given it is
        applied, otherwise the raw PIL image is returned.
        """
        img_path = self.img_path[idx]
        label = self.label[idx]
        img = Image.open(img_path).convert('RGB')
        # Bug fix: the original assigned the transformed image to a local
        # `im` but returned `im` unconditionally, raising UnboundLocalError
        # whenever `transforms` was None. Transform in place instead.
        if self.transforms:
            img = self.transforms(img)
        return img, label


if __name__ == '__main__':
    # Standard ImageNet preprocessing pipeline: fixed-size resize,
    # tensor conversion, then per-channel normalization.
    preprocess = T.Compose([
        T.Resize((224, 224)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    train_set = Caltech(root='Caltech256/labels/train.txt', transforms=preprocess)
    print(len(train_set))