from torch.utils.data import Dataset
from PIL import Image
import matplotlib.pyplot as plt
from torchvision.transforms import transforms as T
import torch
class UnconditionalDDPMDataset(Dataset):
    """Image-only dataset for unconditional DDPM training.

    Yields transformed images (no labels). Image file paths are taken from
    the ``config`` object based on ``mode``.
    """

    def __init__(self, config, transforms=None, mode='train', max_nums=None):
        """
        Args:
            config: object exposing ``train_images_file_list``,
                ``valid_images_file_list``, ``test_images_file_list`` (lists of
                image file paths) and ``image_size`` (int, target side length).
            transforms: optional callable applied to each PIL image; when
                ``None`` a default resize/to-tensor/normalize pipeline is built.
            mode: one of ``'train'`` / ``'valid'``; anything else selects the
                test split.
            max_nums: optional cap on the number of images used.
        """
        super().__init__()
        if mode == 'train':
            self.images = config.train_images_file_list
        elif mode == 'valid':
            self.images = config.valid_images_file_list
        else:
            self.images = config.test_images_file_list
        if transforms is None:
            # Default pipeline: resize to a square, convert to tensor and
            # normalize to [-1, 1] (single mean/std broadcast over channels),
            # the range DDPMs are typically trained on.
            self.transforms = T.Compose(
                [
                    T.Resize((config.image_size, config.image_size)),
                    T.ToTensor(),
                    T.Normalize([0.5], [0.5]),
                ]
            )
        else:
            self.transforms = transforms
        if max_nums is not None:
            self.images = self.images[:max_nums]

    def __len__(self):
        """Number of images in the selected (possibly truncated) split."""
        return len(self.images)

    def __getitem__(self, idx):
        """Load image ``idx``, convert to RGB, and return the transformed tensor."""
        img = Image.open(self.images[idx]).convert("RGB")
        return self.transforms(img)

    def show_image(self, idx):
        """Display the untransformed image at ``idx`` with matplotlib (debug helper)."""
        img = Image.open(self.images[idx]).convert("RGB")
        plt.imshow(img)
        plt.axis("off")
        plt.show()
class ConditionalDDPMDataset(Dataset):
    """Image + label dataset for class-conditional DDPM training.

    Yields ``(transformed_image, label_tensor)`` pairs. Image paths and labels
    are parallel lists taken from the ``config`` object based on ``mode``.
    """

    def __init__(self, config, transforms=None, mode='train', max_nums=None):
        """
        Args:
            config: object exposing per-split ``*_images_file_list`` and
                ``*_labels_list`` (parallel lists) and ``image_size`` (int).
            transforms: optional callable applied to each PIL image; when
                ``None`` a default resize/to-tensor/normalize pipeline is built.
            mode: one of ``'train'`` / ``'valid'``; anything else selects the
                test split.
            max_nums: optional cap on the number of samples used.
        """
        super().__init__()
        if mode == 'train':
            self.images, self.labels = config.train_images_file_list, config.train_labels_list
        elif mode == 'valid':
            self.images, self.labels = config.valid_images_file_list, config.valid_labels_list
        else:
            self.images, self.labels = config.test_images_file_list, config.test_labels_list
        if transforms is None:
            # Default pipeline: resize to a square, convert to tensor and
            # normalize to [-1, 1] (single mean/std broadcast over channels),
            # the range DDPMs are typically trained on.
            self.transforms = T.Compose(
                [
                    T.Resize((config.image_size, config.image_size)),
                    T.ToTensor(),
                    T.Normalize([0.5], [0.5]),
                ]
            )
        else:
            self.transforms = transforms
        if max_nums is not None:
            # Truncate both parallel lists so images and labels stay the
            # same length (the original only truncated images).
            self.images = self.images[:max_nums]
            self.labels = self.labels[:max_nums]

    def __len__(self):
        """Number of samples in the selected (possibly truncated) split."""
        return len(self.images)

    def __getitem__(self, idx):
        """Return ``(transformed_image, label_tensor)`` for sample ``idx``."""
        img = Image.open(self.images[idx]).convert("RGB")
        return self.transforms(img), torch.tensor(self.labels[idx])

    def show_image(self, idx):
        """Display the untransformed image at ``idx`` with matplotlib (debug helper)."""
        img = Image.open(self.images[idx]).convert("RGB")
        plt.imshow(img)
        plt.axis("off")
        plt.show()