import glob
import random
import os

from torch.utils.data import Dataset
from PIL import Image


class CycleGAN_dataset(Dataset):
    """Dataset over an explicit list of image paths with per-image labels.

    Each item is the transformed RGB image together with its label.
    """

    def __init__(self, img_paths, labels, transform) -> None:
        super().__init__()
        self.imgs = img_paths
        self.labels = labels
        self.transform = transform

    def __getitem__(self, index):
        path = self.imgs[index]
        # Force 3-channel RGB so grayscale/palette files load uniformly.
        image = Image.open(path).convert('RGB')
        return self.transform(image), self.labels[index]

    def __len__(self):
        return len(self.imgs)
    

class ImageDataset(Dataset):
    """Unpaired two-domain image dataset (CycleGAN style).

    Expects two sibling directories under *root* named "<mode>A" and
    "<mode>B" (e.g. "trainA"/"trainB"), each holding image files.

    Args:
        root: Dataset root directory.
        transform: Optional callable applied to each PIL image. If None,
            the raw PIL images are returned (previously this crashed).
        unaligned: If True, sample the domain-B image at random instead of
            pairing by index.
        mode: Split prefix for the directory names, e.g. "train" or "test".
    """

    def __init__(self, root, transform=None, unaligned=False, mode="train") -> None:
        super().__init__()
        self.transform = transform
        self.unaligned = unaligned

        # Sort for a deterministic ordering across runs and platforms.
        self.files_A = sorted(glob.glob(os.path.join(root, f"{mode}A", "*.*")))
        self.files_B = sorted(glob.glob(os.path.join(root, f"{mode}B", "*.*")))

    def __getitem__(self, index):
        # Wrap the index: __len__ is the longer domain, so the shorter one
        # cycles.
        img_a = Image.open(self.files_A[index % len(self.files_A)])

        if self.unaligned:
            # Break pairing by drawing a random image from domain B.
            img_b = Image.open(random.choice(self.files_B))
        else:
            img_b = Image.open(self.files_B[index % len(self.files_B)])

        # convert("RGB") handles L, LA, P, RGBA, ... uniformly; the old
        # paste-into-new-image conversion was a manual version of this.
        if img_a.mode != "RGB":
            img_a = img_a.convert("RGB")
        if img_b.mode != "RGB":
            img_b = img_b.convert("RGB")

        # Bug fix: transform defaults to None but was called unconditionally,
        # raising TypeError; fall back to returning the raw PIL images.
        if self.transform is not None:
            img_a = self.transform(img_a)
            img_b = self.transform(img_b)
        return {
            'a': img_a, 'b': img_b,
        }

    def __len__(self):
        # One epoch covers the larger of the two domains.
        return max(len(self.files_A), len(self.files_B))

    @staticmethod
    def gray_to_rgb(img):
        """Convert a single-band PIL image to RGB (kept for external callers)."""
        rgb_img = Image.new("RGB", img.size)
        rgb_img.paste(img)
        return rgb_img
