from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
from torchvision import transforms
import os
from os.path import join
import cv2
import random


class AlignedDataset(BaseDataset):
    """Paired image / ground-truth dataset loaded from two directories.

    Input images are read from ``--dataroot`` and their aligned ground-truth
    counterparts from ``--gtroot``. Pairing is by identical filename, so both
    directories are expected to contain the same set of image files.
    """

    # Extensions accepted as image files (matched case-insensitively).
    _IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png')

    @staticmethod
    def _is_image_file(name):
        """Return True if *name* ends with a supported image extension."""
        return name.lower().endswith(AlignedDataset._IMG_EXTENSIONS)

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be
                a subclass of BaseOptions. Uses opt.dataroot (input images),
                opt.gtroot (ground-truth images), opt.preprocess and
                opt.crop_size.
        """
        BaseDataset.__init__(self, opt)
        self.img_path = opt.dataroot
        self.label_path = opt.gtroot
        # Sort the listings: os.listdir returns entries in arbitrary order,
        # and a deterministic order keeps image/label pairing stable.
        self.img_paths = sorted(
            f for f in os.listdir(self.img_path) if self._is_image_file(f))
        self.label_paths = sorted(
            f for f in os.listdir(self.label_path) if self._is_image_file(f))
        self.preprocess = opt.preprocess
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            # Map each channel from [0, 1] to [-1, 1].
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

    def _read_rgb(self, directory, name):
        """Load *name* from *directory* as an RGB array, or raise."""
        path = join(directory, name)
        img = cv2.imread(path)
        if img is None:
            # cv2.imread silently returns None on a missing/unreadable file;
            # fail loudly here instead of in a later cvtColor call.
            raise FileNotFoundError('could not read image: %s' % path)
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- an integer for data indexing

        Returns a dictionary containing:
            O_s (tensor) -- the input image
            O_t (tensor) -- alias of O_s (same tensor object)
            B_s (tensor) -- the aligned ground-truth image
            path (str)   -- the filename of the loaded pair
        """
        # Both images are loaded by the same filename so they stay aligned.
        name = self.label_paths[index]
        O_s = self._read_rgb(self.img_path, name)
        B_s = self._read_rgb(self.label_path, name)

        if self.preprocess != 'none':
            # Crop O_s and B_s with the same offsets so they stay aligned.
            crop_size = self.opt.crop_size
            img_size = min(O_s.shape[0], O_s.shape[1])
            if self.preprocess == 'crop_center':
                # Clamp to 0 so a crop larger than the image cannot produce
                # negative offsets.
                crop_x_loc = max(0, int(O_s.shape[0] / 2 - crop_size / 2))
                crop_y_loc = max(0, int(O_s.shape[1] / 2 - crop_size / 2))
            else:
                # Clamp the span so randint never sees a negative upper bound
                # when the image is smaller than crop_size.
                span = max(0, img_size - crop_size)
                crop_x_loc = random.randint(0, span)
                crop_y_loc = random.randint(0, span)
            O_s = O_s[crop_x_loc:crop_x_loc + crop_size,
                      crop_y_loc:crop_y_loc + crop_size, :]
            B_s = B_s[crop_x_loc:crop_x_loc + crop_size,
                      crop_y_loc:crop_y_loc + crop_size, :]

        Os = self.transform(O_s)
        Bs = self.transform(B_s)
        # 'path' is the filename that was actually loaded (the original
        # returned img_paths[index], which could name a different file).
        return {'O_s': Os,
                'O_t': Os,
                'B_s': Bs,
                'path': name}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.label_paths)
