import torch
import torch.utils.data
import torchvision
import torchvision.io
import os
import torchvision.transforms.functional
import PIL.Image as Image
import numpy


class Ade20kSeg(torch.utils.data.Dataset):
    """ADE20K semantic-segmentation dataset.

    Pairs each image under ``<root>/images/<images_path>`` with the
    annotation PNG of the same stem under ``<root>/annotations/<seg_path>``.
    Each sample is padded up to ``image_size`` and randomly cropped to it;
    the same crop rectangle is applied to image and label so they stay
    aligned. Returns ``(image, label)`` as ``(float tensor CHW, long
    tensor HW)``.
    """

    def __init__(self, root, images_path, seg_path, image_size,
                 transform=None, target_transform=None, max_items=2000):
        """
        Args:
            root: dataset root directory.
            images_path: sub-directory name under ``<root>/images``.
            seg_path: sub-directory name under ``<root>/annotations``.
            image_size: ``(height, width)`` of the returned crops.
            transform: optional callable applied to the image tensor.
            target_transform: optional callable applied to the label tensor.
            max_items: cap on the reported dataset length. Defaults to 2000,
                matching the previously hard-coded limit in ``__len__``.
        """
        super().__init__()
        self.root = root
        # 150 ADE20K object classes + index 0 for background/unlabeled.
        self.num_classes = 151
        self.images_path = images_path
        self.images_root = os.path.join(root, 'images', images_path)
        self.seg_root = os.path.join(root, 'annotations', seg_path)
        self.image_size = image_size
        self.transform = transform
        self.target_transform = target_transform
        self.max_items = max_items
        self.images = []
        self.seg_files = []
        # Sort for a deterministic ordering (os.listdir order is
        # filesystem-dependent).
        for image in sorted(os.listdir(self.images_root)):
            # splitext handles filenames containing extra dots correctly.
            stem = os.path.splitext(image)[0]
            self.images.append(os.path.join(self.images_root, image))
            self.seg_files.append(os.path.join(self.seg_root, stem + '.png'))

    def __len__(self):
        # Cap with min() instead of slicing (the old `self.images[0:2000]`
        # built a throwaway list copy on every call).
        return min(len(self.images), self.max_items)

    def __getitem__(self, item):
        image = Image.open(self.images[item])
        arr = numpy.array(image)
        if arr.ndim == 2:
            # Grayscale image: add a channel axis so the HWC -> CHW
            # transpose below works (a 2-D array previously crashed it).
            arr = arr[:, :, numpy.newaxis]
        image = torch.tensor(numpy.transpose(arr, (2, 0, 1)))
        seg = self._label(self.seg_files[item])

        padded_image = self._pad(image, image.shape[1], image.shape[2],
                                 require_size=self.image_size)
        padded_seg = self._pad(seg, seg.shape[0], seg.shape[1],
                               require_size=self.image_size)

        # One crop rectangle shared by image and label keeps them aligned.
        rect = torchvision.transforms.RandomCrop.get_params(
            padded_image, self.image_size)
        cropped_image = torchvision.transforms.functional.crop(
            padded_image, *rect).float()
        cropped_seg = torchvision.transforms.functional.crop(
            padded_seg, *rect).long()
        if cropped_image.size(0) == 1:
            # Replicate a single channel into 3 to emulate RGB.
            cropped_image = cropped_image.repeat(3, 1, 1)

        if self.transform:
            cropped_image = self.transform(cropped_image)
        if self.target_transform:
            # BUG FIX: the original applied target_transform to the image
            # (`self.target_transform(cropped_image)`), corrupting the label.
            cropped_seg = self.target_transform(cropped_seg)
        return cropped_image, cropped_seg

    def _pad(self, image, h, w, require_size):
        """Pad ``image`` so each spatial dim is at least ``require_size``.

        NOTE: a 2-element padding list pads both sides of each axis, so the
        result may exceed ``require_size``; the subsequent random crop in
        ``__getitem__`` trims it back down.
        """
        r_h, r_w = require_size[0:2]
        padding_h = max(r_h - h, 0)
        padding_w = max(r_w - w, 0)
        return torchvision.transforms.functional.pad(
            image, padding=[padding_w, padding_h])

    def _label(self, seg_filename):
        """Load an annotation PNG as an integer label tensor of shape (H, W)."""
        seg = Image.open(seg_filename)
        return torch.tensor(numpy.array(seg))