import torch
import torch.utils.data
import PIL.Image as Image
import os
import numpy
import torchvision.transforms.functional
import torchvision
from tqdm import tqdm
import pandas


class VOC2012(torch.utils.data.Dataset):
    """Pascal VOC 2012 semantic-segmentation dataset.

    Loads RGB JPEGs from ``<root>/JPEGImages`` and color-coded masks from
    ``<root>/SegmentationClass``, converts each mask color to a class index
    (0-20) through a flat lookup table, and returns a randomly cropped
    ``(image, label)`` pair of size ``image_size`` per item.

    NOTE: all label masks are decoded eagerly in ``__init__`` (see
    ``_labels``) and kept in memory for the dataset's lifetime.
    """

    def __init__(self, root, filename, image_size, transform, target_transform):
        """
        Args:
            root: dataset root directory (the ``VOC2012`` folder containing
                ``JPEGImages``, ``SegmentationClass`` and ``ImageSets``).
            filename: path, relative to ``root``, of the split file listing
                one image basename per line (whitespace-separated).
            image_size: (height, width) of the random crop returned by
                ``__getitem__``.
            transform: optional callable applied to the image tensor
                (applied BEFORE the final division by 255).
            target_transform: optional callable applied to the label tensor.
        """
        super().__init__()
        # Official VOC palette: RGB color for each of the 21 classes,
        # index-aligned with ``self.voc_classes`` below.
        self.voc_color = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                          [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                          [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                          [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                          [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                          [0, 64, 128]]
        self.num_classes = 21
        # Flat lookup table: packed RGB value -> class index. Initialized to
        # zeros, so any color NOT in ``voc_color`` (e.g. the (224, 224, 192)
        # "void" boundary pixels present in VOC masks) maps to class 0
        # (background) rather than an ignore index.
        self.voc_colormap = torch.zeros(256 ** 3, dtype=torch.long)
        for i, color in enumerate(self.voc_color):
            self.voc_colormap[(color[0] * 256 + color[1]) * 256 + color[2]] = i

        self.voc_classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
                            'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                            'diningtable', 'dog', 'horse', 'motorbike', 'person',
                            'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']

        self.images_root = os.path.join(root, 'JPEGImages')
        self.sc_root = os.path.join(root, 'SegmentationClass')  # semantic segmentation directory
        # Split file: whitespace-separated image basenames (no extension).
        with open(os.path.join(root, filename), 'r') as f:
            self.images_filename = f.read().split()
        self.transform = transform
        self.target_transform = target_transform
        self.image_size = image_size
        # Eagerly decode every segmentation mask into self.labels.
        self._labels()

    def __len__(self) -> int:
        return len(self.images_filename)

    def __getitem__(self, item):
        """Return one ``(image, label)`` pair.

        The image is loaded as a float CHW tensor; image and label are padded
        up to at least ``image_size``, then the SAME random crop rectangle is
        applied to both so they stay pixel-aligned. The image is scaled to
        [0, 1] only on return.

        NOTE(review): assumes the JPEG decodes to a 3-channel HWC array — a
        grayscale image would make the (2, 0, 1) transpose fail; VOC JPEGs
        are RGB in practice, but confirm for other data.
        """
        pil_image_x = Image.open(os.path.join(self.images_root, self.images_filename[item] + '.jpg'))
        x_image = torch.tensor(numpy.transpose(numpy.array(pil_image_x), (2, 0, 1))).float()
        y_label = self.labels[item]
        # x_image is CHW, y_label is HW — hence the different dim indices.
        x_image = self.pad(x_image, x_image.shape[1], x_image.shape[2], require_size=self.image_size)
        y_label = self.pad(y_label, y_label.shape[0], y_label.shape[1], require_size=self.image_size)

        # Sample one crop rectangle and apply it to BOTH tensors so the
        # label stays aligned with the image.
        rect = torchvision.transforms.RandomCrop.get_params(x_image, self.image_size)
        x_image = torchvision.transforms.functional.crop(x_image, *rect)
        y_label = torchvision.transforms.functional.crop(y_label, *rect)

        if self.transform:
            x_image = self.transform(x_image)
        if self.target_transform:
            y_label = self.target_transform(y_label)
        return x_image / 255., y_label

    def _labels(self):
        """Decode every segmentation mask into a class-index tensor.

        Packs each pixel's RGB into a single integer and looks it up in
        ``voc_colormap``; results (HxW long tensors) are stored in
        ``self.labels``, index-aligned with ``self.images_filename``.
        """
        labels = []
        for f in self.images_filename:
            # Some masks are palette PNGs; convert('RGB') yields the colors.
            pil_image_y = Image.open(os.path.join(self.sc_root, f + '.png')).convert('RGB')
            image = numpy.array(pil_image_y, dtype=numpy.int32)
            idx = (image[:, :, 0] * 256 + image[:, :, 1]) * 256 + image[:, :, 2]
            labels.append(self.voc_colormap[idx])
        self.labels = labels

    def pad(self, image, h, w, require_size):
        """Zero-pad ``image`` so both spatial dims are >= ``require_size``.

        A 2-element padding list pads left/right by ``padding_w`` and
        top/bottom by ``padding_h`` (torchvision semantics), so the result
        can be LARGER than required — the subsequent random crop in
        ``__getitem__`` trims it back down. Labels are padded with 0
        (background), not an ignore index.
        """
        r_h, r_w = require_size[0:2]
        padding_h = max(r_h - h, 0)
        padding_w = max(r_w - w, 0)
        return torchvision.transforms.functional.pad(image, padding=[padding_w, padding_h])


def get_voc_2012(root, type='train', transform=None, target_transform=None):
    """Build a :class:`VOC2012` dataset for the requested split.

    Args:
        root: VOC2012 root directory (containing ``ImageSets``,
            ``JPEGImages`` and ``SegmentationClass``).
        type: ``'train'`` or ``'val'``. (Name shadows the builtin but is
            kept for backward compatibility with keyword callers.)
        transform: optional callable applied to each image tensor.
        target_transform: optional callable applied to each label tensor.

    Returns:
        A ``VOC2012`` dataset producing 256x256 random crops.

    Raises:
        ValueError: if ``type`` is not ``'train'`` or ``'val'``.
    """
    split_files = {
        'train': 'ImageSets/Segmentation/train.txt',
        # Bug fix: the 'val' split previously loaded trainval.txt, which
        # includes the training images and contaminates validation metrics.
        'val': 'ImageSets/Segmentation/val.txt',
    }
    if type not in split_files:
        # Previously an unknown split silently returned None; fail loudly.
        raise ValueError(f"type must be 'train' or 'val', got {type!r}")
    return VOC2012(root=root, filename=split_files[type],
                   image_size=(256, 256), transform=transform,
                   target_transform=target_transform)
