'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: dataset.py
@time: 2020-05-29 15:44:16
@desc: 
'''
import torch
import numpy as np
import random
from PIL import Image, ImageOps, ImageFilter
from .base import VOCDataSet, TestImageDataSet, COCODataSet, ADE20KDataSet, CityScapesDataSet
from jjzhk.config import ZKCFG
from pycocotools import mask as coco_mask


class SegBaseDataSet(torch.utils.data.Dataset):
    """Base class for semantic-segmentation datasets.

    Wraps a backing dataset (supplied by subclasses via ``__init_dataset__``)
    that yields *paths* (or mask metadata), and performs the standard
    train-time augmentation pipeline: random mirror, random short-edge
    rescale, pad-and-random-crop to ``crop_size``, optional Gaussian blur
    and color jitter.

    Items returned per phase:
        train -> (img, mask, info) with joint augmentation
        eval  -> (img, mask, info) without augmentation
        test  -> (img, info)
    """

    def __init__(self, cfg: 'ZKCFG', phase, transform=None):
        self.cfg = cfg
        self.phase = phase
        self.config = cfg  # kept as an alias of self.cfg for backward compatibility
        self.transform = transform
        self.base_size = self.cfg.BASE.BASE_SIZE
        # Normalize CROP_SIZE (scalar or sequence) to an (h, w) tuple.
        self.crop_size = self._to_tuple_(self.cfg.BASE.CROP_SIZE)

        self.color_jitter = None  # optional hook; subclasses may install a callable
        self.dataset = self.__init_dataset__()

    def __getitem__(self, index):
        # The backing dataset returns paths (or mask metadata), not pixels.
        img_path, mask_path = self.dataset.__getitem__(index)
        info = self.dataset.__getItemInfo__(index)
        img = self.__load_image__(img_path)
        if self.phase in ['train', 'eval']:
            mask = self.__load_mask__(mask_path)
            # train: joint geometric augmentation; eval: plain array conversion
            img, mask = self._sync_transform_(img, mask) if self.phase == 'train' \
                            else (self._img_transform_(img), self._mask_transform_(mask))

            if self.transform is not None:
                img = self.transform(img)

            return img, mask, info
        else:  # test: no ground-truth mask available
            if self.transform is not None:
                img = self.transform(img)
            return img, info

    def __len__(self):
        return self.dataset.__len__()

    def __load_image__(self, img_path):
        """Load an image from disk, always as 3-channel RGB."""
        img = Image.open(img_path).convert('RGB')
        return img

    def __load_mask__(self, mask_path):
        """Load a label mask from disk (kept in its native single-band mode)."""
        mask = Image.open(mask_path)
        return mask

    def _to_tuple_(self, size):
        """Coerce a scalar or list/tuple size to a tuple.

        Raises:
            ValueError: if *size* is neither a number nor a list/tuple.
        """
        if isinstance(size, (list, tuple)):
            return tuple(size)
        elif isinstance(size, (int, float)):
            return (size, size)
        else:
            raise ValueError('Unsupported datatype: {}'.format(type(size)))

    def __init_dataset__(self):
        """Subclasses return the backing dataset for the current phase."""
        pass

    def _sync_transform_(self, img, mask):
        """Apply identical geometric augmentation to image and mask."""
        # random horizontal mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale: short edge drawn from [0.5, 2.0] * base_size
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # Pad whichever side is smaller than the crop window. The previous
        # guard (`short_size < min(crop_size)`) could skip padding for a
        # non-square crop window larger than the image, which then made the
        # random.randint calls below fail with an empty range.
        if oh < crop_size[0] or ow < crop_size[1]:
            padh = max(crop_size[0] - oh, 0)
            padw = max(crop_size[1] - ow, 0)
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            # -1 marks padded pixels so the loss can ignore them
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=-1)
        # random crop to crop_size (h, w)
        w, h = img.size
        x1 = random.randint(0, w - crop_size[1])
        y1 = random.randint(0, h - crop_size[0])
        img = img.crop((x1, y1, x1 + crop_size[1], y1 + crop_size[0]))
        mask = mask.crop((x1, y1, x1 + crop_size[1], y1 + crop_size[0]))
        # Gaussian blur as in PSPNet. The original compared against 0.0,
        # which made this branch unreachable (and hid a broken attribute
        # path, cfg.AUG_BLUR_RADIUS); compare against the configured
        # probability instead.
        if self.cfg.BASE.AUG_BLUR_PROB > 0 and random.random() < self.cfg.BASE.AUG_BLUR_PROB:
            radius = self.cfg.BASE.AUG_BLUR_RADIUS if self.cfg.BASE.AUG_BLUR_RADIUS > 0 else random.random()
            img = img.filter(ImageFilter.GaussianBlur(radius=radius))
        # photometric jitter applies to the image only
        if self.color_jitter:
            img = self.color_jitter(img)
        # final conversion to arrays
        img, mask = self._img_transform_(img), self._mask_transform_(mask)
        return img, mask

    def _img_transform_(self, img):
        """Convert a PIL image to a numpy array."""
        return np.array(img)

    def _mask_transform_(self, mask):
        """Convert a mask to an int64 numpy array.

        'int64' replaces the 'long' dtype alias, which was removed from
        recent NumPy releases (and was platform-dependent before that).
        """
        return np.array(mask).astype('int64')


class VOCSegSet(SegBaseDataSet):
    """Pascal VOC semantic-segmentation dataset wrapper."""

    def __init__(self, cfg:ZKCFG, phase, transform=None):
        super(VOCSegSet, self).__init__(cfg, phase, transform)

    def __init_dataset__(self):
        """Pick the backing dataset for the current phase."""
        if self.phase == "test":
            return TestImageDataSet(self.cfg)
        if self.phase in ("train", "eval"):
            # VOC uses its "test" split for evaluation
            split = "train" if self.phase == "train" else "test"
            return VOCDataSet(self.cfg, split)
        raise Exception("phase must be train, eval, test")


class COCOSegSet(SegBaseDataSet):
    """MS-COCO segmentation dataset restricted to VOC-compatible categories."""

    def __init__(self, cfg:ZKCFG, phase, transform=None):
        super(COCOSegSet, self).__init__(cfg, phase, transform)
        # COCO category ids re-indexed to the 21 VOC classes (position =
        # train id, 0 = background).
        self.CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
                         1, 64, 20, 63, 7, 72]

    def __init_dataset__(self):
        """Pick the backing dataset for the current phase."""
        if self.phase == "test":
            return TestImageDataSet(self.cfg)
        if self.phase == "train":
            return COCODataSet(self.cfg, "train")
        if self.phase == "eval":
            return COCODataSet(self.cfg, "val")
        raise Exception("phase must be train, eval, test")

    def __load_mask__(self, maskInfo):
        """Rasterize COCO annotations into a PIL mask image."""
        annotations, meta = maskInfo
        raster = self._gen_seg_mask_(annotations, meta['height'], meta['width'])
        return Image.fromarray(raster)

    def _gen_seg_mask_(self, target, h, w):
        """Paint each instance's class index onto an (h, w) uint8 canvas."""
        mask = np.zeros((h, w), dtype=np.uint8)
        for instance in target:
            cat = instance['category_id']
            # skip categories outside the VOC subset before decoding
            if cat not in self.CAT_LIST:
                continue
            c = self.CAT_LIST.index(cat)
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
            m = coco_mask.decode(rle)
            # only pixels still at background (0) get painted
            if m.ndim < 3:
                mask[:, :] += (mask == 0) * (m * c)
            else:
                # multi-part segmentation: collapse parts along the last axis
                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return mask


class ADE20KSegSet(SegBaseDataSet):
    """ADE20K semantic-segmentation dataset wrapper."""

    def __init__(self, cfg:ZKCFG, phase, transform=None):
        super(ADE20KSegSet, self).__init__(cfg, phase, transform)

    def __init_dataset__(self):
        """Pick the backing dataset for the current phase."""
        # ADE20K evaluates on its "test" split
        split_by_phase = {"train": "train", "eval": "test"}
        if self.phase in split_by_phase:
            return ADE20KDataSet(self.cfg, split_by_phase[self.phase])
        if self.phase == "test":
            return TestImageDataSet(self.cfg)
        raise Exception("phase must be train, eval, test")


class CityScapesSegSet(SegBaseDataSet):
    """Cityscapes segmentation dataset.

    Remaps the raw Cityscapes label ids onto the 19 training classes;
    labels not used for training map to -1 (ignore).
    """

    # Lookup tables are built once at class-definition time instead of on
    # every _class_to_index call (previously rebuilt per mask).
    # _KEY[i] is the train id for raw label id (i - 1); -1 means "ignore".
    _KEY = np.array([-1, -1, -1, -1, -1, -1,
                     -1, -1, 0, 1, -1, -1,
                     2, 3, 4, -1, -1, -1,
                     5, -1, 6, 7, 8, 9,
                     10, 11, 12, 13, 14, 15,
                     -1, -1, 16, 17, 18])
    # Valid raw ids: -1 .. len(_KEY) - 2.
    _MAPPING = np.arange(-1, len(_KEY) - 1).astype('int32')

    def __init__(self, cfg:ZKCFG, phase, transform=None):
        super(CityScapesSegSet, self).__init__(cfg, phase, transform)

    def __init_dataset__(self):
        """Pick the backing dataset for the current phase."""
        if self.phase == "train":
            return CityScapesDataSet(self.cfg, "train")
        elif self.phase == "eval":
            return CityScapesDataSet(self.cfg, "val")
        elif self.phase == "test":
            return TestImageDataSet(self.cfg)
        else:
            raise Exception("phase must be train, eval, test")

    def _mask_transform_(self, mask):
        """Convert a raw-id mask image to a LongTensor of train ids."""
        target = self._class_to_index(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def _class_to_index(self, mask):
        """Vectorized raw-id -> train-id remapping via digitize + table lookup.

        Raises:
            ValueError: if *mask* contains ids outside the valid raw-id
                range. (Previously an `assert`, which is silently stripped
                when Python runs with -O.)
        """
        unknown = np.setdiff1d(np.unique(mask), self._MAPPING)
        if unknown.size > 0:
            raise ValueError('mask contains unexpected label ids: {}'.format(unknown.tolist()))
        index = np.digitize(mask.ravel(), self._MAPPING, right=True)
        return self._KEY[index].reshape(mask.shape)

