'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: dataset.py
@time: 2020-07-23 11:51:52
@desc: 
'''
import torch
import cv2
import numpy as np
import torchvision as tv

from jjzhk.config import ZKCFG

from .base import DetectionDataset, COCODataSet, VOCDataSet, TestImageDataSet
from .data_zoo import DATASET_ZOO


@DATASET_ZOO.register()
def efficient_voc(cfg, phase):
    """Registered VOC dataset factory for EfficientDet; no implementation yet (yields None)."""
    return None


@DATASET_ZOO.register()
def efficient_coco(cfg, phase):
    """Build a COCO-backed EfficientDet dataset for the given phase."""
    dataset = COCOEfficientDataSet(cfg, phase)
    return dataset


class EfficientDataSet(DetectionDataset):
    """Detection dataset producing EfficientDet-style samples.

    Each sample is a dict ``{'img', 'annot', 'info'}``; annotations are
    rows of ``[x1, y1, x2, y2, class_id]`` with a 0-based class index.
    In the 'train'/'eval' phases samples are normalized, randomly flipped
    and resized by ``self.transform``; in the 'test' phase the raw image
    is returned untouched.
    """

    def __init__(self, cfg: ZKCFG, phase):
        super(EfficientDataSet, self).__init__(cfg, phase)

        # Always define the attribute so the `if self.transform` check in
        # __getitem__ can never raise AttributeError (e.g. 'test' phase,
        # where the original left it unset).
        self.transform = None
        if self.phase in ('train', 'eval'):
            self.transform = tv.transforms.Compose(
                [Normalizer(mean=self.cfg.BASE.MEAN, std=self.cfg.BASE.STD),
                 Augmenter(),
                 Resizer(self.cfg.BASE.IMAGE_SIZE)
                 ])
        self.mean = self.cfg.BASE.MEAN
        self.std = self.cfg.BASE.STD
        self.MAX_SIZE = self.cfg.BASE.IMAGE_SIZE

    def __getitem__(self, index):
        """Return one sample dict; transformed only in the train/eval phases."""
        img, target = self.dataset.__getitem__(index)
        info = self.dataset.__getItemInfo__(index)

        if self.phase == 'test':
            # Raw image; ground-truth boxes are not available at test time.
            return {'img': img, 'annot': None, 'info': info}

        img, anno = self.__train_eval__(img, info["anno"])
        sample = {'img': img, 'annot': anno, 'info': info}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def __train_eval__(self, img, anno):
        """Convert a BGR uint8 image to RGB float in [0, 1] and build an
        (N, 5) array of ``[x1, y1, x2, y2, class_id]`` annotations."""
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.

        annotations = np.zeros((0, 5))
        for a in anno:
            # Some annotations have basically no width / height; skip them.
            if a['bbox'][2] < 1 or a['bbox'][3] < 1:
                continue

            annotation = np.zeros((1, 5))
            annotation[0, :4] = a['bbox']
            annotation[0, 4] = a['category_id'] - 1  # 0-based class index
            annotations = np.append(annotations, annotation, axis=0)

        # Transform from [x, y, w, h] to [x1, y1, x2, y2].
        annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
        annotations[:, 3] = annotations[:, 1] + annotations[:, 3]

        return img, annotations

    def collater(self, data):
        """Batch a list of samples: stack images to NCHW and pad the
        per-image annotations with -1 rows so every item in the batch has
        the same number of boxes (padding rows can be masked downstream)."""
        imgs = [s['img'] for s in data]
        annots = [s['annot'] for s in data]
        info = [s['info'] for s in data]

        # HWC -> CHW after stacking into a batch (permute hoisted out of
        # the branches; it was identical in both).
        imgs = torch.from_numpy(np.stack(imgs, axis=0)).permute(0, 3, 1, 2)

        if data[0]['annot'] is None:
            # Test phase: nothing to pad or rescale.
            return {'img': imgs, 'annot': [], 'scale': [], 'info': info}

        scales = [s['scale'] for s in data]
        max_num_annots = max(annot.shape[0] for annot in annots)
        if max_num_annots > 0:
            annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
            for idx, annot in enumerate(annots):
                if annot.shape[0] > 0:
                    annot_padded[idx, :annot.shape[0], :] = annot
        else:
            # No boxes anywhere in the batch: keep a single -1 filler row.
            annot_padded = torch.ones((len(annots), 1, 5)) * -1

        return {'img': imgs, 'annot': annot_padded, 'scale': scales, 'info': info}


class COCOEfficientDataSet(EfficientDataSet):
    """EfficientDataSet backed by COCO annotations (train/eval) or by raw
    test images (test)."""

    def __init__(self, cfg, phase):
        super(COCOEfficientDataSet, self).__init__(cfg, phase)

    def __init_dataset__(self):
        """Build the backing dataset for the current phase.

        Maps 'train' -> COCO train split, 'eval' -> COCO val split and
        'test' -> raw test images; any other phase is rejected.
        """
        if self.phase == "test":
            return TestImageDataSet(cfg=self.cfg)
        if self.phase == "train":
            return COCODataSet(self.cfg, "train")
        if self.phase == "eval":
            return COCODataSet(self.cfg, "val")
        raise Exception("phase must be train, eval, test")


class Resizer(object):
    """Resize the image so its longer side equals ``img_size``, zero-pad
    the shorter side to a square, and scale boxes by the same factor.

    (The previous docstring — "Convert ndarrays in sample to Tensors" —
    was copy-pasted from another transform and described the wrong
    behavior.)
    """

    def __init__(self, img_size=512):
        # Side length of the square output image.
        self.img_size = img_size

    def __call__(self, sample):
        """Resize/pad ``sample['img']`` and rescale ``sample['annot']``.

        Returns a dict with 'img' as a float32 (img_size, img_size, 3)
        tensor, 'annot' as a tensor, 'scale' as the resize factor (needed
        to map predictions back to the original resolution), and 'info'
        passed through unchanged.
        """
        image, annots, info = sample['img'], sample['annot'], sample['info']
        height, width, _ = image.shape
        # Scale by the longer side so the whole image fits in the square.
        if height > width:
            scale = self.img_size / height
            resized_height = self.img_size
            resized_width = int(width * scale)
        else:
            scale = self.img_size / width
            resized_height = int(height * scale)
            resized_width = self.img_size

        image = cv2.resize(image, (resized_width, resized_height),
                           interpolation=cv2.INTER_LINEAR)

        # Pad bottom/right with zeros; allocate float32 directly instead of
        # building a float64 array and converting the tensor afterwards.
        new_image = np.zeros((self.img_size, self.img_size, 3), dtype=np.float32)
        new_image[0:resized_height, 0:resized_width] = image

        # Boxes are in pixel coordinates — rescale them with the image.
        annots[:, :4] *= scale

        return {'img': torch.from_numpy(new_image),
                'annot': torch.from_numpy(annots),
                'scale': scale,
                'info': info}


class Augmenter(object):
    """Randomly mirror the image and its boxes horizontally.

    With probability ``flip_x`` the image is flipped left-right and box
    x-coordinates are remapped so they still enclose the same objects.
    (The previous docstring — "Convert ndarrays in sample to Tensors" —
    was copy-pasted from another transform.)
    """

    def __call__(self, sample, flip_x=0.5):
        if np.random.rand() < flip_x:
            image, annots, info = sample['img'], sample['annot'], sample['info']
            # Reverse the column (width) axis to mirror horizontally.
            image = image[:, ::-1, :]

            cols = image.shape[1]

            # Mirror boxes: new_x1 = W - x2, new_x2 = W - x1 (keeps x1 <= x2).
            # x1 must be copied before annots[:, 0] is overwritten; the
            # original's extra x_tmp copy was redundant.
            x1 = annots[:, 0].copy()
            x2 = annots[:, 2].copy()
            annots[:, 0] = cols - x2
            annots[:, 2] = cols - x1

            sample = {'img': image, 'annot': annots, 'info': info}

        return sample


class Normalizer(object):
    """Channel-wise image normalization: (img - mean) / std.

    ``mean``/``std`` default to the ImageNet statistics and are stored
    with shape (1, 1, 3) so they broadcast over an HxWx3 image.
    """

    # Tuple defaults replace the original mutable-list default arguments
    # (a classic Python pitfall); callers passing lists are unaffected.
    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = np.array([[mean]])
        self.std = np.array([[std]])

    def __call__(self, sample):
        """Normalize ``sample['img']``; 'annot' and 'info' pass through."""
        image, annots, info = sample['img'], sample['annot'], sample['info']

        return {'img': ((image.astype(np.float32) - self.mean) / self.std),
                'annot': annots,
                'info' : info
                }
