'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: yolov3.py
@time: 2020-06-19 14:40:33
@desc: 
'''
import torch
import numpy as np
import cv2
import torchvision as tv
import random
from jjzhk.config import ZKCFG
import os
from .data_zoo import DATASET_ZOO
from .base import DetectionDataset, COCODataSet, VOCDataSet, TestImageDataSet


@DATASET_ZOO.register()
def yolo_coco_v3(cfg, phase):
    """Registered factory: build a COCO-backed YOLOv3 dataset for the given phase."""
    dataset = YOLOCOCODataSet(cfg, phase)
    return dataset


@DATASET_ZOO.register()
def yolo_voc_v3(cfg, phase):
    """Registered factory: build a VOC-backed YOLOv3 dataset for the given phase."""
    dataset = YOLOVOCDataSet(cfg, phase)
    return dataset


class YOLODataSet(DetectionDataset):
    """Base dataset for YOLOv3.

    Loads images through the wrapped dataset, converts them to padded
    square RGB tensors, and normalizes ground-truth boxes to the padded
    image so they stay aligned through multi-scale resizing in `collater`.
    """

    def __init__(self, cfg: ZKCFG, phase):
        super(YOLODataSet, self).__init__(cfg, phase)
        self.img_size = self.cfg.BASE.IMAGE_SIZE[0]  # e.g. 416
        # Multi-scale training range: base size +/- 3 strides of 32.
        self.min_size = self.img_size - 3 * 32  # e.g. 320
        self.max_size = self.img_size + 3 * 32  # e.g. 512
        self.batch_count = 0

    def __getitem__(self, index):
        """Return (image tensor, targets, info dict) for one sample.

        train/eval: image is the padded square RGB tensor and targets the
        (N, 6) box matrix from `_train_eval_image_` when a label file
        exists. test: image is padded and resized, target passes through.
        """
        img, target = self.dataset.__getitem__(index)
        info = self.dataset.__getItemInfo__(index)
        height, width, _ = img.shape
        sized = img

        if self.phase == 'train' or self.phase == 'eval':
            # Labels mirror the image tree: JPEGImages/x.jpg -> labels/x.txt
            label_path = info['path'].replace("JPEGImages", "labels").replace(".png", ".txt").replace(".jpg", ".txt")
            if os.path.exists(label_path):
                boxes = torch.from_numpy(np.loadtxt(label_path).reshape(-1, 5))
                sized, target = self._train_eval_image_(img, boxes)
        elif self.phase == 'test':
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
            sized = tv.transforms.ToTensor()(sized)
            sized, _ = pad_to_square(sized, 0)
            # BUG FIX: the attribute is self.cfg (see __init__); self.config
            # does not exist and raised AttributeError in the test phase.
            sized = resize(sized, self.cfg.BASE.IMAGE_SIZE[0])

        return sized, target, info

    def _train_eval_image_(self, image, boxes):
        """Convert a BGR image to a padded square RGB tensor and remap boxes.

        boxes: (N, 5) rows of [class, cx, cy, w, h] normalized to the
        original image. Returns (padded image tensor, (N, 6) targets with a
        leading batch-index column filled in later by `collater`).
        """
        sized = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        sized = tv.transforms.ToTensor()(sized)

        # Grayscale images come out as (H, W); replicate to 3 channels.
        if len(sized.shape) != 3:
            sized = sized.unsqueeze(0)
            # BUG FIX: expand takes the full target shape; the original passed
            # a nested tuple ((3, torch.Size)) which raises TypeError.
            sized = sized.expand((3, *sized.shape[1:]))

        _, h, w = sized.shape
        h_factor, w_factor = (h, w)
        img, pad = pad_to_square(sized, 0)
        _, padded_h, padded_w = img.shape

        # Extract pixel coordinates on the unpadded, unscaled image
        x1 = w_factor * (boxes[:, 1] - boxes[:, 3] / 2)
        y1 = h_factor * (boxes[:, 2] - boxes[:, 4] / 2)
        x2 = w_factor * (boxes[:, 1] + boxes[:, 3] / 2)
        y2 = h_factor * (boxes[:, 2] + boxes[:, 4] / 2)
        # Adjust for the added padding; pad is (left, right, top, bottom)
        x1 += pad[0]
        y1 += pad[2]
        x2 += pad[1]
        y2 += pad[3]
        # Back to normalized (cx, cy, w, h) relative to the padded square
        boxes[:, 1] = ((x1 + x2) / 2) / padded_w
        boxes[:, 2] = ((y1 + y2) / 2) / padded_h
        boxes[:, 3] *= w_factor / padded_w
        boxes[:, 4] *= h_factor / padded_h

        targets = torch.zeros((len(boxes), 6))
        targets[:, 1:] = boxes
        # Random horizontal-flip augmentation.
        if np.random.random() < 0.5:
            # BUG FIX: flip (and below, return) the PADDED image `img` —
            # targets are normalized to the padded size, so flipping and
            # returning the unpadded `sized` left boxes misaligned.
            img = torch.flip(img, [-1])
            targets[:, 2] = 1 - targets[:, 2]

        return img, targets

    def collater(self, batch):
        """Merge a list of (img, target, info) samples into one batch dict.

        train/eval: tags each box row with its sample index, concatenates
        targets, and stacks images after resizing them to the current
        multi-scale size. test: stacks images without target merging.
        """
        if self.phase == 'test':
            targets = []
            imgs = []
            infos = []
            for sample in batch:
                imgs.append(sample[0])
                if sample[1] is not None:
                    targets.append(sample[1])
                infos.append(sample[2])
            return {'img': np.stack(imgs, 0), 'annot': targets, 'info': infos}
        else:
            imgs, targets, infos = list(zip(*batch))
            targets = [boxes for boxes in targets if boxes is not None]
            # Column 0 records which sample in the batch each box belongs to.
            for i, boxes in enumerate(targets):
                boxes[:, 0] = i
            targets = torch.cat(targets, 0)
            # Every 10 batches pick a new image size for multi-scale training:
            # 320, 352, 384, 416, 448, 480, 512 (i.e. 10*32 ... 16*32).
            if self.batch_count % 10 == 0:
                self.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))
            imgs = torch.stack([resize(img, self.img_size) for img in imgs])
            self.batch_count += 1
            return {'img': imgs, 'annot': targets, 'info': infos}


class YOLOVOCDataSet(YOLODataSet):
    """YOLOv3 dataset backed by Pascal VOC annotations."""

    def __init__(self, cfg: ZKCFG, phase):
        super(YOLOVOCDataSet, self).__init__(cfg, phase)

    def __init_dataset__(self):
        """Build the underlying dataset matching the current phase."""
        if self.phase == "test":
            return TestImageDataSet(cfg=self.cfg)
        if self.phase in ("train", "eval"):
            # 'eval' runs against the VOC test split.
            split = "train" if self.phase == "train" else "test"
            return VOCDataSet(self.cfg, split)
        raise Exception("phase must be train, eval, test")


class YOLOCOCODataSet(YOLODataSet):
    """YOLOv3 dataset backed by COCO annotations."""

    def __init__(self, cfg, phase):
        super(YOLOCOCODataSet, self).__init__(cfg, phase)

    def __init_dataset__(self):
        """Build the underlying dataset matching the current phase."""
        if self.phase == "test":
            return TestImageDataSet(cfg=self.cfg)
        if self.phase in ("train", "eval"):
            # 'eval' runs against the COCO val split.
            split = "train" if self.phase == "train" else "val"
            return COCODataSet(self.cfg, split)
        raise Exception("phase must be train, eval, test")


def pad_to_square(img, pad_value):
    """Pad a CHW tensor with `pad_value` so its last two dims are equal.

    Returns (padded tensor, padding tuple) where the tuple is the
    (left, right, top, bottom) argument passed to F.pad.
    """
    c, h, w = img.shape
    diff = abs(h - w)
    # Split the difference; the extra pixel (odd diff) goes on the far side.
    near, far = diff // 2, diff - diff // 2
    if h <= w:
        padding = (0, 0, near, far)   # wide image: pad top/bottom
    else:
        padding = (near, far, 0, 0)   # tall image: pad left/right
    padded = torch.nn.functional.pad(img, padding, "constant", value=pad_value)
    return padded, padding


def resize(image, size):
    """Nearest-neighbor resize of a CHW tensor to (size, size)."""
    # interpolate needs a batch dim; add it, scale, then strip it again.
    batched = image.unsqueeze(0)
    scaled = torch.nn.functional.interpolate(batched, size=size, mode="nearest")
    return scaled.squeeze(0)