'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: yolov3.py
@time: 2020-06-19 14:40:33
@desc: 
'''
import torch
import numpy as np
from jjzhk.config import ZKCFG
import jjzhk.dataset as eub
from .data_zoo import DATASET_ZOO
import cv2
import random
import os
from .base import DetectionDataset


@DATASET_ZOO.register()
def yolo_coco_asff(cfg, phase):
    """Registry factory: build the COCO-backed anchor-free (ASFF) YOLO dataset."""
    return YOLOCOCODataSet(cfg, phase)


@DATASET_ZOO.register()
def yolo_voc_asff(cfg, phase):
    """Registry factory: build the VOC-backed anchor-free (ASFF) YOLO dataset."""
    return YOLOVOCDataSet(cfg, phase)


class YOLODataSet(DetectionDataset):
    """Anchor-free YOLO (ASFF-style) detection dataset.

    Each train/eval item is a whole mini-batch: images are letterboxed to a
    square input size (randomly chosen per batch when training, for
    multi-scale training) and ground truth is rasterized into three
    per-stride label maps (strides 8/16/32) plus padded per-scale bbox lists.
    """

    def __init__(self, cfg: ZKCFG, phase):
        super(YOLODataSet, self).__init__(cfg, phase)
        # Downsampling factor of the small/medium/large detection branches.
        self.strides = np.array([8, 16, 32])
        self.batch_size = self.cfg.TRAIN.BATCH_SIZE
        # Max number of ground-truth boxes one grid cell can hold.
        self._gt_per_grid = self.cfg.BASE.GT_PER_GRID
        self.numcls = self.cfg.BASE.NUM_CLASSES
        # Materialize as a list so it can be indexed and re-iterated safely
        # (dict.keys() returns a live view object).
        self.labels = list(self.cfg.BASE.CLASSINFO.keys())

    def __getitem__(self, index):
        """Return one batch (train/eval) or one preprocessed image (test).

        Note: ``index`` is a *batch* index for train/eval but an *image*
        index for test; any other phase falls through and returns None.
        """
        if self.phase == 'train':
            # Multi-scale training: pick a random input size per batch.
            trainsize = random.choice(self.cfg.BASE.IMAGE_SIZE)
            return self._load_batch(index, trainsize)
        elif self.phase == 'eval':
            trainsize = self.cfg.BASE.IMAGE_SIZE[0]
            return self._load_batch(index, trainsize)
        elif self.phase == 'test':
            img, target = self.dataset.__getitem__(index)
            info = self.dataset.__getItemInfo__(index)
            sized = img_preprocess(img,
                                   (self.cfg.TEST.IMAGE_SIZE[0],
                                    self.cfg.TEST.IMAGE_SIZE[1]),
                                   True)
            return sized, target, info

    def _load_batch(self, idx_batch, random_trainsize):
        """Assemble batch ``idx_batch`` at input size ``random_trainsize``.

        :return: (images (B,3,H,W), image paths, original shapes,
                  label_sbbox, label_mbbox, label_lbbox,
                  padded sbboxes, mbboxes, lbboxes) as torch tensors
                  (paths stay a plain list).
        """
        # Grid resolution of each detection branch at this input size.
        outputshapes = random_trainsize // self.strides

        batch_image = np.zeros(
            (self.batch_size, random_trainsize, random_trainsize, 3))
        # Per-cell label layout: [x1, y1, x2, y2, conf, onehot..., mixup_w].
        batch_label_sbbox = np.zeros((self.batch_size, outputshapes[0], outputshapes[0],
                                      self._gt_per_grid, 6 + self.numcls))
        batch_label_mbbox = np.zeros((self.batch_size, outputshapes[1], outputshapes[1],
                                      self._gt_per_grid, 6 + self.numcls))
        batch_label_lbbox = np.zeros((self.batch_size, outputshapes[2], outputshapes[2],
                                      self._gt_per_grid, 6 + self.numcls))
        temp_batch_sbboxes = []
        temp_batch_mbboxes = []
        temp_batch_lbboxes = []
        imgpath_batch = []
        orishape_batch = []
        max_sbbox_per_img = 0
        max_mbbox_per_img = 0
        max_lbbox_per_img = 0
        for idx in range(self.batch_size):
            idxitem = idx_batch * self.batch_size + idx
            image_org, bboxes_org, labels_org, imgpath, ori_shape = self._parse_annotation(
                idxitem, random_trainsize)
            if random.random() < 0.5 and self.phase == 'train':
                # Mixup: blend with a second random image; each box carries
                # its blending weight in an extra 5th column.
                index_mix = random.randint(0, len(self.dataset) - 1)
                image_mix, bboxes_mix, labels_mix, _, _ = self._parse_annotation(
                    index_mix, random_trainsize)

                lam = np.random.beta(1.5, 1.5)
                img = lam * image_org + (1 - lam) * image_mix
                bboxes_org = np.concatenate(
                    [bboxes_org, np.full((len(bboxes_org), 1), lam)], axis=-1)
                bboxes_mix = np.concatenate(
                    [bboxes_mix, np.full((len(bboxes_mix), 1), 1 - lam)], axis=-1)
                bboxes = np.concatenate([bboxes_org, bboxes_mix])
                labels = np.concatenate([labels_org, labels_mix])
            else:
                # No mixup: the weight column is 1.0 for every box.
                img = image_org
                bboxes = np.concatenate(
                    [bboxes_org, np.full((len(bboxes_org), 1), 1.0)], axis=-1)
                labels = labels_org
            label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = \
                self.preprocess_anchorfree(bboxes, labels, outputshapes)
            batch_image[idx, :, :, :] = img
            batch_label_sbbox[idx, :, :, :, :] = label_sbbox
            batch_label_mbbox[idx, :, :, :, :] = label_mbbox
            batch_label_lbbox[idx, :, :, :, :] = label_lbbox

            # Images without boxes at a scale get one all-zero placeholder
            # row so the per-scale lists can be stacked.
            zeros = np.zeros((1, 4), dtype=np.float32)
            sbboxes = sbboxes if len(sbboxes) != 0 else zeros
            mbboxes = mbboxes if len(mbboxes) != 0 else zeros
            lbboxes = lbboxes if len(lbboxes) != 0 else zeros
            temp_batch_sbboxes.append(sbboxes)
            temp_batch_mbboxes.append(mbboxes)
            temp_batch_lbboxes.append(lbboxes)
            max_sbbox_per_img = max(max_sbbox_per_img, len(sbboxes))
            max_mbbox_per_img = max(max_mbbox_per_img, len(mbboxes))
            max_lbbox_per_img = max(max_lbbox_per_img, len(lbboxes))
            imgpath_batch.append(imgpath)
            orishape_batch.append(ori_shape)

        # Zero-pad every image's box list to a common length (the +1
        # guarantees at least one padding row per image).
        batch_sbboxes = np.array(
            [np.concatenate([sbboxes, np.zeros((max_sbbox_per_img + 1 - len(sbboxes), 4), dtype=np.float32)], axis=0)
             for sbboxes in temp_batch_sbboxes])
        batch_mbboxes = np.array(
            [np.concatenate([mbboxes, np.zeros((max_mbbox_per_img + 1 - len(mbboxes), 4), dtype=np.float32)], axis=0)
             for mbboxes in temp_batch_mbboxes])
        batch_lbboxes = np.array(
            [np.concatenate([lbboxes, np.zeros((max_lbbox_per_img + 1 - len(lbboxes), 4), dtype=np.float32)], axis=0)
             for lbboxes in temp_batch_lbboxes])
        # HWC -> CHW for the image tensor; everything else cast to float32.
        return torch.from_numpy(np.array(batch_image).transpose((0, 3, 1, 2)).astype(np.float32)), \
            imgpath_batch, \
            torch.from_numpy(np.array(orishape_batch).astype(np.float32)), \
            torch.from_numpy(np.array(batch_label_sbbox).astype(np.float32)), \
            torch.from_numpy(np.array(batch_label_mbbox).astype(np.float32)), \
            torch.from_numpy(np.array(batch_label_lbbox).astype(np.float32)), \
            torch.from_numpy(np.array(batch_sbboxes).astype(np.float32)), \
            torch.from_numpy(np.array(batch_mbboxes).astype(np.float32)), \
            torch.from_numpy(np.array(batch_lbboxes).astype(np.float32))

    def _parse_annotation(self, itemidx, random_trainsize):
        """Load one image + boxes, apply train-time augmentation, letterbox.

        :return: (image, bboxes, labels, imgpath, ori_shape) where image is
                 RGB in [0, 1] at (random_trainsize, random_trainsize) and
                 bboxes are remapped into the letterboxed frame.
        """
        # Assumes a VOC-style layout (<root>/JPEGImages/<id>.jpg) -- this
        # holds for the datasets registered in this module.
        rootpath = os.path.join(self.cfg.BASE.DATA_ROOT, self.cfg.BASE.DATA_SUB_ROOT)
        info = self.dataset.__getItemInfo__(itemidx)
        filename = info['img_id']
        imgpath = "%s/JPEGImages/%s.jpg" % (rootpath, filename)
        allbox = np.array(info['boxes'])
        # Column 0 is the class index; columns 1-4 are x1, y1, x2, y2.
        bboxes = allbox[:, 1:5]
        labels = allbox[:, 0]

        img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
        if self.phase == 'train':
            # Geometric augmentation; each helper keeps boxes consistent.
            img, bboxes = random_horizontal_flip(
                np.copy(img), np.copy(bboxes))
            img, bboxes = random_crop(np.copy(img), np.copy(bboxes))
            img, bboxes = random_translate(
                np.copy(img), np.copy(bboxes))
        ori_shape = img.shape[:2]
        img, bboxes = img_preprocess2(np.copy(img), np.copy(bboxes),
                                      (random_trainsize, random_trainsize), True)
        return img, bboxes, labels, imgpath, ori_shape

    def preprocess_anchorfree(self, bboxes, labels, outputshapes):
        """Rasterize ground truth into the three per-scale label maps.

        :param bboxes: (n, 5) array of [x1, y1, x2, y2, mixup_weight]
        :param labels: (n,) class indices (may be float-typed)
        :param outputshapes: grid size of each of the three branches
        :return: (label_sbbox, label_mbbox, label_lbbox,
                  sbboxes, mbboxes, lbboxes) -- one label map and one raw
                  corner-coordinate list per branch
        """
        label = [np.zeros((outputshapes[i], outputshapes[i],
                           self._gt_per_grid, 6 + self.numcls)) for i in range(3)]
        # The mixup-weight slot defaults to 1.0.
        for i in range(3):
            label[i][:, :, :, -1] = 1.0
        bboxes_coor = [[] for _ in range(3)]
        bboxes_count = [np.zeros((outputshapes[i], outputshapes[i]))
                        for i in range(3)]
        for bbox, l in zip(bboxes, labels):
            # Corner coords on the input image, class index, mixup weight,
            # center/size and scale of the box.
            bbox_coor = bbox[:4]
            # BUGFIX: labels come from a float ndarray; modern NumPy rejects
            # float indices, so cast the class index explicitly.
            bbox_class_ind = int(l)
            bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5,
                                        bbox_coor[2:] - bbox_coor[:2]], axis=-1)
            bbox_scale = np.sqrt(np.multiply.reduce(bbox_xywh[2:]))
            bbox_mixw = bbox[4]
            # Label smoothing over the one-hot class vector.
            # BUGFIX: np.float was removed in NumPy 1.24; use the builtin.
            onehot = np.zeros(self.numcls, dtype=float)
            onehot[bbox_class_ind] = 1.0
            uniform_distribution = np.full(self.numcls, 1.0 / self.numcls)
            delta = 0.01
            smooth_onehot = onehot * (1 - delta) + delta * uniform_distribution
            # Anchor-free matching: route the box to a branch by its scale.
            if bbox_scale <= 30:
                match_branch = 0
            elif (30 < bbox_scale) and (bbox_scale <= 90):
                match_branch = 1
            else:
                match_branch = 2

            xind, yind = np.floor(
                1.0 * bbox_xywh[:2] / self.strides[match_branch]).astype(np.int32)
            gt_count = int(bboxes_count[match_branch][yind, xind])
            if gt_count < self._gt_per_grid:
                if gt_count == 0:
                    # First box in this cell: broadcast it into every gt
                    # slot so no slot is left all-zero.
                    gt_count = slice(None)
                bbox_label = np.concatenate(
                    [bbox_coor, [1.0], smooth_onehot, [bbox_mixw]], axis=-1)
                label[match_branch][yind, xind, gt_count, :] = 0
                label[match_branch][yind, xind, gt_count, :] = bbox_label
                bboxes_count[match_branch][yind, xind] += 1
                bboxes_coor[match_branch].append(bbox_coor)
        label_sbbox, label_mbbox, label_lbbox = label
        sbboxes, mbboxes, lbboxes = bboxes_coor
        return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes

    def collater(self, batch):
        """Collate test-phase samples: stack images, gather targets/infos."""
        targets = []
        imgs = []
        infos = []
        for sample in batch:
            imgs.append(sample[0])
            if sample[1] is not None:
                targets.append(sample[1])
            infos.append(sample[2])
        return {'img': np.stack(imgs, 0), 'annot': targets, 'info': infos}


class YOLOVOCDataSet(YOLODataSet):
    """YOLO ASFF dataset backed by Pascal VOC annotations."""

    def __init__(self, cfg: ZKCFG, phase):
        super(YOLOVOCDataSet, self).__init__(cfg, phase)

    def __init_dataset__(self):
        """Build the underlying VOC split for the current phase."""
        if self.phase == "train":
            return eub.VOCDataSet(self.cfg, "train", False)
        if self.phase == "eval":
            # VOC evaluation conventionally runs on the "test" split.
            return eub.VOCDataSet(self.cfg, "test", False)
        if self.phase == "test":
            return eub.TestImageDataSet(cfg=self.cfg)
        raise Exception("phase must be train, eval, test")


class YOLOCOCODataSet(YOLODataSet):
    """YOLO ASFF dataset backed by MS-COCO annotations."""

    def __init__(self, cfg, phase):
        super(YOLOCOCODataSet, self).__init__(cfg, phase)

    def __init_dataset__(self):
        """Build the underlying COCO split for the current phase."""
        split_by_phase = {"train": "train", "eval": "val"}
        if self.phase in split_by_phase:
            return eub.COCODataSet(self.cfg, split_by_phase[self.phase], False)
        if self.phase == "test":
            return eub.TestImageDataSet(cfg=self.cfg)
        raise Exception("phase must be train, eval, test")


def img_preprocess(image, target_shape, keepratio=True):
    """Convert BGR->RGB, resize to ``target_shape`` = (h, w), scale to [0, 1].

    With ``keepratio=True`` the aspect ratio is preserved: the image is
    resized by the limiting scale factor and centered on a mid-gray (128)
    canvas (letterboxing). With ``keepratio=False`` it is stretched to the
    target shape.

    :param image: BGR uint8 image (H, W, 3) as loaded by cv2
    :param target_shape: desired output shape as (height, width)
    :param keepratio: preserve aspect ratio via letterboxing
    :return: float RGB image of shape ``target_shape`` with values in [0, 1]
    """
    h_target, w_target = target_shape
    h_org, w_org, _ = image.shape

    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
    if not keepratio:
        # BUGFIX: cv2.resize takes dsize as (width, height); passing
        # target_shape directly ((h, w)) swapped the axes whenever the
        # target was non-square. Also dropped the unused ratio locals.
        image = cv2.resize(image, (w_target, h_target),
                           interpolation=cv2.INTER_LINEAR) / 255.0
        return image

    resize_ratio = min(1.0 * w_target / w_org, 1.0 * h_target / h_org)
    resize_w = int(resize_ratio * w_org)
    resize_h = int(resize_ratio * h_org)
    image_resized = cv2.resize(image, (resize_w, resize_h))
    # Mid-gray padding keeps the letterbox borders neutral after /255.
    image_paded = np.full((h_target, w_target, 3), 128.0)
    dw = int((w_target - resize_w) / 2)
    dh = int((h_target - resize_h) / 2)
    image_paded[dh:resize_h + dh, dw:resize_w + dw, :] = image_resized
    image = image_paded / 255.0

    return image


def random_horizontal_flip(img, bboxes, p=0.5):
    """Mirror the image left-right with probability ``p``, remapping boxes.

    bboxes rows start with [x1, y1, x2, y2] in pixel coordinates; x-values
    are reflected about the image width so x1 < x2 stays true.
    """
    if random.random() >= p:
        return img, bboxes
    width = img.shape[1]
    img = img[:, ::-1, :]
    # Reflect x-coords: new_x1 = w - old_x2, new_x2 = w - old_x1.
    bboxes[:, [0, 2]] = width - bboxes[:, [2, 0]]
    return img, bboxes


def img_preprocess2(image, bboxes, target_shape, correct_box=True):
    """Letterbox-resize to ``target_shape`` = (h, w) with RGB + [0, 1] scaling.

    The aspect ratio is preserved: the image is scaled by the limiting
    ratio, centered on a mid-gray (128) canvas and normalized. When
    ``correct_box`` is True the bbox coordinates are mapped into the
    letterboxed frame and returned together with the image.

    :param image: BGR uint8 image to process
    :param target_shape: desired output shape as (h, w)
    :return: processed image (and corrected bboxes when requested)
    """
    h_target, w_target = target_shape
    h_org, w_org, _ = image.shape

    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)

    # Scale by the limiting dimension so the whole image fits the canvas.
    resize_ratio = min(1.0 * w_target / w_org, 1.0 * h_target / h_org)
    resize_w = int(resize_ratio * w_org)
    resize_h = int(resize_ratio * h_org)

    dw = int((w_target - resize_w) / 2)
    dh = int((h_target - resize_h) / 2)

    canvas = np.full((h_target, w_target, 3), 128.0)
    canvas[dh:dh + resize_h, dw:dw + resize_w, :] = cv2.resize(image, (resize_w, resize_h))
    image = canvas / 255.0

    if not correct_box:
        return image
    # Map box corners through the same scale-then-offset transform.
    bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw
    bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh
    return image, bboxes


def random_translate(img, bboxes, p=0.5):
    """Randomly shift the image with probability ``p``, keeping boxes inside.

    The translation range is bounded by the gap between the union of all
    bboxes and the image border (minus one pixel), so no ground-truth box
    leaves the frame; box coordinates are shifted to match.
    """
    if random.random() >= p:
        return img, bboxes

    h_img, w_img, _ = img.shape
    # Smallest rectangle that contains every bbox.
    union = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
    room_left, room_up = union[0], union[1]
    room_right = w_img - union[2]
    room_down = h_img - union[3]

    tx = random.uniform(-(room_left - 1), (room_right - 1))
    ty = random.uniform(-(room_up - 1), (room_down - 1))

    # Affine shift by (tx, ty).
    shift = np.array([[1, 0, tx], [0, 1, ty]])
    img = cv2.warpAffine(img, shift, (w_img, h_img))

    bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
    bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
    return img, bboxes


def random_crop(img, bboxes, p=0.5):
    """Randomly crop the image with probability ``p``, keeping boxes intact.

    The crop window is sampled between the union of all bboxes and the
    image border, so no ground-truth box is ever cut; box coordinates are
    shifted into the cropped frame.
    """
    if random.random() < p:
        h_img, w_img, _ = img.shape
        # Smallest rectangle that contains every bbox.
        max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
        max_l_trans = max_bbox[0]
        max_u_trans = max_bbox[1]
        max_r_trans = w_img - max_bbox[2]
        max_d_trans = h_img - max_bbox[3]

        crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))
        crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))
        # BUGFIX: clamp with min(), not max(). The candidate value is
        # always <= w_img/h_img by construction, so max() made the crop
        # end exactly at the border — the right/bottom edges were never
        # cropped at all.
        crop_xmax = min(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans)))
        crop_ymax = min(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans)))

        img = img[crop_ymin : crop_ymax, crop_xmin : crop_xmax]

        bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
        bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
    return img, bboxes

