# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Project: torch-Face-Recognize-Pipeline
# @Author : panjq
# @Date   : 2019-9-20 13:18:34
# url     : https://blog.csdn.net/u010397980/article/details/88088025
# --------------------------------------------------------
"""

import numpy as np
import torch
from detection.transforms import augment_bbox
from detection.transforms import augment_bbox_landm
from basetrainer.utils import log


class BoxesTransform(object):
    """Augmentation/preprocessing pipeline for detection samples (image, boxes, labels)."""

    def __init__(self, size, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], norm=False, padding=False,
                 bg_dir=None, trans_type="train"):
        """
        Notes from COCO experiments:
        (1) After MultiScaleRandomCrop and RandomMosaic it is recommended to append
            RandomCropLarge, which gives a clear mAP improvement, e.g.:
                MultiScaleRandomCrop
                RandomMosaic
                RandomCropLarge
        (2) Mosaic has a clearly visible effect, but p=0.1 and samples=[2, 3, 4] need
            experimental tuning: if p is too small the generalization gain is lost;
            if it is too large the data distribution is distorted. On COCO, p in
            0.1-0.2 works best; on other datasets try values between 0.1 and 0.5.
        (3) Combination with the highest mAP on COCO (padding=False):
                MultiScaleRandomCrop(size,
                                     scale=[1.1, 1.2, 1.5],
                                     padding=padding,
                                     transform=None,
                                     ),
                RandomMosaic(size=size1x5, p=0.1, samples=[2, 3, 4], padding=padding),
                RandomCropLarge(size=size)
        (4) Open bug: aspect-ratio-preserving resize via padding avoids deformation
            and should improve detection, yet measured mAP drops -- possibly because
            the GT boxes are not mapped back to the original coordinates at test time.
        :param size: output image size
        :param mean: normalization mean
        :param std: normalization std
        :param norm: whether the input image is normalized; if mean/std are > 1.0 use
                     norm=False; if mean/std are < 1.0 the image must be scaled to
                     0~1 first, so use norm=True
        :param padding: whether to pad for aspect-ratio-preserving resize
        :param bg_dir: directory with background images for random box pasting
        :param trans_type: transform type, case-insensitive: "train", "TrainYolo",
                           "val"/"test", "ValYolo"/"TestYolo" or "demo"
        :return:
        """
        logger = log.get_logger()
        logger.info('size:{},mean:{},std {},norm:{}, padding:{}, bg_dir:{}, trans_type:{}'.
                    format(size, mean, std, norm, padding, bg_dir, trans_type))
        # Enlarged intermediate sizes used by the crop/mosaic stages below.
        size1x5 = augment_bbox.scale_size(size, 1.5)
        size1x3 = augment_bbox.scale_size(size, 1.3)
        size1x2 = augment_bbox.scale_size(size, 1.2)
        self.padding = padding
        self.size = size
        # Normalize the type once; all branches compare against lower-case keys.
        trans_key = trans_type.lower()
        if trans_key == "train":
            augment_v3 = [
                # augment_bbox.RandomRot90(),  # random landscape/portrait switch
                # augment_bbox.RandomRotation(degrees=15),
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.RandomHorizontalFlip(),
                augment_bbox.RandomColorJitter(),
                augment_bbox.RandomBoxesPaste(p=0.5, bg_dir=bg_dir) if bg_dir else None,
                augment_bbox.RandomCropLarge(size=size1x5),
                augment_bbox.ResizePadding(size1x5) if padding else None,
                # augment_bbox.RandomCrop(),
                # augment_bbox.ResizeRandomPadding(size, p=1.0),
                augment_bbox.RandomMosaic(size1x5, p=0.5, samples=[4]),
                # augment_bbox.RandomMixup(size1x5, p=1.0),
                augment_bbox.RandomCropLarge(size=size),
                # augment_bbox.RandomBoxesPaste(bg_dir=bg_dir),
                # augment_bbox.RandomVerticalFlip(),
                # augment_bbox.RandomContrastBrightness(),
                # augment_bbox.Resize(size),
                augment_bbox.ResizePadding(size) if padding else augment_bbox.Resize(size),
                # augment_bbox.SwapChannels(),
                augment_bbox.NormalizeBoxesCoords(),
                augment_bbox.ProtectBoxes(norm=True),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
            augment_v4 = [
                augment_bbox.RandomColorJitter(),
                augment_bbox.RandomHorizontalFlip(),
                augment_bbox.MultiScaleRandomCrop(size,
                                                  scale=[1.2, 1.4, 1.6, 1.8],
                                                  padding=padding,
                                                  transform=[augment_bbox.RandomRotation(degrees=10)],
                                                  ),
                augment_bbox.RandomMosaic(size=size1x3, p=0.3, samples=[2, 3, 4], padding=padding),
                augment_bbox.RandomCropLarge(size=size),
                augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                augment_bbox.NormalizeBoxesCoords(),
                augment_bbox.ProtectBoxes(norm=True),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
            # augment = augment_v3  # kept as a documented experimental alternative
            augment = augment_v4
        elif trans_key == "trainyolo":
            augment_v1 = [
                # notes: COCO-TrainYolo samples=[2, 3, 4],padding=False,yolox-nano-mAP 0.2462
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.RandomHorizontalFlip(),
                augment_bbox.RandomColorJitter(),
                augment_bbox.RandomCropLarge(size=size1x5),
                augment_bbox.ResizePadding(size1x5) if padding else None,
                augment_bbox.RandomMosaic(size=size1x5, p=0.5, samples=[2, 3, 4], padding=padding),
                augment_bbox.RandomCropLarge(size=size1x2),
                augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.YOLOCoords(max_boxes=120, norm=False),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
            augment_v2 = [
                # notes: COCO-TrainYolo samples=[2, 3, 4],padding=False,yolox-nano-mAP 0.2445
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.RandomColorJitter(),
                augment_bbox.RandomHorizontalFlip(),
                augment_bbox.MultiScaleRandomCrop(size,
                                                  scale=[1.1, 1.2, 1.5],
                                                  padding=padding,
                                                  # transform=[augment_bbox.RandomRotation(degrees=10)],
                                                  transform=None,
                                                  ),
                augment_bbox.RandomMosaic(size=size1x5, p=0.1, samples=[2, 3, 4], padding=padding),
                augment_bbox.RandomCropLarge(size=size),
                augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.YOLOCoords(max_boxes=120, norm=False),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
            # augment = augment_v1  # kept as a documented experimental alternative
            augment = augment_v2
        elif trans_key == "val" or trans_key == "test":
            augment = [
                augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                augment_bbox.NormalizeBoxesCoords(),
                augment_bbox.ProtectBoxes(norm=True),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
        elif trans_key == "valyolo" or trans_key == "testyolo":
            # BUGFIX: the original compared `trans_type == "ValYolo".lower()` (the
            # raw, un-lowered value), so mixed-case inputs like "ValYolo" fell
            # through to NotImplementedError instead of matching this branch.
            augment = [
                augment_bbox.ProtectBoxes(norm=False),
                augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                augment_bbox.YOLOCoords(max_boxes=120, norm=False),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
        elif trans_key == "demo":
            # bg_dir = "/home/dm/data3/dataset/finger_keypoint/finger/val/images"
            augment = [
                # augment_bbox.RandomRot90(),  # random landscape/portrait switch
                # augment_bbox.RandomRotation(degrees=15),
                # augment_bbox.ProtectBoxes(norm=False),
                # augment_bbox.MultiScaleResize(size, scale=[1.2, 1.4, 1.6, 1.8], padding=padding),
                # augment_bbox.RandomCropLarge(size=size),
                # augment_bbox.RandomHorizontalFlip(),
                # augment_bbox.RandomColorJitter(),
                # augment_bbox.RandomBoxesPaste(p=0.5, bg_dir=bg_dir) if bg_dir else None,
                # augment_bbox.ScaleResize(size, scale=1.3, padding=padding),
                augment_bbox.MultiScaleRandomCrop(size, scale=[1.2, 1.4, 1.6, 1.8], padding=padding,
                                                  transform=[augment_bbox.RandomRotation(degrees=10)]),
                augment_bbox.RandomMosaic(size1x3, p=1.0, samples=[2, 3, 4], padding=padding, flip=True),
                # augment_bbox.RandomMixup(size1x3, p=1.0),
                # augment_bbox.RandomCropLarge(size=size),
                # augment_bbox.ScaleResize(size, scale=1.0, padding=padding),
                # augment_bbox.SwapChannels(),
                augment_bbox.NormalizeBoxesCoords(),
                augment_bbox.ProtectBoxes(norm=True),
                augment_bbox.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox.ToTensor(),
            ]
        else:
            raise NotImplementedError("no {} transform implemented you have defined.".format(trans_type))
        # BUGFIX: the conditional entries above ("... if bg_dir else None",
        # "... if padding else None") leave literal None placeholders in the list;
        # drop them before composing, as YoloTransform already does.
        augment = [a for a in augment if a is not None]
        self.transform = augment_bbox.Compose(augment)

    def __call__(self, image, boxes=None, labels=None):
        """Apply the configured transform chain to one (image, boxes, labels) sample."""
        return self.transform(image, boxes, labels)


class LandmsTransform(object):
    def __init__(self, size, mean=0.0, std=1.0, flip_index=[], norm=False, padding=False,
                 bg_dir=None, trans_type="train"):
        """
        Augmentation/preprocessing pipeline for samples carrying boxes AND landmarks.

        :param size: output size (W, H); a scalar is expanded to (size, size)
        :param mean: Normalize mean
        :param std:  Normalize std
        :param flip_index: when the image is flipped, the corresponding keypoints
                           must be remapped too; if the input keypoints have a
                           left/right relationship, set flip_index so the index
                           mapping is preserved after a flip. E.g. for 5-point
                           face landmarks: flip_index=[2, 3, 0, 1, 4, 5, 8, 9, 6, 7]
        :param norm: requested image-normalization flag (see NOTE below -- it is
                     currently forced to False inside this constructor)
        :param padding: whether to pad for aspect-ratio-preserving resize
        :param bg_dir: directory of background images (only used by commented-out
                       stages in the current pipelines)
        :param trans_type: transform type: "train", "val"/"test" or "demo"
                           (case-insensitive)
        """
        logger = log.get_logger()
        logger.info('size:{},mean:{},std {},norm:{}, padding:{}, bg_dir:{}, trans_type:{}'.
                    format(size, mean, std, norm, padding, bg_dir, trans_type))
        if not flip_index:
            # Default mapping for 5-point face landmarks (x,y pairs flattened to 10 values).
            flip_index = [2, 3, 0, 1, 4, 5, 8, 9, 6, 7]
        if not (isinstance(size, list) or isinstance(size, tuple)):
            size = (size, size)
        self.padding = padding
        self.size = size
        norm = False  # NOTE(review): the `norm` argument is silently overridden here -- confirm this is intentional
        if trans_type.lower() == "train":
            augment = [
                # augment_bbox_landm.ProtectBoxes(),
                # augment_bbox.RandomRot90(),  # random landscape/portrait switch
                augment_bbox_landm.RandomRotation(degrees=15),
                augment_bbox_landm.ProtectBoxes(norm=False),
                augment_bbox_landm.RandomHorizontalFlip(flip_index=flip_index),
                # augment_bbox_landm.RandomBoxesPaste(bg_dir=bg_dir),
                # augment_bbox_landm.RandomVerticalFlip(),
                augment_bbox_landm.RandomMosaic(size, p=0.3, samples=[2, 3, 4], flip_index=flip_index,
                                                flip=True, padding=True),
                augment_bbox_landm.RandomCrop(),
                # augment_bbox_landm.RandomCropLarge(size=size),
                augment_bbox_landm.RandomContrastBrightness(),
                augment_bbox_landm.ResizePadding(size) if padding else augment_bbox_landm.Resize(size),
                # augment_bbox_landm.ResizeRandomPadding(size, p=1.0),
                # augment_bbox_landm.RandomAffineResizePadding(output_size=size, degrees=15),
                # augment_bbox_landm.Resize(size),
                # augment_bbox_landm.ResizePadding(size),
                # augment_bbox_landm.RandomColorJitter(),
                # augment_bbox_landm.SwapChannels(),
                augment_bbox_landm.NormalizeBoxesCoords(),
                augment_bbox_landm.ProtectBoxes(norm=True),
                augment_bbox_landm.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox_landm.ToTensor(),
            ]
        elif trans_type.lower() == "val" or trans_type.lower() == "test":
            augment = [
                # augment_bbox_landm.Resize(size),
                augment_bbox_landm.ResizePadding(size) if padding else augment_bbox_landm.Resize(size),
                # augment_bbox_landm.ResizePadding(size),
                # augment_bbox_landm.RandomAffineResizePadding(output_size=size, degrees=0),
                augment_bbox_landm.NormalizeBoxesCoords(),
                augment_bbox_landm.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox_landm.ToTensor(),
            ]
        elif trans_type.lower() == "demo":
            augment = [
                # augment_bbox_landm.Resize(size),
                augment_bbox_landm.ResizePadding(size) if padding else augment_bbox_landm.Resize(size),
                # augment_bbox_landm.ResizePadding(size),
                # augment_bbox_landm.RandomAffineResizePadding(output_size=size, degrees=0),
                augment_bbox_landm.RandomMosaic(size, p=1.0, samples=[2, 3], flip_index=flip_index,
                                                flip=True, padding=True),
                augment_bbox_landm.NormalizeBoxesCoords(),
                augment_bbox_landm.Normalize(mean=mean, std=std, norm=norm),
                augment_bbox_landm.ToTensor(),
            ]
        else:
            raise NotImplementedError("no {} transform implemented you have defined.".format(trans_type))
        self.transform = augment_bbox_landm.Compose(augment)

    def __call__(self, image, boxes=None, labels=None, **kwargs):
        # kwargs carries extra per-sample targets (e.g. land_mark=...) through the chain.
        return self.transform(image, boxes, labels, **kwargs)


class YoloTransform:
    """Minimal preprocessing chain producing YOLO-style targets (no augmentation).

    ref: https://blog.csdn.net/u010397980/article/details/88088025
    """

    def __init__(self, size, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], norm=False, padding=False):
        """
        Args:
            size: the size the of final image.
            mean: mean pixel value per channel.
        """
        if not isinstance(size, (list, tuple)):
            size = (size, size)
        self.size = size
        self.padding = padding
        stages = [
            augment_bbox.ProtectBoxes(norm=False),
            augment_bbox.YOLOCoords(max_boxes=120, norm=False),
            augment_bbox.Normalize(mean=mean, std=std, norm=norm),
            augment_bbox.ToTensor(),
        ]
        # Drop any falsy placeholders before composing the pipeline.
        self.transform = augment_bbox.Compose([s for s in stages if s])

    def __call__(self, img, boxes, labels):
        """
        Args:
            img: the output of cv.imread in RGB layout.
            boxes: bounding boxes in the form of (x1, y1, x2, y2).
            labels: labels of boxes.
        """
        return self.transform(img, boxes, labels)


class TorchTransform:
    """Minimal preprocessing chain: normalize box coords + image, convert to tensor.

    ref: https://blog.csdn.net/u010397980/article/details/88088025
    """

    def __init__(self, size, mean=0.0, std=1.0, norm=False, padding=False):
        """
        Args:
            size: the size the of final image.
            mean: mean pixel value per channel.
        """
        if not isinstance(size, (list, tuple)):
            size = (size, size)
        self.size = size
        self.padding = padding
        self.transform = augment_bbox.Compose([
            augment_bbox.NormalizeBoxesCoords(),
            augment_bbox.Normalize(mean=mean, std=std, norm=norm),
            augment_bbox.ToTensor(),
        ])

    def __call__(self, img, boxes, labels):
        """
        Args:
            img: the output of cv.imread in RGB layout.
            boxes: bounding boxes in the form of (x1, y1, x2, y2).
            labels: labels of boxes.
        """
        return self.transform(img, boxes, labels)


def demo_for_landmark():
    """Visual smoke test: repeatedly augment a sample image with boxes and 5-point landmarks."""
    from detection.utils import image_utils
    image_file = "test.jpg"
    raw_boxes = np.asarray([[98, 42, 160, 100], [244, 260, 297, 332]], dtype=np.float32)
    raw_labels = np.asarray([1, 1], dtype=np.int32)
    # Two faces, five (x, y) keypoints each, flattened to shape (N, 10).
    raw_landm = np.asarray([[[122.44442, 54.193676],
                             [147.6293, 56.77364],
                             [135.35794, 74.66961],
                             [120.94379, 83.858765],
                             [143.35617, 86.417175]],
                            [[258.14902, 287.81662],
                             [281.83157, 281.46664],
                             [268.39877, 306.3493],
                             [265.5242, 318.80936],
                             [286.5602, 313.99652]]]).reshape(-1, 10)
    out_size = [400, 400]
    image = image_utils.read_image(image_file)
    augment = LandmsTransform(out_size, mean=0.0, std=1.0, trans_type="Train")
    for _ in range(1000):
        # Copy inputs each round so the random transforms never mutate the originals.
        dst_image, boxes, classes, kwargs = augment(image.copy(),
                                                    raw_boxes.copy(),
                                                    raw_labels.copy(),
                                                    land_mark=raw_landm.copy())
        augment_bbox_landm.show_landmark_image(dst_image, boxes, kwargs["land_mark"], classes,
                                               normal=True, transpose=True)


def demo_for_bboxes():
    """Visual smoke test: repeatedly augment a sample image with two boxes and draw the result."""
    from detection.utils import image_utils
    image_file = "test.jpg"
    raw_boxes = np.asarray([[98, 42, 160, 100], [244, 260, 297, 332]], dtype=np.float32)
    raw_labels = np.asarray([1, 1], dtype=np.int32)
    out_size = [400, 400]
    image = image_utils.read_image(image_file)
    augment = BoxesTransform(out_size, mean=0.0, std=1.0, norm=True, trans_type="Train")
    for _ in range(1000):
        # Copy inputs each round so the random transforms never mutate the originals.
        dst_image, boxes, classes = augment(image.copy(),
                                            raw_boxes.copy(),
                                            raw_labels.copy())
        augment_bbox.draw_result(dst_image, boxes, classes, normal=True, transpose=True)


if __name__ == "__main__":
    # The demo functions import detection.utils.image_utils locally themselves,
    # so the previously duplicated import here was unused and has been removed.
    demo_for_bboxes()
    # demo_for_landmark()
