import cv2
import numpy as np
import random
import torch

from utils.box import matrix_iof

# <Function: _crop/>
def _crop(image, boxes, labels, landm, img_size):
    """Randomly crop a square patch that fully contains at least one face.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    modified by yudamo.cn@gmail.com in 2020-03-07

    - image(ndarray): HxWxC input image.
    - boxes(ndarray): (N, 4) face boxes as (x1, y1, x2, y2) in pixels.
    - labels(ndarray): (N, ...) labels aligned row-wise with `boxes`.
    - landm(ndarray): (N, 10) landmarks, five (x, y) pairs in pixels.
    - img_size(int|list|tuple): training size; (height, width) when a sequence,
      otherwise the same value for both dimensions.

    Returns `(image, boxes, labels, landms, pad_image_flag)`.
    `pad_image_flag` is False only when a valid crop was produced within 250
    attempts; when True the inputs are returned unchanged and the caller is
    expected to pad the image to a square instead.
    """
    if isinstance(img_size, int):
        img_size = (img_size, img_size)
    # end-if
    height, width, _ = image.shape
    pad_image_flag = True
    # Candidate crop scales (fraction of the short side); hoisted out of the
    # retry loop because it is loop-invariant.
    PRE_SCALES = (0.3, 0.45, 0.6, 0.8, 1.0)
    for _ in range(250):
        scale = random.choice(PRE_SCALES)
        short_side = min(width, height)
        w = int(scale * short_side)
        h = w
        # pick a random top-left corner so the square crop stays in-bounds
        if width == w:
            l = 0
        else:
            l = random.randrange(width - w)
        # end-if
        if height == h:
            t = 0
        else:
            t = random.randrange(height - h)
        # end-if
        roi = np.array((l, t, l + w, t + h))
        # keep this crop only if it fully contains at least one box (IoF == 1)
        value = matrix_iof(boxes, roi[np.newaxis])
        flag = (value >= 1)
        if not flag.any():
            continue
        # end-if
        # retain the boxes whose centers fall strictly inside the crop
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask_a].copy()
        labels_t = labels[mask_a].copy()
        landms_t = landm[mask_a].copy()
        landms_t = landms_t.reshape([-1, 5, 2])
        if boxes_t.shape[0] == 0:
            continue
        # end-if
        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
        # rect: clip to the crop, then shift into crop coordinates
        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
        boxes_t[:, :2] -= roi[:2]
        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
        boxes_t[:, 2:] -= roi[:2]
        # landm: shift into crop coordinates, then clamp to the crop extent
        landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]
        landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))
        landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])
        landms_t = landms_t.reshape([-1, 10])
        # NOTE(review): upstream kept only faces > 16 px at training scale; the
        # threshold here is 0.0, so only degenerate (zero-size) boxes are dropped.
        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_size[0]
        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_size[1]
        mask_b = np.minimum(b_w_t, b_h_t) > 0.0
        boxes_t = boxes_t[mask_b]
        labels_t = labels_t[mask_b]
        landms_t = landms_t[mask_b]
        if boxes_t.shape[0] == 0:
            continue
        # end-if
        pad_image_flag = False
        return image_t, boxes_t, labels_t, landms_t, pad_image_flag
    # end-for
    return image, boxes, labels, landm, pad_image_flag
# <Function: /_crop>

# <Function: _distort/>
def _distort(image):
    """Apply random photometric jitter: brightness, contrast, saturation, hue.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    Contrast jitter is applied either before or after the HSV-space jitter,
    each with probability 0.5 (the two branches of the upstream code).
    """
    # <Function: _apply/>
    def _apply(img, alpha=1.0, beta=0.0):
        # img[:] = clip(img * alpha + beta, 0, 255), computed in float
        buf = img.astype(float) * alpha + beta
        np.clip(buf, 0, 255, out=buf)
        img[:] = buf
    # <Function: /_apply>
    image = image.copy()
    # Decide once whether the contrast jitter precedes or follows the HSV jitter.
    contrast_first = bool(random.randrange(2))
    # brightness distortion
    if random.randrange(2):
        _apply(image, beta=random.uniform(-32, 32))
    # end-if
    # contrast distortion (before-HSV variant)
    if contrast_first and random.randrange(2):
        _apply(image, alpha=random.uniform(0.5, 1.5))
    # end-if
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # saturation distortion
    if random.randrange(2):
        _apply(hsv[:, :, 1], alpha=random.uniform(0.5, 1.5))
    # end-if
    # hue distortion (OpenCV hue channel wraps modulo 180)
    if random.randrange(2):
        shifted = hsv[:, :, 0].astype(int) + random.randint(-18, 18)
        hsv[:, :, 0] = shifted % 180
    # end-if
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    # contrast distortion (after-HSV variant)
    if not contrast_first and random.randrange(2):
        _apply(image, alpha=random.uniform(0.5, 1.5))
    # end-if
    return image
# <Function: /_distort>

# <Function: _expand/>
def _expand(image, boxes, fill, p):
    """With probability 0.5, place the image on a larger canvas filled with
    `fill`, scaling the canvas by a random factor in [1, p] and shifting the
    boxes accordingly.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    """
    # half the time, leave the sample untouched
    if random.randrange(2):
        return image, boxes
    # end-if
    height, width, depth = image.shape
    ratio = random.uniform(1, p)
    new_w = int(ratio * width)
    new_h = int(ratio * height)
    # random placement of the original image inside the canvas
    left = random.randint(0, new_w - width)
    top = random.randint(0, new_h - height)
    shifted = boxes.copy()
    shifted[:, :2] += (left, top)
    shifted[:, 2:] += (left, top)
    canvas = np.empty((new_h, new_w, depth), dtype=image.dtype)
    canvas[:, :] = fill
    canvas[top:top + height, left:left + width] = image
    return canvas, shifted
# <Function: /_expand>

# <Function: _mirror/>
def _mirror(image, boxes, landms):
    """With probability 0.5, horizontally flip the image and mirror boxes and
    landmarks; the left/right eye (points 0, 1) and mouth-corner (points 3, 4)
    landmarks are swapped so semantics are preserved.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    """
    _, width, _ = image.shape
    if not random.randrange(2):
        return image, boxes, landms
    # end-if
    flipped = image[:, ::-1]
    # rect: mirror x-coordinates (x1' = W - x2, x2' = W - x1)
    new_boxes = boxes.copy()
    new_boxes[:, 0::2] = width - new_boxes[:, 2::-2]
    # landm: mirror x, then swap eyes (0<->1) and mouth corners (3<->4)
    pts = landms.copy().reshape([-1, 5, 2])
    pts[:, :, 0] = width - pts[:, :, 0]
    pts = pts[:, [1, 0, 2, 4, 3], :]
    return flipped, new_boxes, pts.reshape([-1, 10])
# <Function: /_mirror>

# <Function: _pad_to_square/>
def _pad_to_square(image, rgb_mean, pad_image_flag):
    """Pad the image (bottom/right) with `rgb_mean` up to a square; returns the
    input unchanged when `pad_image_flag` is False.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    """
    if not pad_image_flag:
        return image
    # end-if
    height, width, _ = image.shape
    side = max(height, width)
    canvas = np.full((side, side, 3), rgb_mean, dtype=image.dtype)
    canvas[:height, :width] = image
    return canvas
# <Function: /_pad_to_square>

# <Function: _pad_to_square_with_normalize/>
def _pad_to_square_with_normalize(image, nrom_mean, pad_image_flag):
    """Pad the image (bottom/right) up to a square using a normalized (0-1)
    mean color, rescaled to pixel range and cast to the image dtype; returns
    the input unchanged when `pad_image_flag` is False.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    Modified by yudamo.cn@gmail.com in 2020-03-08
    """
    if not pad_image_flag:
        return image
    # end-if
    # normalized mean -> pixel-range fill color in the image's dtype
    fill = (nrom_mean * 255.0).astype(image.dtype)
    height, width, _ = image.shape
    side = max(height, width)
    canvas = np.full((side, side, 3), fill, dtype=image.dtype)
    canvas[:height, :width] = image
    return canvas
# <Function: /_pad_to_square_with_normalize>

# <Function: _resize_subtract_mean/>
def _resize_subtract_mean(image, insize, rgb_mean):
    """Resize to the network input size with a randomly chosen interpolation,
    subtract the per-channel mean, and return the image in CHW layout.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    """
    if isinstance(insize, (list, tuple)):
        dsize = (insize[1], insize[0])  # cv2.resize expects (width, height)
    else:
        dsize = (insize, insize)
    # end-if
    methods = (cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4)
    resized = cv2.resize(image, dsize, interpolation=methods[random.randrange(5)])
    out = resized.astype(np.float32)
    out -= rgb_mean
    return out.transpose(2, 0, 1)
# <Function: /_resize_subtract_mean>

# <Function: _resize_subtract_normalize/>
def _resize_subtract_normalize(image, insize, mean, std_inv):
    """Resize to the network input size with a randomly chosen interpolation,
    scale pixels to [0, 1], normalize as (x - mean) * std_inv, and return the
    image in CHW layout.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/data_augment.py \n
    """
    if isinstance(insize, (list, tuple)):
        dsize = (insize[1], insize[0])  # cv2.resize expects (width, height)
    else:
        dsize = (insize, insize)
    # end-if
    methods = (cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4)
    resized = cv2.resize(image, dsize, interpolation=methods[random.randrange(5)])
    out = resized.astype(np.float32) / 255.0
    out -= mean
    out *= std_inv
    return out.transpose(2, 0, 1)
# <Function: /_resize_subtract_normalize>

# <Function: detection_collate/>
def detection_collate(batch):
    """Custom collate fn for dealing with batches of images that have a
    different number of associated object annotations (bounding boxes).

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/wider_face.py \n

    - Arguments: \n
        batch: (tuple) A tuple of tensor images and lists of annotations

    - Return: \n
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on 0 dim
    """
    targets = []
    imgs = []
    for sample in batch:
        for item in sample:
            if torch.is_tensor(item):
                imgs.append(item)
            elif isinstance(item, np.ndarray):
                # annotations arrive as ndarray; convert once to a float tensor
                targets.append(torch.from_numpy(item).float())
            # end-if
        # end-for
    # end-for
    return (torch.stack(imgs, 0), targets)
# <Function: /detection_collate>

# <Class: TrainPreprocessor/>
class TrainPreprocessor(object):
    """Training-time preprocessing pipeline: crop, photometric distortion,
    pad-to-square, random mirror, then resize + normalize.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/wider_face.py
    - img_size(int|list|tuple): image size; (height, width) when a sequence,
      otherwise the same value for both height and width.
    - norm_means(ndarray|list|tuple): per-channel means in normalized (0-1) range.
    - norm_std(ndarray|list|tuple): per-channel stds in normalized (0-1) range.
    """
    # <Method: __init__/>
    def __init__(self, img_size, norm_means=(0, 0, 0), norm_std=(1, 1, 1)):
        self._img_size = img_size
        self._norm_means = np.array(norm_means)
        # store the reciprocal so __call__ multiplies instead of divides
        self._norm_std_inv = 1.0 / np.array(norm_std)
    # <Method: /__init__>

    # <Method: __call__/>
    def __call__(self, image, targets):
        # When the image carries no ground truth, fabricate one dummy row:
        # zero box, -1 landmarks and labels.
        if targets.shape[0] == 0:
            targets = np.full((1, 20), -1.0)
            targets[:, 0:4] = 0.0
        # end-if
        # split annotation columns: box (4) | landmarks (10) | labels (rest)
        boxes = targets[:, 0:4].copy()
        landm = targets[:, 4:14].copy()
        labels = targets[:, 14:].copy()
        # augmentation chain
        image_t, boxes_t, labels_t, landm_t, pad_flag = _crop(image, boxes, labels, landm, self._img_size)
        image_t = _distort(image_t)
        image_t = _pad_to_square_with_normalize(image_t, self._norm_means, pad_flag)
        image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t)
        height, width, _ = image_t.shape
        image_t = _resize_subtract_normalize(image_t, self._img_size, self._norm_means, self._norm_std_inv)
        # scale coordinates to [0, 1] relative to the pre-resize image
        boxes_t[:, 0::2] /= width
        boxes_t[:, 1::2] /= height
        landm_t[:, 0::2] /= width
        landm_t[:, 1::2] /= height
        return image_t, np.hstack((boxes_t, landm_t, labels_t))
    # <Method: /__call__>
# <Class: /TrainPreprocessor>

# <Class: EvalPreprocessor/>
class EvalPreprocessor(object):
    """Evaluation-time preprocessing: resize + normalize only, no augmentation.

    Copy from https://github.com/biubug6/Pytorch_Retinaface/blob/master/data/wider_face.py
    - img_size(int|list|tuple): image size; (height, width) when a sequence,
      otherwise the same value for both height and width.
    - norm_means(ndarray|list|tuple): per-channel means in normalized (0-1) range.
    - norm_std(ndarray|list|tuple): per-channel stds in normalized (0-1) range.
    """
    # <Method: __init__/>
    def __init__(self, img_size, norm_means=(0, 0, 0), norm_std=(1, 1, 1)):
        self._img_size = img_size
        self._norm_means = np.array(norm_means)
        # store the reciprocal so __call__ multiplies instead of divides
        self._norm_std_inv = 1.0 / np.array(norm_std)
    # <Method: /__init__>

    # <Method: __call__/>
    def __call__(self, image, targets):
        # When the image carries no ground truth, fabricate one dummy row:
        # zero box, -1 landmarks and labels.
        if targets.shape[0] == 0:
            targets = np.full((1, 20), -1.0)
            targets[:, 0:4] = 0.0
        # end-if
        # split annotation columns: box (4) | landmarks (10) | labels (rest)
        image_t = image.copy()
        boxes_t = targets[:, 0:4].copy()
        landm_t = targets[:, 4:14].copy()
        labels_t = targets[:, 14:].copy()
        height, width, _ = image_t.shape
        image_t = _resize_subtract_normalize(image_t, self._img_size, self._norm_means, self._norm_std_inv)
        # scale coordinates to [0, 1] relative to the pre-resize image
        boxes_t[:, 0::2] /= width
        boxes_t[:, 1::2] /= height
        landm_t[:, 0::2] /= width
        landm_t[:, 1::2] /= height
        return image_t, np.hstack((boxes_t, landm_t, labels_t))
    # <Method: /__call__>
# <Class: /EvalPreprocessor>