import random

import cv2
import math
import numpy as np

from utils.box_utils import matrix_iof


def _crop(image, boxes, labels, landm, img_dim):
    """Randomly crop a square region that fully contains at least one box.

    Makes up to 250 attempts; each attempt samples a square ROI whose side is
    a preset fraction of the image's short side and keeps only the boxes whose
    centers fall strictly inside it.  On success returns the cropped data with
    pad_image_flag=False; otherwise returns the inputs untouched with
    pad_image_flag=True so the caller pads the image to a square instead.

    Args:
        image: HWC image array.
        boxes: (N, 4) pixel boxes as x1, y1, x2, y2.
        labels: (N,) class labels aligned with boxes.
        landm: (N, 10) five landmarks per box as interleaved x, y pairs.
        img_dim: training input size, used to scale the min-face check.

    Returns:
        (image, boxes, labels, landms, pad_image_flag) tuple.
    """
    # Candidate crop scales relative to the short side.  Hoisted out of the
    # retry loop (it was rebuilt on each of the 250 iterations); the old
    # commented-out uniform-scale sampling was removed as dead code.
    pre_scales = (0.3, 0.45, 0.6, 0.8, 1.0)
    height, width, _ = image.shape
    pad_image_flag = True

    for _ in range(250):
        scale = random.choice(pre_scales)
        short_side = min(width, height)
        w = int(scale * short_side)
        h = w  # square crop

        # Random top-left corner; randrange is only drawn when there is slack,
        # preserving the original RNG consumption order.
        l = 0 if width == w else random.randrange(width - w)
        t = 0 if height == h else random.randrange(height - h)
        roi = np.array((l, t, l + w, t + h))

        # Require at least one box fully inside the ROI (intersection-over-
        # foreground == 1), otherwise retry.
        value = matrix_iof(boxes, roi[np.newaxis])
        flag = (value >= 1)
        if not flag.any():
            continue

        # Keep boxes whose centers are strictly inside the ROI.
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask_a].copy()
        labels_t = labels[mask_a].copy()
        landms_t = landm[mask_a].copy()
        landms_t = landms_t.reshape([-1, 5, 2])

        if boxes_t.shape[0] == 0:
            continue

        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]

        # Clip boxes to the ROI and shift them into crop coordinates.
        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
        boxes_t[:, :2] -= roi[:2]
        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
        boxes_t[:, 2:] -= roi[:2]

        # Shift landmarks into crop coordinates and clamp inside the crop.
        landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]
        landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))
        landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])
        landms_t = landms_t.reshape([-1, 10])

        # Drop faces that would be too small at the training image scale.
        # NOTE: the threshold is currently 0.0, so with the +1 offset this
        # filter never removes anything — kept for behavioral compatibility.
        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
        mask_b = np.minimum(b_w_t, b_h_t) > 0.0
        boxes_t = boxes_t[mask_b]
        labels_t = labels_t[mask_b]
        landms_t = landms_t[mask_b]

        if boxes_t.shape[0] == 0:
            continue

        return image_t, boxes_t, labels_t, landms_t, False

    # All attempts failed: signal the caller to pad instead of crop.
    return image, boxes, labels, landm, pad_image_flag


def _distort(image):
    """Photometric augmentation: random brightness / contrast / saturation / hue.

    Follows the SSD-style recipe: contrast is applied either before the
    BGR->HSV round-trip or after it, chosen uniformly at random.  The input
    is not modified; a jittered copy is returned.
    """

    def _linear(img, alpha=1, beta=0):
        # In-place img = clip(img * alpha + beta, 0, 255).
        out = img.astype(float) * alpha + beta
        out[out < 0] = 0
        out[out > 255] = 255
        img[:] = out

    image = image.copy()

    # Decide whether contrast jitter happens before or after the HSV pass.
    contrast_before_hsv = random.randrange(2)

    # brightness distortion
    if random.randrange(2):
        _linear(image, beta=random.uniform(-32, 32))

    # contrast distortion (first ordering) — short-circuit keeps the RNG
    # draw count identical to the branchy original.
    if contrast_before_hsv and random.randrange(2):
        _linear(image, alpha=random.uniform(0.5, 1.5))

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # saturation distortion
    if random.randrange(2):
        _linear(image[:, :, 1], alpha=random.uniform(0.5, 1.5))

    # hue distortion (OpenCV hue range is [0, 180))
    if random.randrange(2):
        shifted = image[:, :, 0].astype(int) + random.randint(-18, 18)
        shifted %= 180
        image[:, :, 0] = shifted

    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    # contrast distortion (second ordering)
    if not contrast_before_hsv and random.randrange(2):
        _linear(image, alpha=random.uniform(0.5, 1.5))

    return image


def _expand(image, boxes, fill, p):
    if random.randrange(2):
        return image, boxes

    height, width, depth = image.shape

    scale = random.uniform(1, p)
    w = int(scale * width)
    h = int(scale * height)

    left = random.randint(0, w - width)
    top = random.randint(0, h - height)

    boxes_t = boxes.copy()
    boxes_t[:, :2] += (left, top)
    boxes_t[:, 2:] += (left, top)
    expand_image = np.empty(
        (h, w, depth),
        dtype=image.dtype)
    expand_image[:, :] = fill
    expand_image[top:top + height, left:left + width] = image
    image = expand_image

    return image, boxes_t


def _mirror(image, boxes, landms):
    _, width, _ = image.shape
    if random.randrange(2):
        image = image[:, ::-1]
        boxes = boxes.copy()
        boxes[:, 0::2] = width - boxes[:, 2::-2]

        # landm
        landms = landms.copy()
        landms = landms.reshape([-1, 5, 2])
        landms[:, :, 0] = width - landms[:, :, 0]
        tmp = landms[:, 1, :].copy()
        landms[:, 1, :] = landms[:, 0, :]
        landms[:, 0, :] = tmp
        tmp1 = landms[:, 4, :].copy()
        landms[:, 4, :] = landms[:, 3, :]
        landms[:, 3, :] = tmp1
        landms = landms.reshape([-1, 10])

    return image, boxes, landms


def _pad_to_square(image, rgb_mean, pad_image_flag):
    if not pad_image_flag:
        return image
    height, width, _ = image.shape
    long_side = max(width, height)
    image_t = np.empty((long_side, long_side, 3), dtype=image.dtype)
    image_t[:, :] = rgb_mean
    image_t[0:0 + height, 0:0 + width] = image
    return image_t


def _resize_subtract_mean(image, insize, rgb_mean):
    """Resize to (insize, insize) with a random interpolation method,
    subtract the per-channel mean, and convert HWC -> CHW float32."""
    methods = (cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA,
               cv2.INTER_NEAREST, cv2.INTER_LANCZOS4)
    chosen = methods[random.randrange(len(methods))]

    resized = cv2.resize(image, (insize, insize), interpolation=chosen)
    resized = resized.astype(np.float32)
    resized -= rgb_mean
    return resized.transpose(2, 0, 1)


def load_image(image_path, img_size):
    """loads 1 image from dataset, returns img, original hw, resized hw"""
    # Accept either an already-decoded ndarray or a path on disk.
    if isinstance(image_path, np.ndarray):
        img = image_path
    else:
        img = cv2.imread(filename=image_path)
    assert img is not None, 'Image Not Found ' + image_path

    origin_h, origin_w = img.shape[:2]

    # Scale the long side to img_size, keeping aspect ratio.
    ratio = img_size / max(origin_h, origin_w)
    if ratio != 1:
        # always resize down, only resize up if training with augmentation
        new_size = (int(origin_w * ratio), int(origin_h * ratio))
        img = cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)

    return img, (origin_h, origin_w), img.shape[:2]


def random_affine_five_keypoints(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
    """Apply a random rotation/scale/translation/shear to img and its labels.

    Each row of `targets` is expected to be: 4 bbox values (xyxy), then 5
    keypoints stored as interleaved (x, y, v) triplets in columns 4..18,
    then a trailing score column (20 columns total).

    Args:
        img: HWC image (e.g. a 2*img_size mosaic).
        targets: (N, 20) label array; may be empty.
        degrees, translate, scale, shear: jitter magnitudes.
        border: (dy, dx) border offsets; negative values shrink the output
            (used by the mosaic pipeline to crop back to img_size).

    Returns:
        (warped_img, labels) where labels is (M, 20) with boxes clipped to
        the output image and off-image keypoints marked invisible (v = -1).
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1]  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0]  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined transform matrix — ORDER IS IMPORTANT HERE!!
    M = S @ T @ R
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(0, 0, 0))

    # NOTE: removed the unconditional debug cv2.imwrite of the warped image —
    # it was leftover development output that failed silently when ./output
    # was missing and added disk I/O to every training sample.

    # Guard against empty labels (e.g. a mosaic where every image had no
    # annotations) — previously this crashed on targets[:, :4].
    targets = np.asarray(targets, dtype=np.float32)
    if targets.size == 0:
        return img, np.zeros((0, 20), dtype=np.float32)

    # Column indices of keypoint x/y pairs and of their visibility flags.
    keypoints_lst = [4, 5, 7, 8, 10, 11, 13, 14, 16, 17]
    keypoints_visible_lst = [6, 9, 12, 15, 18]
    bboxs = targets[:, :4]
    keypoints = targets[:, keypoints_lst]
    keypoints_visible = targets[:, keypoints_visible_lst]
    score = targets[:, -1]

    n = len(bboxs)
    if n:
        # Warp all 4 corners of every box.
        xy = np.ones((n * 4, 3))
        xy[:, :2] = bboxs[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # Axis-aligned boxes around the warped corners.
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # Clip to the image and reject degenerate / mostly-cropped boxes.
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (bboxs[:, 2] - bboxs[:, 0]) * (bboxs[:, 3] - bboxs[:, 1])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        select_index = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)

        bboxs = bboxs[select_index]
        bboxs[:, 0:4] = xy[select_index]

        # Warp the five keypoints.
        xy_keypoints = np.ones((n * 5, 3), dtype=np.float32)
        xy_keypoints[:, :2] = keypoints.reshape(n * 5, 2)
        xy_keypoints = (xy_keypoints @ M.T)[:, :2].reshape(n, 10)
        keypoints_ = np.hstack((xy_keypoints, keypoints_visible))
        # (x1,y1,...,x5,y5,v1..v5) --> (x1,y1,v1, x2,y2,v2, ..., x5,y5,v5)
        change_index_order_lst = [0, 1, 10, 2, 3, 11, 4, 5, 12, 6, 7, 13, 8, 9, 14]
        keypoints_ = keypoints_[:, change_index_order_lst].reshape((keypoints_.shape[0]), 5, 3)

        # Mark keypoints that left the image as invisible (v = -1).  Bounds
        # are the actual warped image size; the old code hard-coded 640,
        # which was only correct for a 640-pixel training resolution.
        out_of_bounds = ((keypoints_[:, :, 0] < 0) | (keypoints_[:, :, 0] > width)
                         | (keypoints_[:, :, 1] < 0) | (keypoints_[:, :, 1] > height))
        keypoints_[out_of_bounds, 2] = -1

        keypoints_ = keypoints_.reshape((keypoints_.shape[0]), 15)
        keypoints = keypoints_.astype(np.float32)[select_index]

        # Keep scores aligned with the surviving boxes.
        score = score[select_index]

    labels = np.hstack((bboxs, keypoints, score.reshape((score.shape[0], 1))))

    return img, labels


def data_expansion_mosaic_five_keypoints(image_path_lst, image_labels, img_size):
    """Compose 4 images into a (2*img_size, 2*img_size) mosaic and remap labels.

    Each row of image_labels[i] appears to hold (x, y, w, h) box values plus
    5 keypoint (x, y, v) triplets and a trailing score, in original-image
    pixel units — TODO confirm against the dataset loader.  The combined
    mosaic and pixel-xyxy labels are passed through a random affine at the
    end, which also crops back to img_size via the negative border.
    """
    mosaic_border = [-img_size // 2, -img_size // 2]
    # mosaic center x, y
    y_center, x_center = [int(random.uniform(-x, 2 * img_size + x)) for x in mosaic_border]
    # base image with 4 tiles
    img_4 = np.full((img_size * 2, img_size * 2, 3), 0, dtype=np.uint8)
    labels_4 = []

    for i, image_path in enumerate(image_path_lst):
        # oh/ow: original size; h/w: size after resizing the long side to img_size.
        img, (oh, ow), (h, w) = load_image(image_path, img_size=img_size)

        # place img in img4
        if i == 0:
            # top left
            # xmin, ymin, xmax, ymax (large image)
            x1a, y1a, x2a, y2a = max(x_center - w, 0), max(y_center - h, 0), x_center, y_center
            # xmin, ymin, xmax, ymax (small image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
        elif i == 1:
            # top right
            x1a, y1a, x2a, y2a = x_center, max(y_center - h, 0), min(x_center + w, img_size * 2), y_center
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:
            # bottom left
            # NOTE(review): `max(x_center, w)` for x2b differs from the usual
            # yolov5 mosaic (which uses `w`); it only yields the right slice
            # width because numpy clamps slice indices past the end — confirm
            # this is intentional.
            x1a, y1a, x2a, y2a = max(x_center - w, 0), y_center, x_center, min(img_size * 2, y_center + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(x_center, w), min(y2a - y1a, h)
        else:
            # bottom right
            x1a, y1a, x2a, y2a = x_center, y_center, min(x_center + w, img_size * 2), min(img_size * 2, y_center + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        # img4[ymin:ymax, xmin:xmax]
        img_4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]

        # Offset from small-image coordinates to mosaic coordinates.
        pad_w = x1a - x1b
        pad_h = y1a - y1b

        x = np.array(image_labels[i], dtype=np.float32)
        labels_i = x.copy()
        # Normalize box x/w and y/h by the ORIGINAL image size.
        x[:, 0:4:2] /= ow
        x[:, 1:4:2] /= oh
        # (x, y) + (w, h) / 2: presumably converts a top-left corner to the
        # box center — TODO confirm the raw label convention.
        x[:, 0] += x[:, 2] / 2
        x[:, 1] += x[:, 3] / 2
        num = x.shape[0]
        # Normalize keypoint (x, y) pairs; visibility (3rd channel) untouched.
        a = x[:, 4:-1].reshape(num, -1, 3) / [ow, oh, 1]
        x[:, 4:-1] = a.reshape(num, -1)
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels_i[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + pad_w
            labels_i[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + pad_h
            labels_i[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + pad_w
            labels_i[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + pad_h

            # keypoints: columns 4,7,10,13,16 are x, the next column is y.
            for j in range(4, 19, 3):
                labels_i[:, j] = w * x[:, j] + pad_w
                labels_i[:, j + 1] = h * x[:, j + 1] + pad_h

        if len(labels_i) > 0:
            labels_4.append(labels_i)

    # Concat/clip labels
    if len(labels_4):
        labels_4 = np.concatenate(labels_4, 0)
        np.clip(labels_4[:, 1:], 0, 2 * img_size, out=labels_4[:, 1:])  # use with random_affine
    # NOTE(review): if every image had no labels, labels_4 stays an empty
    # list here and is passed straight to random_affine_five_keypoints.

    # cv2.imwrite("./output/img_before_random_affine.jpg", img_4)

    img_4, labels_4 = random_affine_five_keypoints(img_4, labels_4, 10, 0.1, 0.1, 10, mosaic_border)

    return img_4, labels_4


def mosaic(imgs_path, words, index, img_dim):
    """Build a 4-image mosaic sample: the image at `index` plus 3 randomly
    drawn images, composed and augmented, then resized/mean-subtracted for
    the network.  Returns (CHW float32 image, labels)."""
    # 3 additional image indices drawn uniformly from the dataset.
    picks = [index] + [random.randint(0, len(imgs_path) - 1) for _ in range(3)]

    paths = [imgs_path[i] for i in picks]
    labels = [np.asarray(words[i], dtype=np.float32) for i in picks]

    img, labels = data_expansion_mosaic_five_keypoints(paths, labels, img_dim)

    img = _resize_subtract_mean(image=img, insize=img_dim, rgb_mean=(104, 117, 123))
    return img, labels


class preproc(object):
    """Training-time augmentation pipeline for face detection.

    Applies random crop, photometric distortion, pad-to-square, horizontal
    mirror, then resize + mean subtraction.  Boxes and landmarks are returned
    normalized to [0, 1] relative to the pre-resize image.
    """

    def __init__(self, img_dim, rgb_means):
        # img_dim: network input side length; rgb_means: per-channel mean.
        self.img_dim = img_dim
        self.rgb_means = rgb_means

    def __call__(self, image, targets):
        """Augment one sample.

        targets rows: 4 box coords, 10 landmark coords, 1 label (15 columns).
        Returns (CHW float32 image, (M, 15) normalized targets).
        """
        assert targets.shape[0] > 0, "this image does not have gt"

        boxes = targets[:, :4].copy()
        labels = targets[:, -1].copy()
        landm = targets[:, 4:-1].copy()

        image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(
            image, boxes, labels, landm, self.img_dim)
        image_t = _distort(image_t)
        image_t = _pad_to_square(image_t, self.rgb_means, pad_image_flag)
        image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t)
        height, width, _ = image_t.shape

        # Drop boxes whose width or height is below 15 px (mask computed
        # once and reused for boxes, landmarks and labels).
        wh = np.c_[boxes_t[:, 2] - boxes_t[:, 0], boxes_t[:, 3] - boxes_t[:, 1]]
        keep = np.all(wh[:, :] >= 15.0, axis=1)
        boxes_t = boxes_t[keep]
        landm_t = landm_t[keep]
        labels_t = np.expand_dims(labels_t, 1)[keep]

        image_t = _resize_subtract_mean(image_t, self.img_dim, self.rgb_means)

        # Normalize coordinates by the pre-resize image size
        # (prepares for multi-scale training).
        boxes_t[:, 0::2] /= width
        boxes_t[:, 1::2] /= height
        landm_t[:, 0::2] /= width
        landm_t[:, 1::2] /= height

        targets_t = np.hstack((boxes_t, landm_t, labels_t))

        return image_t, targets_t
