from __future__ import division
import logging
import time
from typing import Union

import cv2
import numpy as np
from skimage import transform as trans
from sklearn import preprocessing


# Placeholder for the inference-backend type referenced by RetinaFace.__init__'s
# annotation; presumably the surrounding project injects a real class here —
# TODO confirm where DIT is normally defined.
DIT = None
# Advertised batch-size cap (stored by RetinaFace.prepare(); not enforced here).
MAX_BATCH_SIZE = 1024

def resize_factor(img, resize_shape=(480, 640), pad: bool = True, mode: str = 'pad'):
    """Compute resize geometry for fitting `img` into `resize_shape`.

    :param img: object exposing `original_height` / `original_width` attributes
    :param resize_shape: (height, width) of the target canvas
    :param pad: when True (and mode != 'stretch'), return the aspect-preserving
        size plus the padding needed to fill the canvas
    :param mode: 'stretch' returns the canvas size directly; anything else
        keeps the aspect ratio
    :return: ((width, height), padding, scale); padding appears to be
        (0, vertical deficit, 0, horizontal deficit) — confirm order at callers
    """
    target_h, target_w = resize_shape
    src_h, src_w = img.original_height, img.original_width
    scale = min(target_w / src_w, target_h / src_h)

    if mode == 'stretch':
        return (target_w, target_h), (0, 0, 0, 0), scale

    # If image is too small, it may contain only a single face, which lowers
    # detection accuracy, so the upscale factor is damped.
    if scale > 3:
        scale *= 0.7

    if not pad:
        return (target_w, target_h), (0, 0, 0, 0), scale

    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    return (new_w, new_h), (0, target_h - new_h, 0, target_w - new_w), scale


def nms(dets, thresh=0.4):
    """Greedy non-maximum suppression over [x1, y1, x2, y2, score] rows.

    :param dets: (N, 5+) array; columns 0-3 are box corners, column 4 the score
    :param thresh: IoU above which a lower-scored box is suppressed
    :return: list of indices (into `dets`) of surviving boxes, best first
    """
    xmin, ymin = dets[:, 0], dets[:, 1]
    xmax, ymax = dets[:, 2], dets[:, 3]
    # +1 follows the pixel-inclusive box convention used throughout this file.
    box_area = (xmax - xmin + 1) * (ymax - ymin + 1)
    # Candidate indices sorted by descending score.
    candidates = dets[:, 4].argsort()[::-1]

    survivors = []
    while candidates.size > 0:
        # The highest-scoring remaining box always survives.
        best = candidates[0]
        survivors.append(best)
        rest = candidates[1:]
        # Intersection of the winner with every remaining candidate.
        ix1 = np.maximum(xmin[best], xmin[rest])
        iy1 = np.maximum(ymin[best], ymin[rest])
        ix2 = np.minimum(xmax[best], xmax[rest])
        iy2 = np.minimum(ymax[best], ymax[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (box_area[best] + box_area[rest] - inter)
        # Drop candidates that overlap the winner too much; the rest continue.
        candidates = rest[np.where(iou <= thresh)[0]]

    return survivors


def _whctrs(anchor):
    """
    Return width, height, x center, and y center for an anchor (window).
    """

    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr


def _mkanchors(ws, hs, x_ctr, y_ctr):
    """
    Given a vector of widths (ws) and heights (hs) around a center
    (x_ctr, y_ctr), output a set of anchors (windows).
    """

    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
                         y_ctr - 0.5 * (hs - 1),
                         x_ctr + 0.5 * (ws - 1),
                         y_ctr + 0.5 * (hs - 1)))
    return anchors


def _ratio_enum(anchor, ratios):
    """For each aspect ratio, produce an anchor of roughly the same area.

    :param anchor: [x1, y1, x2, y2] reference window
    :param ratios: array of height/width ratios
    :return: (len(ratios), 4) anchors sharing the reference's center
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # Solve w' = sqrt(area / r), h' = w' * r, rounded to whole pixels.
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)


# @jit()
def _scale_enum(anchor, scales):
    """For each scale, produce an anchor `scale` times the reference size.

    :param anchor: [x1, y1, x2, y2] reference window
    :param scales: array of multiplicative size factors
    :return: (len(scales), 4) anchors sharing the reference's center
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)


def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2 ** np.arange(3, 6), stride=16):
    """Enumerate anchors for every aspect ratio x scale combination around a
    square (0, 0, base_size-1, base_size-1) reference window.

    Note: `stride` is accepted for call-site compatibility but unused here.
    """
    reference = np.array([1, 1, base_size, base_size]) - 1
    per_ratio = _ratio_enum(reference, ratios)
    expanded = [_scale_enum(per_ratio[row, :], scales)
                for row in range(per_ratio.shape[0])]
    return np.vstack(expanded)


def generate_anchors_fpn(cfg):
    """Generate one base-anchor set per FPN stride, largest stride first.

    :param cfg: mapping of stride (string key) -> dict with 'BASE_SIZE',
        'RATIOS', 'SCALES' (other keys ignored here)
    :return: list of (A, 4) anchor arrays, ordered by decreasing stride
    """
    strides = sorted((int(key) for key in cfg), reverse=True)
    anchors = []
    for stride in strides:
        spec = cfg[str(stride)]
        anchors.append(generate_anchors(spec['BASE_SIZE'],
                                        np.array(spec['RATIOS']),
                                        np.array(spec['SCALES']),
                                        stride))
    return anchors


def anchors_plane(height, width, stride, base_anchors):
    """
    Spread `base_anchors` over an (height x width) feature-map plane.

    Parameters
    ----------
    height: height of plane
    width:  width of plane
    stride: stride of the original image
    base_anchors: (A, 4) a base set of anchors
    Returns
    -------
    all_anchors: (height, width, A, 4) float32 ndarray; entry [ih, iw, k] is
        base_anchors[k] shifted by (iw * stride, ih * stride)
    """
    # Vectorized replacement for the original triple Python loop: build the
    # per-cell (x, y, x, y) shift plane once, then broadcast-add the base
    # anchors. Output values and dtype are identical to the loop version.
    shift_x = np.arange(width, dtype=np.float32) * stride    # (W,)
    shift_y = np.arange(height, dtype=np.float32) * stride   # (H,)
    shifts = np.empty((height, width, 4), dtype=np.float32)
    shifts[:, :, 0] = shift_x[np.newaxis, :]
    shifts[:, :, 2] = shift_x[np.newaxis, :]
    shifts[:, :, 1] = shift_y[:, np.newaxis]
    shifts[:, :, 3] = shift_y[:, np.newaxis]
    all_anchors = shifts[:, :, np.newaxis, :] + \
        base_anchors.astype(np.float32)[np.newaxis, np.newaxis, :, :]
    return all_anchors



def clip_pad(tensor, pad_shape):
    """
    Clip boxes of the pad area.
    :param tensor: [n, c, H, W]
    :param pad_shape: [h, w]
    :return: [n, c, h, w] — a copy when clipping happened, otherwise the
        original tensor unchanged
    """
    h, w = pad_shape
    full_h, full_w = tensor.shape[2:]
    if full_h > h or full_w > w:
        return tensor[:, :, :h, :w].copy()
    return tensor


def bbox_pred(boxes, box_deltas):
    """
    Apply predicted offsets `box_deltas` to anchor `boxes`.
    :param boxes: [N, 4] anchors as x1, y1, x2, y2
    :param box_deltas: [N, 4 * num_classes]; columns 0-3 are (dx, dy, dw, dh)
    :return: [N, 4 * num_classes] decoded boxes; columns past 4 pass through
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    boxes = boxes.astype(np.float32, copy=False)
    w = boxes[:, 2] - boxes[:, 0] + 1.0
    h = boxes[:, 3] - boxes[:, 1] + 1.0
    cx = boxes[:, 0] + 0.5 * (w - 1.0)
    cy = boxes[:, 1] + 0.5 * (h - 1.0)

    # Keep a trailing axis so broadcasting lines up with the delta columns.
    w = w[:, np.newaxis]
    h = h[:, np.newaxis]
    cx = cx[:, np.newaxis]
    cy = cy[:, np.newaxis]

    # Center shift is linear in the anchor size; width/height are log-encoded.
    pred_cx = box_deltas[:, 0:1] * w + cx
    pred_cy = box_deltas[:, 1:2] * h + cy
    pred_w = np.exp(box_deltas[:, 2:3]) * w
    pred_h = np.exp(box_deltas[:, 3:4]) * h

    out = np.zeros(box_deltas.shape)
    out[:, 0:1] = pred_cx - 0.5 * (pred_w - 1.0)   # x1
    out[:, 1:2] = pred_cy - 0.5 * (pred_h - 1.0)   # y1
    out[:, 2:3] = pred_cx + 0.5 * (pred_w - 1.0)   # x2
    out[:, 3:4] = pred_cy + 0.5 * (pred_h - 1.0)   # y2

    # Any extra columns (per-class data) are copied through untouched.
    if box_deltas.shape[1] > 4:
        out[:, 4:] = box_deltas[:, 4:]

    return out


def landmark_pred(boxes, landmark_deltas):
    """Decode normalized 5-point landmark deltas against anchor boxes.

    :param boxes: [N, 4] anchors (x1, y1, x2, y2)
    :param landmark_deltas: [N, 5, 2] (dx, dy) offsets expressed as fractions
        of the anchor size relative to its center
    :return: [N, 5, 2] absolute landmark coordinates
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, landmark_deltas.shape[1]))
    boxes = boxes.astype(np.float32, copy=False)
    box_w = boxes[:, 2] - boxes[:, 0] + 1.0
    box_h = boxes[:, 3] - boxes[:, 1] + 1.0
    center_x = boxes[:, 0] + 0.5 * (box_w - 1.0)
    center_y = boxes[:, 1] + 0.5 * (box_h - 1.0)
    decoded = landmark_deltas.copy()
    for point in range(5):
        decoded[:, point, 0] = landmark_deltas[:, point, 0] * box_w + center_x
        decoded[:, point, 1] = landmark_deltas[:, point, 1] * box_h + center_y
    return decoded



class RetinaFace:
    """RetinaFace face-detector head.

    Feeds batched input tensors through an inference backend and decodes the
    per-stride FPN outputs (face scores, box deltas, optional landmark deltas
    and mask scores) into face boxes plus 5-point landmarks.
    """

    def __init__(self, inference_backend: Union[DIT], rac='net3l', masks: bool = False, **kwargs):
        # NOTE(review): `Union[DIT]` with the module-level `DIT = None` resolves
        # to NoneType at runtime; it reads like a stub for the real backend type
        # — confirm against the project that normally supplies DIT.
        self.rac = rac  # anchor-configuration variant key, see prepare()
        self.masks = masks  # model additionally emits mask-wearing scores
        self.model = inference_backend
        self.input_shape = (1, 3, 960, 960)  # NCHW; presumably the backend's expected input — confirm
        self.prepare()

    def prepare(self, nms: float = 0.4, **kwargs):
        """Precompute per-stride anchor templates and decoding settings.

        :param nms: IoU threshold later used by postprocess()'s NMS
        """
        # NOTE(review): the `nms` parameter shadows the module-level nms()
        # function (which is not called inside this method).
        self.max_batch_size = MAX_BATCH_SIZE
        self.nms_threshold = nms
        self.landmark_std = 1.0

        _ratio = (1.,)
        fmc = 3
        if self.rac == 'net3':
            _ratio = (1.,)
        elif self.rac == 'net3l':
            _ratio = (1.,)
            # 'net3l' landmark deltas are multiplied by 0.2 before decoding —
            # presumably matching a training-time scaling; verify with the model.
            self.landmark_std = 0.2
        else:
            assert False, 'rac setting error %s' % self.rac

        if fmc == 3:
            # Three FPN levels; larger strides carry anchors for larger faces.
            self._feat_stride_fpn = [32, 16, 8]
            self.anchor_cfg = {
                '32': {'SCALES': (32, 16), 'BASE_SIZE': 16, 'RATIOS': _ratio, 'ALLOWED_BORDER': 9999},
                '16': {'SCALES': (8, 4), 'BASE_SIZE': 16, 'RATIOS': _ratio, 'ALLOWED_BORDER': 9999},
                '8': {'SCALES': (2, 1), 'BASE_SIZE': 16, 'RATIOS': _ratio, 'ALLOWED_BORDER': 9999},
            }

        self.use_landmarks = True
        self.fpn_keys = []

        for s in self._feat_stride_fpn:
            self.fpn_keys.append('stride%s' % s)

        # Base anchors per stride, keyed 'stride32'/'stride16'/'stride8'.
        self._anchors_fpn = dict(zip(self.fpn_keys, generate_anchors_fpn(cfg=self.anchor_cfg)))
        for k in self._anchors_fpn:
            v = self._anchors_fpn[k].astype(np.float32)
            self._anchors_fpn[k] = v
        # Cache of tiled anchor planes keyed by (height, width, stride).
        self.anchor_plane_cache = {}

        # Anchors per feature-map cell, per stride.
        self._num_anchors = dict(zip(self.fpn_keys, [anchors.shape[0] for anchors in self._anchors_fpn.values()]))

    # bgr_img may be a single image or several images
    def detect(self, img_mxtensor_list, scale_factor_list, threshold: float = 0.6):
        """Run inference and return detections mapped back to original pixels.

        :param img_mxtensor_list: backend-specific input tensor batch
        :param scale_factor_list: per-image preprocessing scale factors; the
            decoded boxes/landmarks are divided by them to undo the resize
        :param threshold: minimum face score for a proposal to survive
        :return: (all_dets, all_landmarks) lists with one entry per image; an
            entry is None when no proposal passed the threshold
        """
        # 1. Infer and process output tensors
        t0 = time.time()
        net_out = self.model.infer(img_mxtensor_list)
        t1 = time.time()
        # Pull every output tensor to host memory and convert to ndarray.
        for i, output_tensor in enumerate(net_out):
            output_tensor.to_host()
            net_out[i] = np.array(output_tensor)

        # 2. Do postprocess
        batch = len(scale_factor_list)
        _all_dets, _all_landmarks = self.postprocess(net_out, threshold, batch)
        all_dets = []
        all_landmarks = []
        for i in range(len(_all_dets)):
            boxes = _all_dets[i]
            landmarks = _all_landmarks[i]
            if boxes is not None:
                # Undo the preprocessing resize; scores (column 4+) untouched.
                boxes[:, 0:4] = boxes[:, 0:4]/scale_factor_list[i]
                landmarks = landmarks/scale_factor_list[i]
            all_dets.append(boxes)
            all_landmarks.append(landmarks)

        return all_dets, all_landmarks

    def postprocess(self, net_out, threshold, batch):
        """Decode raw network outputs into per-image boxes and landmarks.

        Output tensors are assumed grouped per FPN stride as
        [scores, bbox_deltas, landmark_deltas(, type_scores)] — 3 tensors per
        stride here (2 without landmarks, 4 with mask scores). TODO confirm
        this layout against the exported model.

        :param net_out: list of ndarrays, one per network output tensor
        :param threshold: score cutoff applied before NMS
        :param batch: number of images in the batch
        :return: (all_dets, all_landmarks); a dets row is
            [x1, y1, x2, y2, score(, mask_score)(, extras...)]
        """
        t0 = time.time()
        all_dets = []
        all_landmarks = []
        for batch_i in range(batch):
            proposals_list = []
            scores_list = []
            mask_scores_list = []
            landmarks_list = []
            for _idx, s in enumerate(self._feat_stride_fpn):
                _key = 'stride%s' % s
                stride = int(s)
                # Index of this stride's first output tensor (see layout note
                # in the docstring).
                if self.use_landmarks:
                    idx = _idx * 3
                else:
                    idx = _idx * 2
                if self.masks:
                    idx = _idx * 4

                A = self._num_anchors['stride%s' % s]

                scores = np.array([net_out[idx][batch_i]])
                # Keep the last A of the score channels — presumably the
                # face-class half of a background/face pair; confirm.
                scores = scores[:, A:, :, :]
                idx += 1
                bbox_deltas = np.array([net_out[idx][batch_i]])
                height, width = bbox_deltas.shape[2], bbox_deltas.shape[3]

                K = height * width
                key = (height, width, stride)
                if key in self.anchor_plane_cache:
                    anchors = self.anchor_plane_cache[key]
                else:

                    anchors_fpn = self._anchors_fpn['stride%s' % s]
                    anchors = anchors_plane(height, width, stride, anchors_fpn)
                    anchors = anchors.reshape((K * A, 4))
                    # Bounded cache; feature-map sizes are few in practice.
                    if len(self.anchor_plane_cache) < 100:
                        self.anchor_plane_cache[key] = anchors

                # NCHW -> (K*A, 1) score column matching the anchor order.
                scores = clip_pad(scores, (height, width))
                scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))

                bbox_deltas = clip_pad(bbox_deltas, (height, width))
                bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1))
                bbox_pred_len = bbox_deltas.shape[3] // A
                bbox_deltas = bbox_deltas.reshape((-1, bbox_pred_len))

                # Decode deltas against the tiled anchor plane.
                proposals = bbox_pred(anchors, bbox_deltas)

                # Threshold before NMS to keep the candidate set small.
                scores_ravel = scores.ravel()
                order = np.where(scores_ravel >= threshold)[0]
                proposals = proposals[order, :]
                scores = scores[order]

                proposals_list.append(proposals)
                scores_list.append(scores)

                if self.masks:
                    # Mask scores sit two tensors after the bbox deltas; the
                    # slice keeps channels past 2A — confirm channel layout.
                    type_scores = np.array([net_out[idx+2][batch_i]])
                    mask_scores = type_scores[:, A*2:, :, :]
                    mask_scores = clip_pad(mask_scores,(height, width))
                    mask_scores = mask_scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
                    mask_scores = mask_scores[order]
                    mask_scores_list.append(mask_scores)

                if self.use_landmarks:
                    idx += 1
                    landmark_deltas = np.array([net_out[idx][batch_i]])
                    landmark_deltas = clip_pad(landmark_deltas, (height, width))
                    landmark_pred_len = landmark_deltas.shape[1] // A
                    # Reshape to (K*A, 5, 2) and apply the variant's scaling
                    # (landmark_std, set in prepare()).
                    landmark_deltas = landmark_deltas.transpose((0, 2, 3, 1)).reshape((-1, 5, landmark_pred_len // 5))
                    landmark_deltas *= self.landmark_std
                    landmarks = landmark_pred(anchors, landmark_deltas)
                    landmarks = landmarks[order, :]
                    landmarks_list.append(landmarks)

            # Merge all strides, sort by score, then run NMS once.
            proposals = np.vstack(proposals_list)
            landmarks = None
            det = None
            if proposals.shape[0] != 0:
                scores = np.vstack(scores_list)
                scores_ravel = scores.ravel()
                order = scores_ravel.argsort()[::-1]
                proposals = proposals[order, :]
                scores = scores[order]
                if self.use_landmarks:
                    landmarks = np.vstack(landmarks_list)
                    landmarks = landmarks[order].astype(np.float32, copy=False)
                if self.masks:
                    mask_scores = np.vstack(mask_scores_list)
                    mask_scores = mask_scores[order]
                    pre_det = np.hstack((proposals[:, 0:4], scores, mask_scores)).astype(np.float32, copy=False)
                else:
                    pre_det = np.hstack((proposals[:, 0:4], scores)).astype(np.float32, copy=False)
                keep = nms(pre_det, thresh=self.nms_threshold)
                det = np.hstack((pre_det, proposals[:, 4:]))
                det = det[keep, :]
                if self.use_landmarks:
                    landmarks = landmarks[keep]

            all_dets.append(det)
            all_landmarks.append(landmarks)

        t1 = time.time()
        return all_dets, all_landmarks


def _preprocess(img, bbox=None, landmark=None, **kwargs):
    M = None
    image_size = []
    str_image_size = kwargs.get('image_size', '')
    if len(str_image_size) > 0:
        image_size = [int(x) for x in str_image_size.split(',')]
        if len(image_size) == 1:
            image_size = [image_size[0], image_size[0]]
        assert len(image_size) == 2
        assert image_size[0] == 112
        assert image_size[0] == 112 or image_size[1] == 96
    if landmark is not None:
        assert len(image_size) == 2
        src = np.array([
            [30.2946, 51.6963],
            [65.5318, 51.5014],
            [48.0252, 71.7366],
            [33.5493, 92.3655],
            [62.7299, 92.2041]], dtype=np.float32)
        if image_size[1] == 112:
            src[:, 0] += 8.0
        dst = landmark.astype(np.float32)

        tform = trans.SimilarityTransform()
        tform.estimate(dst, src)
        M = tform.params[0:2, :]
        # M = cv2.estimateRigidTransform( dst.reshape(1,5,2), src.reshape(1,5,2), False)

    if M is None:
        if bbox is None:  # use center crop
            det = np.zeros(4, dtype=np.int32)
            det[0] = int(img.shape[1] * 0.0625)
            det[1] = int(img.shape[0] * 0.0625)
            det[2] = img.shape[1] - det[0]
            det[3] = img.shape[0] - det[1]
        else:
            det = bbox
        margin = kwargs.get('margin', 44)
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img.shape[1])
        bb[3] = np.minimum(det[3] + margin / 2, img.shape[0])
        ret = img[bb[1]:bb[3], bb[0]:bb[2], :]
        if len(image_size) > 0:
            ret = cv2.resize(ret, (image_size[1], image_size[0]))
        return ret
    else:  # do align using landmark
        assert len(image_size) == 2
        warped = cv2.warpAffine(img, M, (image_size[1], image_size[0]), borderValue=0.0)
        return warped


def get_aligned_face_base_with_mface_all(all_boxes, all_landmarks, img_ndarrays):
    """For each image, select one face and produce an aligned 112x112 chip.

    :param all_boxes: per-image detection arrays [N, 5+] (or None for no faces)
    :param all_landmarks: per-image landmark arrays [N, 5, 2] (or None)
    :param img_ndarrays: per-image input tensors; assumed (1, C, H, W) with
        channels in RGB order (reversed to BGR below) — TODO confirm layout
    :return: parallel lists (warpeds, bbs, lmarks, scores, nfaces); entries are
        None for images without detections. warpeds are CHW RGB aligned chips.
    """
    warpeds, bbs, lmarks, scores, nfaces = [], [], [], [], []
    for i in range(len(all_boxes)):
        bboxes = all_boxes[i]
        landmarks = all_landmarks[i]
        if bboxes is None:
            warped, bb, landmark, score, nface = None, None, None, None, None
        else:
            bindex = 0
            det = bboxes[:, 0:4]
            # Recover an HWC view from the NCHW tensor; ::-1 reverses channels.
            img_3channels = img_ndarrays[i][0, ::-1, :, :].transpose(1, 2, 0)  # HWC, BGR
            img_size = np.asarray(img_3channels.shape[:2])
            nrof_faces = bboxes.shape[0]
            nface = nrof_faces
            # Pick the most central face: larger area wins, but squared distance
            # from the image center is penalized.
            if nrof_faces > 1:
                bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                img_center = img_size / 2
                offsets = np.vstack(
                    [(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                bindex = np.argmax(bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
            det = bboxes[:, 0:4]
            det = det[bindex, :]
            landmark = landmarks[bindex, :]
            score = bboxes[bindex, 4:5][0]  # detection confidence (column 4)
            # points need to be transpose, points = points.reshape( (5,2) ).transpose()
            det = np.squeeze(det)
            bb = det
            points = list(landmark.flatten())
            assert (len(points) == 10)
            str_image_size = "%d,%d" % (112, 112)
            # Landmark-driven similarity alignment to a 112x112 template.
            warped = _preprocess(img_3channels, bbox=bb, landmark=landmark, image_size=str_image_size)
            # cv2.imwrite('test.jpg', warped)
            warped = cv2.cvtColor(warped, cv2.COLOR_BGR2RGB)
            warped = np.transpose(warped, (2, 0, 1))  # HWC -> CHW
        warpeds.append(warped)
        bbs.append(bb)
        lmarks.append(landmark)
        scores.append(score)
        nfaces.append(nface)
    return warpeds, bbs, lmarks, scores, nfaces


def do_flip(data):
    """Horizontally mirror every channel plane of `data`, in place.

    :param data: (batch, channel, H, W) array; the last (width) axis is
        reversed for every plane
    """
    # Single vectorized slice assignment replaces the original per-plane
    # np.fliplr double loop; the result is identical.
    data[:, :, :, :] = data[:, :, :, ::-1]


def get_feature(face_img):
    """Normalize a face chip batch and append horizontally mirrored copies.

    :param face_img: (C, H, W) or (N, C, H, W) array with values in [0, 255]
    :return: (2N, C, H, W) array — normalized images followed by their mirrors
        (test-time flip augmentation ahead of embedding extraction)
    """
    if face_img.ndim == 3:
        face_img = np.expand_dims(face_img, axis=0)

    # Map [0, 255] -> [-1, 1].
    face_img = (face_img / 255. - 0.5) / 0.5

    # Mirror along the width axis (equivalent to the module's do_flip applied
    # to a copy); unused locals from the original were removed.
    mirrored = face_img[:, :, :, ::-1]
    return np.concatenate((face_img, mirrored))


def get_feature_batch(face_img):
    """Normalize a face chip batch and return its mirror as a separate array.

    :param face_img: (C, H, W) or (N, C, H, W) array with values in [0, 255]
    :return: (img, flipped) pair of (N, C, H, W) arrays normalized to [-1, 1];
        `flipped` is `img` mirrored along the width axis
    """
    if face_img.ndim == 3:
        face_img = np.expand_dims(face_img, axis=0)

    # Map [0, 255] -> [-1, 1].
    face_img = (face_img / 255. - 0.5) / 0.5

    # Mirror along the width axis (equivalent to the module's do_flip applied
    # to a copy); unused locals from the original were removed.
    img = np.copy(face_img)
    mirrored = img[:, :, :, ::-1].copy()
    return img, mirrored