# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     utils
   Description :   
   Author :       lth
   date：          2022/6/24
-------------------------------------------------
   Change Activity:
                   2022/6/24 18:17: create this script
-------------------------------------------------
"""
__author__ = 'lth'

from typing import Union

import numpy as np
import torch
from PIL import Image, ImageDraw
from torch import nn

radius = 4
center_offset = 0.


def denormalize(im: Union[np.ndarray, torch.Tensor], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    return im * std + mean


def jaccard(box_a, box_b):
    """Pairwise IoU between two sets of boxes given in (cx, cy, w, h) form.

    :param box_a: tensor [A, 4], center-format boxes
    :param box_b: tensor [B, 4], center-format boxes
    :return: tensor [A, B] where entry (i, j) is IoU(box_a[i], box_b[j])
    """

    def _to_corners(boxes):
        # (cx, cy, w, h) -> (x1, y1, x2, y2); cast back so an integer input
        # truncates exactly as the original zeros_like-fill did.
        half_wh = boxes[:, 2:] / 2
        corners = torch.cat([boxes[:, :2] - half_wh, boxes[:, :2] + half_wh], dim=1)
        return corners.to(boxes.dtype)

    corners_a = _to_corners(box_a)
    corners_b = _to_corners(box_b)
    num_a = corners_a.size(0)
    num_b = corners_b.size(0)

    # Intersection rectangle for every (a, b) pair
    upper_right = torch.min(corners_a[:, 2:].unsqueeze(1).expand(num_a, num_b, 2),
                            corners_b[:, 2:].unsqueeze(0).expand(num_a, num_b, 2))
    lower_left = torch.max(corners_a[:, :2].unsqueeze(1).expand(num_a, num_b, 2),
                           corners_b[:, :2].unsqueeze(0).expand(num_a, num_b, 2))
    side = torch.clamp(upper_right - lower_left, min=0)
    intersection = side[:, :, 0] * side[:, :, 1]

    # Areas of each set, broadcast to the [A, B] pair grid
    area_a = ((corners_a[:, 2] - corners_a[:, 0]) *
              (corners_a[:, 3] - corners_a[:, 1])).unsqueeze(1).expand_as(intersection)
    area_b = ((corners_b[:, 2] - corners_b[:, 0]) *
              (corners_b[:, 3] - corners_b[:, 1])).unsqueeze(0).expand_as(intersection)

    # IoU = intersection over union
    return intersection / (area_a + area_b - intersection)


class TargetEncode(nn.Module):
    """Encodes ground-truth boxes/landmarks into RetinaFace training targets
    for the three detection heads (strides 8/16/32, two square anchors each)."""

    def __init__(self):
        super(TargetEncode, self).__init__()
        # Face boxes are treated as squares, so the anchor aspect ratio is 1.0;
        # each head owns two anchor sizes: min_size[head][0|1].
        # NOTE(review): .cuda() here hard-requires a GPU — consider deferring device placement.
        self.min_size = torch.tensor([[16, 32], [64, 128], [256, 512]]).cuda()
        self.image_shape = [840, 840]

        self.down_sample = [8, 16, 32]
        # IoU threshold above which a gt box is encoded at an anchor
        self.threshold = 0.5
        self.variance = [1, 1]

    def Encode(self, target, outputs):
        """
        RetinaFace target encoding:
        xyxy -> xywh offsets relative to the matched grid cell,
        face -> 0/1 classification target (channel 0 = background, 1 = face),
        ldm  -> landmark offsets relative to the cell center.

        :param target: iterable of per-image gt tensors; columns are
            [x1, y1, x2, y2, 10 landmark coords, landmark-valid flag] — flag == -1
            means the face has no annotated landmarks (assumed from usage below)
        :param outputs: tuple (boxes, face logits, landmarks), each a list of the
            3 head tensors shaped [batch, seq, C]
        :return: (t_xyxy, t_face, t_ldm, t_mask, t_ldm_mask, t_o_f_mask),
            each a list with one entry per head
        """
        outputs_xyxy, outputs_face, outputs_ldm = outputs[0], outputs[1], outputs[2]
        t_xyxy = []
        t_face = []
        t_ldm = []
        t_mask = []
        t_ldm_mask = []
        t_o_f_mask = []
        for index in range(3):
            o_face = outputs_face[index]
            o_xyxy = outputs_xyxy[index]
            o_ldm = outputs_ldm[index]

            # o_mask: 1 = cell participates in the face loss, 0 = ignore band (0.3 < IoU < 0.5)
            o_mask = torch.ones([o_xyxy.shape[0], o_xyxy.shape[1], 1],device=o_face.device)
            # o_face_mask: 1 = positive cell, used to select the box-regression loss
            # NOTE(review): created on CPU while o_mask is on o_face.device — confirm this
            # indexes cleanly when outputs live on GPU.
            o_face_mask = torch.zeros([o_xyxy.shape[0], o_xyxy.shape[1], 1])
            # ldm_mask: 1 = this positive cell also has valid landmark targets
            ldm_mask = torch.zeros([o_xyxy.shape[0], o_xyxy.shape[1], 1])
            xyxy = torch.zeros_like(o_xyxy)
            # Start as all-background: channel 0 (no-face) = 1, channel 1 (face) = 0
            face = torch.ones_like(o_face)
            face[..., 1] = 0
            ldm = torch.zeros_like(o_ldm)

            # First half of the sequence uses anchor size [0], second half size [1]
            middle_length_mark = o_mask.shape[1] // 2

            # idx is the index of the sample within the batch
            for idx, t in enumerate(target):
                """
                idx 表示batch size中的第i个
                """
                #################################################
                #               ious p p p p p p p p p p p p p
                #                  t
                #                  t
                #                  t
                #                  t
                #################################################
                # (IoU matrix: rows t = gt boxes, columns p = anchors)

                # Convert gt corners to center format (cx, cy, w, h)
                temp = t[:, :4]
                gt_boxes = torch.stack(
                    [(temp[:, 0] + temp[:, 2]) / 2,  # x_center
                     (temp[:, 1] + temp[:, 3]) / 2,  # y_center
                     temp[:, 2] - temp[:, 0],  # w
                     temp[:, 3] - temp[:, 1]], 1)  # h

                # Grid of cell coordinates; grid side = sqrt(seq_len / 2)
                x = torch.arange(0, np.sqrt(middle_length_mark))
                y = torch.arange(0, np.sqrt(middle_length_mark))
                mesh_y, mesh_x = torch.meshgrid(x, y)
                mesh_x, mesh_y = mesh_x.to(o_face.device), mesh_y.to(o_face.device)

                mesh_x, mesh_y = mesh_x + center_offset, mesh_y + center_offset
                # Scale grid coordinates to input-image pixels via the head's stride
                mesh_x, mesh_y = mesh_x.view(-1) * self.down_sample[index], mesh_y.view(-1) * self.down_sample[index]

                # Two square anchors per cell: the small and the large size of this head
                anchors1 = torch.stack([mesh_x, mesh_y, self.min_size[index][0].expand_as(mesh_y),
                                        self.min_size[index][0].expand_as(mesh_y)])
                anchors2 = torch.stack([mesh_x, mesh_y, self.min_size[index][1].expand_as(mesh_y),
                                        self.min_size[index][1].expand_as(mesh_y)])
                # x_center y_center w h
                anchors = torch.cat([anchors1, anchors2], dim=-1).permute(1, 0)
                # Match each gt box against this head's anchors; a gt is encoded on this
                # head only at anchors with IoU > threshold.
                """
                计算 gt 与 grid 提出的框的iou
                给每个gt确定是否适合在这一个头上encode ，确定标准是iou>threshold
                """
                anchor_ious = jaccard(gt_boxes, anchors)
                # Sort anchors by IoU per gt, best first
                amaxs, argmaxs = torch.sort(anchor_ious, descending=True)

                # Consider only the 10 best-matching anchors for each gt box
                argmaxs_p = argmaxs[:, :10]
                for aidx, ags in enumerate(argmaxs_p):
                    for n in ags:
                        if anchor_ious[aidx, n] >= self.threshold:
                            # Map the sequence index back to a cell index within one anchor set
                            if n >= middle_length_mark:
                                temp_n = n - middle_length_mark
                            else:
                                temp_n = n
                            xyxy[idx, n, :] = self.encode_xywh2(t[aidx, :4],
                                                                temp_n,
                                                                np.sqrt(middle_length_mark),
                                                                index,
                                                                n < middle_length_mark)  # if = means the other matrix

                            # Landmark flag of -1 marks faces without annotated landmarks
                            if t[aidx, -1] != -1:
                                ldm[idx, n, :] = self.encode_ldm2(t[aidx, 4:14],
                                                                  index,
                                                                  temp_n,
                                                                  np.sqrt(middle_length_mark),
                                                                  n < middle_length_mark)
                                ldm_mask[idx, n] = 1
                            # Mark the cell as a face positive
                            face[idx, n, 1] = 1
                            face[idx, n, 0] = 0
                            o_mask[idx, n] = 1  # used for face
                            o_face_mask[idx, n] = 1  # used for box

                        # IoU in (0.3, 0.5): ambiguous match — excluded from the face loss
                        elif 0.3 < anchor_ious[aidx, n] < self.threshold:
                            face[idx, n, 0] = 1
                            o_mask[idx, n] = 0

            t_xyxy.append(xyxy)
            t_face.append(face)
            t_ldm.append(ldm)
            t_mask.append(o_mask)
            t_ldm_mask.append(ldm_mask)
            t_o_f_mask.append(o_face_mask)
        return t_xyxy, t_face, t_ldm, t_mask, t_ldm_mask, t_o_f_mask

    def encode_xywh2(self, xyxy, seq_index, ratio, index, middle_mark):
        """Encode one gt box (x1, y1, x2, y2) as regression offsets for one grid cell.

        :param xyxy: corner-format gt box, tensor [4]
        :param seq_index: cell index within a single anchor set
        :param ratio: grid side length (cells per row)
        :param index: head index (selects stride and anchor sizes)
        :param middle_mark: True -> first (small) anchor size, False -> second (large)
        :return: float tensor [4] of (dx, dy, log-w, log-h) targets
        """
        if middle_mark:
            min_anchor = self.min_size[index][0]
        else:
            min_anchor = self.min_size[index][1]
        x1, y1, x2, y2 = xyxy[0], xyxy[1], xyxy[2], xyxy[3]
        # Center offsets relative to the cell, in units of the head's stride
        x = ((x1 + x2) / 2 - self.down_sample[index] * (seq_index % ratio + center_offset)) / self.down_sample[index] / \
            self.variance[0]
        y = ((y1 + y2) / 2 - self.down_sample[index] * (seq_index // ratio + center_offset)) / self.down_sample[
            index] / self.variance[0]
        # Log-scale size relative to the anchor size
        w = torch.log((x2 - x1) / min_anchor) / self.variance[1]
        h = torch.log((y2 - y1) / min_anchor) / self.variance[1]
        return torch.stack([x, y, w, h]).float()

    def encode_ldm2(self, ldm, index, seq_index, ratio, middle_before):
        """Encode the 10 landmark coordinates as offsets from the cell center.

        :param ldm: tensor [10] of (x, y) pairs for the 5 facial landmarks
        :param index: head index (selects the stride)
        :param seq_index: cell index within a single anchor set
        :param ratio: grid side length (cells per row)
        :param middle_before: small/large anchor flag (currently unused — see commented-out code)
        :return: tensor [10] of per-landmark offsets in stride units
        """
        # if middle_before:
        #     min_anchor = self.min_size[index][0]
        # else:
        #     min_anchor = self.min_size[index][1]
        temp = torch.zeros_like(ldm, device=ldm.device)

        # Even indices are x coordinates, odd indices are y coordinates
        temp[[0, 2, 4, 6, 8]] = (ldm[[0, 2, 4, 6, 8]] - (seq_index % ratio + center_offset) * self.down_sample[index]) / \
                                self.down_sample[
                                    index] / self.variance[0]
        temp[[1, 3, 5, 7, 9]] = (ldm[[1, 3, 5, 7, 9]] - (seq_index // ratio + center_offset) * self.down_sample[
            index]) / \
                                self.down_sample[
                                    index] / self.variance[0]

        return temp


class TargetDecode(nn.Module):
    """Visual sanity check: decodes encoded training targets back into pixel-space
    boxes/landmarks and draws them onto the (denormalized) input images."""

    def __init__(self):
        super(TargetDecode, self).__init__()
        # Same head configuration as TargetEncode, kept on CPU (plain lists)
        self.min_size = [[16, 32], [64, 128], [256, 512]]
        self.image_shape = [840, 840]

        self.down_sample = [8, 16, 32]
        self.variance = [1, 1]

    def Decode(self, image, target_encode):
        """Decode targets of every head, draw them on each batch image, and show the first.

        :param image: normalized input batch, tensor [N, 3, H, W]
        :param target_encode: the 6-tuple produced by TargetEncode.Encode
        """
        # Back to uint8 HWC images for PIL
        output = (denormalize(image.permute((0, 2, 3, 1)).
                              detach().
                              to('cpu').
                              numpy()) * 255).astype('uint8')

        images = [Image.fromarray(op) for op in output]
        images_draw = [ImageDraw.Draw(img) for img in images]
        t_xyxy, t_face, t_ldm, t_mask, t_ldm_mask, t_o_face_mask = target_encode
        # Iterate over the three heads; mask here is the positive-cell (box) mask
        for index, (xyxy, f, ldm, mask, ldm_mask) in enumerate(zip(t_xyxy, t_face, t_ldm, t_o_face_mask, t_ldm_mask)):
            middle_length_mark = mask.shape[1] // 2
            for i in range(mask.shape[0]):  # <---BatchSize
                o_xyxy = xyxy[i]
                o_ldm = ldm[i]
                o_mask = mask[i]
                l_mask = ldm_mask[i]
                # Grid side length of this head
                ratio = np.sqrt(middle_length_mark)
                # Sequence indices of positive cells
                result_index = o_mask.nonzero()
                if result_index.shape[0] == 0:
                    continue
                for r in result_index:

                    # Second half of the sequence -> large anchor (index 1)
                    if r[0] >= middle_length_mark:
                        middle_length = 1
                        #  r[0]-(middle_length_mark-1)
                        center_x, center_y = (r[0] - middle_length_mark) % ratio, (
                                r[0] - middle_length_mark) // ratio
                    else:
                        middle_length = 0
                        center_x, center_y = (r[0]) % ratio, r[0] // ratio

                    x1, y1, x2, y2 = self.decode_xywh(o_xyxy[r[0]], index, middle_length, center_x, center_y)

                    ldm10 = self.decode_ldm(o_ldm[r[0]], index, center_x, center_y, middle_length)
                    images_draw[i].rectangle((x1.item(), y1.item(), x2.item(), y2.item()))
                    # Draw landmarks only for cells whose landmark targets are valid
                    if l_mask[r[0]][0] == 0:
                        continue
                    for w in range(0, 10, 2):
                        images_draw[i].ellipse(((ldm10[w].item() - radius, ldm10[w + 1].item() - radius),
                                                (ldm10[w].item() + radius, ldm10[w + 1].item() + radius)), fill=None,
                                               outline=(0, 0, 255), width=-1)

        images[0].show()

    def decode_xywh(self, xywh, index, middle_length, center_x, center_y):
        """Inverse of TargetEncode.encode_xywh2: offsets -> corner box in pixels.

        :param xywh: encoded (dx, dy, log-w, log-h) tensor [4]
        :param index: head index (selects stride and anchor sizes)
        :param middle_length: 0 -> small anchor, 1 -> large anchor
        :param center_x: cell column within the grid
        :param center_y: cell row within the grid
        :return: float tensor [4] of (x1, y1, x2, y2) in input-image pixels
        """
        min_anchor = self.min_size[index][middle_length]

        x = (xywh[0] * self.variance[0] + center_x + center_offset) * self.down_sample[index]
        y = (xywh[1] * self.variance[0] + center_y + center_offset) * self.down_sample[index]
        w = torch.exp(xywh[2] * self.variance[1]) * min_anchor
        h = torch.exp(xywh[3] * self.variance[1]) * min_anchor

        return torch.stack([x - w / 2, y - h / 2, x + w / 2, y + h / 2]).float()

    def decode_ldm(self, ldm, index, center_x, center_y, middle_length):
        """Inverse of TargetEncode.encode_ldm2: offsets -> landmark pixel coordinates.

        :param ldm: encoded landmark offsets, tensor [10]
        :param index: head index (selects the stride)
        :param center_x: cell column within the grid
        :param center_y: cell row within the grid
        :param middle_length: anchor-size flag (currently unused — see commented-out code)
        :return: tensor [10] of pixel-space landmark coordinates
        """
        # min_anchor = self.min_size[index][middle_length]
        temp = torch.zeros_like(ldm, device=ldm.device)

        # Even indices x, odd indices y
        temp[[0, 2, 4, 6, 8]] = (ldm[[0, 2, 4, 6, 8]] * self.variance[0]  + (center_x + center_offset)) * \
                                self.down_sample[
                                    index]
        temp[[1, 3, 5, 7, 9]] = (ldm[[1, 3, 5, 7, 9]] * self.variance[0] + (center_y + center_offset)) * \
                                self.down_sample[
                                    index]

        return temp


class OutputDecode(nn.Module):
    """Decodes raw network outputs into final face detections (box + scores +
    landmarks) and applies confidence filtering plus NMS."""

    def __init__(self):
        super(OutputDecode, self).__init__()
        # Two square anchor sizes per head, heads at strides 8/16/32
        self.min_size = [[16, 32], [64, 128], [256, 512]]
        self.image_shape = [840, 840]

        self.down_sample = [8, 16, 32]
        self.threshold = 0.6
        self.variance = [1, 1]

    def forward(self, image, output_encode):
        """Decode the predictions of all three heads and run NMS.

        :param image: input batch (only used by the commented-out visualization code)
        :param output_encode: tuple (boxes, face logits, landmarks), each a list of 3 head tensors
        :return: result of nms_self_define — a list with one tensor (or None) per image
        """
        # output = (denormalize(image.permute((0, 2, 3, 1)).
        #                       detach().
        #                       to('cpu').
        #                       numpy()) * 255).astype('uint8')

        # images = [Image.fromarray(op) for op in output]
        # images_draw = [ImageDraw.Draw(img) for img in images]

        outputs_final = []
        o_xyxy, o_face, o_ldm = output_encode
        # index 0, 1, 2 -> heads at stride 8, 16, 32
        """
        xyxy : n seq 4
        face : n seq 2   ----> 0 : no face 1: face
        ldm  : n seq 10
        """
        for index, (xyxy, f, ldm) in enumerate(zip(o_xyxy, o_face, o_ldm)):
            mask_middle_length = xyxy.shape[1] // 2
            for i in range(xyxy.shape[0]):  # i <---BatchSize

                # NOTE(review): `o_xyxy`/`o_ldm` and (below) `xyxy` shadow the outer
                # variables; after the first i iteration `xyxy` is the concatenated
                # result tensor, so `xyxy[i]` no longer indexes the batch — this looks
                # correct only for batch size 1. Verify before using with larger batches.
                o_xyxy = xyxy[i]
                o_f = torch.softmax(f[i],dim=-1)
                o_ldm = ldm[i]

                # Per-cell predicted class and its confidence
                cls_conf, cls_index = torch.max(o_f, dim=1)
                cls_conf, cls_index = cls_conf.unsqueeze(-1), cls_index.unsqueeze(-1)

                # Rebuild the grid of cell coordinates for this head
                ratio = np.sqrt(mask_middle_length)
                x = torch.arange(0, ratio)
                y = torch.arange(0, ratio)
                mesh_y, mesh_x = torch.meshgrid(x, y)
                mesh_x, mesh_y = mesh_x.to(o_f.device), mesh_y.to(o_f.device)

                mesh_x, mesh_y = mesh_x + center_offset, mesh_y + center_offset
                mesh_x, mesh_y = mesh_x.view(-1), mesh_y.view(-1)

                xyxy = []
                """
                mesh_x mesh_y represents the center of the grid
                """
                # idx = 1 -> first (small) anchor half of the sequence, idx = 2 -> second (large)
                for idx in range(1, 3):  # [1,3)
                    # Decode center offsets back to pixel centers
                    x = (o_xyxy[(idx - 1) * mask_middle_length:idx * mask_middle_length, 0] * self.variance[0] + mesh_x) \
                        * self.down_sample[index]
                    y = (o_xyxy[(idx - 1) * mask_middle_length:idx * mask_middle_length, 1] * self.variance[0] + mesh_y) \
                        * self.down_sample[index]

                    # Decode log-scale sizes back to pixel width/height
                    w = torch.exp(
                        o_xyxy[(idx - 1) * mask_middle_length:idx * mask_middle_length, 2] * self.variance[1]) * \
                        self.min_size[index][idx - 1]
                    h = torch.exp(
                        o_xyxy[(idx - 1) * mask_middle_length:idx * mask_middle_length, 3] * self.variance[1]) * \
                        self.min_size[index][idx - 1]
                    xyxy.append(torch.stack([x - w / 2, y - h / 2, x + w / 2, y + h / 2], dim=-1))

                    # Decode landmark offsets in place (even = x, odd = y coordinates)
                    o_ldm[(idx - 1) * mask_middle_length:idx * mask_middle_length, [0, 2, 4, 6, 8]] = \
                        (o_ldm[(idx - 1) * mask_middle_length:idx * mask_middle_length, [0, 2, 4, 6, 8]] *
                         self.variance[0]
                         + mesh_x.unsqueeze(-1).expand(-1, 5)) * self.down_sample[index]
                    o_ldm[(idx - 1) * mask_middle_length:idx * mask_middle_length, [1, 3, 5, 7, 9]] = \
                        (o_ldm[(idx - 1) * mask_middle_length:idx * mask_middle_length, [1, 3, 5, 7, 9]] *
                         self.variance[0]
                         + mesh_y.unsqueeze(-1).expand(-1, 5)) * self.down_sample[index]

                xyxy = torch.cat(xyxy, dim=0)

                #                xyxy  cls_index  cls_conf  obj_conf  ldm
                res = torch.cat([xyxy, cls_index, cls_conf, cls_conf, o_ldm], dim=1)

                outputs_final.append(res)

        # All heads concatenated into one detection list, treated as a single image
        outputs_final = torch.cat(outputs_final, dim=0)
        output_from_nms = OutputDecode.nms_self_define(outputs_final.unsqueeze(0))
        # colors = ["blue", "green", "red", "yellow", "black"]
        # for index, output in enumerate(output_to_nms):
        #     for o in output:
        #         print("conf:" + str(o.cpu().numpy()[4]))
        #         images_draw[index].rectangle([o[0], o[1], o[2], o[3]], outline=(0, 255, 255), width=3)
        #         for color_idx, w in enumerate(range(0, 10, 2)):
        #             images_draw[index].ellipse((o[7 + w] - radius, o[7 + w + 1] - radius,
        #                                         o[7 + w] + radius, o[7 + w + 1] + radius), colors[color_idx],
        #                                        outline=colors[color_idx],
        #                                        width=-1)
        # images[0].show()
        # images[0].save("result.jpg")
        return output_from_nms
    @staticmethod
    def nms_self_define(prediction, conf_thres=0.9, nms_thres=0.3):
        """Confidence filtering + greedy NMS over decoded detections.

        Column layout of *prediction* [batch, num, 17]:
        0-3 box corners, 4 class index, 5 class confidence, 6 objectness
        (duplicate of 5), 7-16 landmark coordinates.

        :param prediction: decoded detections, tensor [batch, num, 17]
        :param conf_thres: minimum confidence to keep a detection
        :param nms_thres: IoU above which overlapping detections are suppressed
        :return: list of per-image kept-detection tensors (None where empty)
        """
        # =========>   x1y1x2y2
        # prediction = prediction.permute(0, 2, 1)

        output = [None for _ in range(len(prediction))]
        for image_i, image_pred in enumerate(prediction):
            # NOTE(review): these slices use `prediction` (whole batch) instead of
            # `image_pred` — correct only when batch size is 1; verify before batching.
            class_conf, class_pred = prediction[:, :, 5].permute(1, 0), prediction[:, :, 4].permute(1, 0)
            conf = prediction[:, :, 6].permute(1, 0)
            ldm = prediction[:, :, 7:].squeeze(0)
            # First pass: build a mask of detections above the confidence threshold
            conf_mask = (conf >= conf_thres).squeeze()
            # Keep only predictions that pass the confidence filter
            image_pred = image_pred[conf_mask]
            class_conf = class_conf[conf_mask]
            class_pred = class_pred[conf_mask]
            conf = conf[conf_mask]
            ldm = ldm[conf_mask]
            if not image_pred.size(0):
                continue
            # detections  x1y1x2y2 + class_val + class_no  + conf_val
            detections = torch.cat((image_pred[:, :4], class_conf.float(), class_pred.float(), conf.float(), ldm), 1)

            if prediction.is_cuda:
                detections = detections.cuda()

            # Keep only detections predicted as the face class (class index 1)
            detections_class = detections[detections[:, 5] == 1]

            # Sort by class-confidence * objectness, best first (keep at most 1000)
            _, conf_sort_index = torch.sort(detections_class[:, 4] * detections_class[:, 6], descending=True)
            detections_class = detections_class[conf_sort_index][:1000]
            # Greedy non-maximum suppression
            max_detections = []
            while detections_class.size(0):
                # Take the highest-confidence detection, then drop every remaining
                # box whose IoU with it exceeds nms_thres
                max_detections.append(detections_class[0].unsqueeze(0))
                if len(detections_class) == 1:
                    break
                ious = bbox_iou(max_detections[-1], detections_class[1:])
                detections_class = detections_class[1:][ious < nms_thres]
            try:
                # Stack the kept detections
                max_detections = torch.cat(max_detections).data
            except:
                # NOTE(review): bare except silently maps any failure (likely the
                # empty-list cat) to [None] — consider narrowing to the expected error.
                return [None]
            # Add max detections to outputs
            output[image_i] = max_detections if output[image_i] is None else torch.cat(
                (output[image_i], max_detections))

        return output


def bbox_iou(box1, box2, x1y1x2y2=True):
    """Row-wise IoU between *box1* and *box2* (shapes broadcast over rows).

    Uses the inclusive-pixel (+1) area convention.

    :param box1: tensor [N, 4]
    :param box2: tensor [M, 4] (N and M must be broadcast-compatible)
    :param x1y1x2y2: True if boxes are corner format, False for center (cx, cy, w, h)
    :return: tensor of IoU values
    """
    if x1y1x2y2:
        # Already corner format: unpack directly
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    else:
        # Convert center format to corners via half extents
        half_w1, half_h1 = box1[:, 2] / 2, box1[:, 3] / 2
        half_w2, half_h2 = box2[:, 2] / 2, box2[:, 3] / 2
        b1_x1, b1_x2 = box1[:, 0] - half_w1, box1[:, 0] + half_w1
        b1_y1, b1_y2 = box1[:, 1] - half_h1, box1[:, 1] + half_h1
        b2_x1, b2_x2 = box2[:, 0] - half_w2, box2[:, 0] + half_w2
        b2_y1, b2_y2 = box2[:, 1] - half_h2, box2[:, 1] + half_h2

    # Intersection side lengths, clamped to zero for disjoint boxes
    inter_w = torch.clamp(torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1) + 1, min=0)
    inter_h = torch.clamp(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1) + 1, min=0)
    inter_area = inter_w * inter_h

    area1 = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    area2 = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    # Tiny epsilon guards against division by zero
    return inter_area / (area1 + area2 - inter_area + 1e-16)


class RetinaLoss(nn.Module):
    """Multi-task RetinaFace loss with online hard example mining for the
    face-classification term."""

    def __init__(self):
        """
        Multi-task learning loss: loc (box) and landmark regression both use
        SmoothL1 loss, face confidence uses cross-entropy loss.
        """
        super(RetinaLoss, self).__init__()
        # weight = torch.tensor([3, 1], device="cuda")

        # reduction="none" keeps per-element losses so masks can select cells below
        self.box = nn.SmoothL1Loss(reduction="none")
        # self.face = nn.BCELoss(reduction="none", weight=weight)
        self.face = nn.CrossEntropyLoss(reduction="none")
        self.ldm = nn.SmoothL1Loss(reduction="none")

    def forward(self, output, target):
        """Compute the combined loss over the three detection heads.

        t_mask = neg + pos (cells participating in the face loss);
        t_ldm_mask selects cells with valid landmarks; t_o_face_mask selects
        positive cells for box regression.

        :param output: (boxes, face logits, landmarks), each a list of 3 head tensors
        :param target: the 6-tuple produced by TargetEncode.Encode
        :return: (total, box_loss, face_loss, ldm_loss)
        """
        o_xyxy, o_face, o_ldm = output
                            #   face     ldm       xywh
        t_xyxy, t_face, t_ldm, t_mask, t_ldm_mask, t_o_face_mask = target
        box_loss = 0
        face_loss = 0
        ldm_loss = 0
        # One iteration per detection head
        for o1, o2, o3, t1, t2, t3, t4, t5, t6 in zip(o_xyxy, o_face, o_ldm, t_xyxy, t_face, t_ldm, t_mask, t_ldm_mask,
                                                      t_o_face_mask):

            # face
            if torch.sum(t2[..., 1]) > 0:
                # Online Hard Example Mining
                t2 = t2.view(-1, 2)

                # Target class: channel 0 is the background flag, so label = 1 - bg
                temp = self.face(o2.view(-1, 2), (1 - t2[..., 0]).long())

                neg = t2[:, 0]
                pos = t2[:, 1]
                # Cells with IoU in (0.3, 0.5) are excluded via t4; positives are
                # IoU >= 0.5; negatives keep only the hardest 3 * num_pos samples.
                """
                iou 在 (0.3,0.5)的过滤掉
                正样本只计算 iou >=0.5的
                负样本只计算loss最大的，数量是正样本的3倍
                """
                loss_pos = temp[pos.bool() & t4.view(-1).bool()]
                loss_neg = temp[neg.bool() & t4.view(-1).bool()].sort(descending=True)[0]

                num_pos = loss_pos.shape[0]
                num_neg = num_pos * 3

                loss_neg = loss_neg[:num_neg]

                face_loss += torch.sum(loss_neg) / num_neg + torch.sum(loss_pos) / num_pos

            # xywh
            if torch.sum(t6) > 0:
                t6 = t6.bool()
                box_loss += torch.mean(self.box(o1[t6.expand_as(o1)], t1[t6.expand_as(t1)]))

            # ldm
            if torch.sum(t5) > 0:
                t5 = t5.bool()
                ldm_loss += torch.mean(self.ldm(o3[t5.expand_as(o3)], t3[t5.expand_as(t3)]))

        return box_loss + face_loss + ldm_loss, box_loss, face_loss, ldm_loss


# Early predictions must stay small, hence the tiny default gain.
def weights_init(net, init_type='normal', init_gain=0.001):
    """Initialize conv and BatchNorm2d weights of *net* in place.

    :param net: module whose sub-modules are initialized via ``net.apply``
    :param init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'
    :param init_gain: gain / std passed to the chosen conv initializer
    :raises NotImplementedError: for an unknown *init_type* (raised on the first conv met)
    """
    # Dispatch table replaces the if/elif chain; each entry mutates the weight in place.
    conv_initializers = {
        'normal': lambda w: torch.nn.init.normal_(w, 0.0, init_gain),
        'xavier': lambda w: torch.nn.init.xavier_normal_(w, gain=init_gain),
        'kaiming': lambda w: torch.nn.init.kaiming_normal_(w, a=0, mode='fan_in'),
        'orthogonal': lambda w: torch.nn.init.orthogonal_(w, gain=init_gain),
    }

    def init_func(m):
        module_name = m.__class__.__name__
        if hasattr(m, 'weight') and module_name.find('Conv') != -1:
            if init_type not in conv_initializers:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            conv_initializers[init_type](m.weight.data)
        elif module_name.find('BatchNorm2d') != -1:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s type' % init_type)
    net.apply(init_func)
