# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     utils
   Description :   
   Author :       lth
   date：          2022/2/19
-------------------------------------------------
   Change Activity:
                   2022/2/19 6:13: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import numpy as np
import torch
from PIL import Image
from torch import nn


def bbox_iou(box1, box2, x1y1x2y2=True):
    """Compute the element-wise IoU between two sets of boxes.

    :param box1: (N, 4) tensor of boxes.
    :param box2: (N, 4) tensor of boxes.
    :param x1y1x2y2: if True, boxes are corner-encoded (x1, y1, x2, y2);
                     otherwise center-encoded (cx, cy, w, h).
    :return: (N,) tensor of IoU values.
    """
    if x1y1x2y2:
        a_x1, a_y1, a_x2, a_y2 = (box1[:, i] for i in range(4))
        b_x1, b_y1, b_x2, b_y2 = (box2[:, i] for i in range(4))
    else:
        half_w1, half_h1 = box1[:, 2] / 2, box1[:, 3] / 2
        half_w2, half_h2 = box2[:, 2] / 2, box2[:, 3] / 2
        a_x1, a_x2 = box1[:, 0] - half_w1, box1[:, 0] + half_w1
        a_y1, a_y2 = box1[:, 1] - half_h1, box1[:, 1] + half_h1
        b_x1, b_x2 = box2[:, 0] - half_w2, box2[:, 0] + half_w2
        b_y1, b_y2 = box2[:, 1] - half_h2, box2[:, 1] + half_h2

    # Intersection rectangle; the +1 treats coordinates as inclusive pixels.
    inter_w = torch.clamp(torch.min(a_x2, b_x2) - torch.max(a_x1, b_x1) + 1, min=0)
    inter_h = torch.clamp(torch.min(a_y2, b_y2) - torch.max(a_y1, b_y1) + 1, min=0)
    inter = inter_w * inter_h

    area_a = (a_x2 - a_x1 + 1) * (a_y2 - a_y1 + 1)
    area_b = (b_x2 - b_x1 + 1) * (b_y2 - b_y1 + 1)

    # 1e-16 guards against division by zero for degenerate boxes.
    return inter / (area_a + area_b - inter + 1e-16)


class TargetEncode(nn.Module):
    """Encode ground-truth boxes into CenterNet-style training targets:
    class heatmap, sub-pixel center offset, normalized width/height,
    positive-center mask, and a heatmap-coverage mask."""

    def __init__(self, class_num, image_size=None):
        super(TargetEncode, self).__init__()
        # BUG FIX: a caller-supplied image_size used to be silently dropped
        # (the attribute was only assigned in the `is None` branch), which
        # made forward() crash with AttributeError for non-default sizes.
        self.image_size = [512, 512] if image_size is None else image_size
        self.class_num = class_num

    @staticmethod
    def gaussian(target, x0, y0, sigma, height, width, radius=3):
        """Splat a (2*radius+1)^2 Gaussian peaked at (x0, y0) onto `target`.

        The kernel window is clipped against the map borders. Overlapping
        objects keep the element-wise maximum instead of overwriting
        earlier peaks.
        """
        size = 2 * radius + 1
        x = np.arange(0, size, 1, float)
        y = x[:, np.newaxis]
        x_center = y_center = size // 2

        g = np.exp(- ((x - x_center) ** 2 + (y - y_center) ** 2) / (2 * sigma ** 2))
        g = torch.tensor(g, dtype=target.dtype)

        # Paste window clipped to the target map.
        img_left = max(x0 - radius, 0)
        img_right = min(x0 + radius + 1, width)
        img_up = max(y0 - radius, 0)
        img_bottom = min(y0 + radius + 1, height)

        # Matching window inside the kernel.
        g_left = radius - (x0 - img_left)
        g_right = radius + (img_right - x0)
        g_up = radius - (y0 - img_up)
        g_bottom = radius + (img_bottom - y0)

        # BUG FIX: plain assignment clobbered earlier peaks when two objects
        # overlap; keep the per-pixel maximum instead.
        target[img_up:img_bottom, img_left:img_right] = torch.max(
            target[img_up:img_bottom, img_left:img_right],
            g[g_up:g_bottom, g_left:g_right])

        return target

    def forward(self, data, heatmap, offset, wh):
        """Build targets on the stride-4 feature map ([512,512] -> [128,128]).

        :param data: per-image lists of boxes, each box tensor-like
                     (x1, y1, x2, y2, cls) in input-image coordinates.
        :param heatmap: (N, class_num, h, w) prediction, used for shape/device.
        :param offset: (N, 2, h, w) prediction, used for shape.
        :param wh: (N, 2, h, w) prediction, used for shape.
        :return: (target_heatmap, target_offset, target_wh, target_mask,
                  target_hm_mask), all moved to heatmap.device.
        """
        n, c, h, w = heatmap.shape

        assert c == self.class_num, "the heatmap shape[1] should equal to the class_num"

        target_heat_map = torch.zeros_like(heatmap)
        target_offset = torch.zeros_like(offset)
        target_wh = torch.zeros_like(wh)
        target_mask = torch.zeros([n, 1, h, w])

        for index in range(len(data)):
            for d in data[index]:
                x1, y1, x2, y2, cls = d

                # Box center on the stride-4 map: exact and floored-integer.
                center_x = (x1 + x2) / 2 / 4
                center_y = (y1 + y2) / 2 / 4
                center_x_stride = (x1 + x2) / 2 // 4
                center_y_stride = (y1 + y2) / 2 // 4

                radius = gaussian_radius(((y2 - y1) // 4, (x2 - x1) // 4))
                radius = max(1, int(radius))

                # NOTE(review): assumes boxes lie inside the image; negative
                # centers would index from the end — confirm upstream clipping.
                cx = center_x_stride.long()
                cy = center_y_stride.long()

                # heat map
                self.gaussian(target_heat_map[index, cls.long()], cx, cy,
                              sigma=(2 * radius + 1) / 6, height=h,
                              width=w, radius=radius)
                # offset: sub-pixel remainder lost by the integer center
                target_offset[index, 0, cy, cx] = center_x - center_x_stride
                target_offset[index, 1, cy, cx] = center_y - center_y_stride
                # wh, normalized by the input image size
                target_wh[index, 0, cy, cx] = (x2 - x1) / self.image_size[0]
                target_wh[index, 1, cy, cx] = (y2 - y1) / self.image_size[1]
                # positive-sample mask
                target_mask[index, 0, cy, cx] = 1

        # Pixels covered by any class gaussian.
        target_hm_mask = torch.where(torch.sum(target_heat_map, dim=1, keepdim=True) != 0, 1, 0)

        return target_heat_map.to(heatmap.device), target_offset.to(heatmap.device), target_wh.to(
            heatmap.device), target_mask.to(heatmap.device), target_hm_mask.to(heatmap.device)


class TargetDecode(nn.Module):
    """Decode an encoded target map (heatmap + offset + wh) back into boxes;
    useful for sanity-checking TargetEncode round-trips."""

    def __init__(self, class_num, image_size=None, conf_thres=0.9, nms_thres=0.5):
        super(TargetDecode, self).__init__()
        self.class_num = class_num
        if image_size is None:
            image_size = [512, 512]
        self.image_size = image_size

        self.nms_thresh = nms_thres
        self.conf_thresh = conf_thres

    def forward(self, target):
        """
        :param target: (N, class_num + 2 offset + 2 wh, h, w) target map.
        :return: list of per-image detections from nms_self_define
                 (None entries where nothing survives).
        """
        n, c, h, w = target.shape

        heatmap = pool_nms(target[:, :self.class_num, ...])
        offset = target[:, self.class_num:self.class_num + 2, ...]
        wh = target[:, -2:, ...]

        heatmap = heatmap.view(n, self.class_num, h * w)

        cls_score_h_w = []

        for cls_index, hm in enumerate(heatmap[0]):
            temp = hm.nonzero()
            if temp.shape[0] != 0:
                for t in temp:
                    # BUG FIX: a flat index over an (h, w) map decomposes as
                    # row = t // w, col = t % w; the old t // h / t % h was
                    # only correct for square maps.
                    cls_score_h_w.append([cls_index, heatmap[0, cls_index, t.long()], t // w, t % w])

        result = []
        for cshw in cls_score_h_w:
            stride_w = cshw[3].long()
            stride_h = cshw[2].long()
            score = cshw[1]
            cls = torch.tensor(cshw[0], device=stride_w.device)
            offset_x = offset[:, 0, stride_h, stride_w]
            offset_y = offset[:, 1, stride_h, stride_w]
            w_box = wh[:, 0, stride_h, stride_w]
            h_box = wh[:, 1, stride_h, stride_w]

            # Back to input-image coordinates (stride 4, wh normalized).
            x1 = (stride_w + offset_x) * 4 - w_box * self.image_size[0] / 2
            x2 = (stride_w + offset_x) * 4 + w_box * self.image_size[0] / 2
            y1 = (stride_h + offset_y) * 4 - h_box * self.image_size[1] / 2
            y2 = (stride_h + offset_y) * 4 + h_box * self.image_size[1] / 2

            res = torch.tensor([[x1, y1, x2, y2, cls, score, 1]]).unsqueeze(-1)
            # BUG FIX: res was appended twice, duplicating every box.
            result.append(res)

        # BUG FIX: torch.cat on an empty list raised; report "no detections".
        if not result:
            return [None for _ in range(n)]

        return OutputDecode.nms_self_define(torch.cat(result, dim=-1), self.conf_thresh,
                                            self.nms_thresh)


class OutputDecode(nn.Module):
    """Decode a raw network output map into NMS-filtered detections."""

    def __init__(self, image_size, class_num, conf_thres=0.5, nms_thres=0.5, topK=100):
        super(OutputDecode, self).__init__()
        self.image_size = image_size
        self.class_num = class_num

        self.nms_thresh = nms_thres
        self.conf_thresh = conf_thres
        # NOTE(review): topK is kept for interface compatibility; unused
        # since the top-K selection path was commented out.
        self.topK = topK

    def forward(self, output):
        """
        :param output: (1, class_num + 2 offset + 2 wh, h, w) network output.
        :return: list of per-image detections (None where nothing survives).
        """
        output = output.cpu()
        n, c, h, w = output.shape

        heatmap = pool_nms(output[:, :self.class_num, ...])
        offset = output[:, -4:-2, ...]
        wh = output[:, -2:, ...]

        heatmap = heatmap.view(n, self.class_num, h * w)

        cls_score_h_w = []

        for cls_index, hm in enumerate(heatmap[0]):
            temp = hm.nonzero()
            if temp.shape[0] != 0:
                for t in temp:
                    # BUG FIX: a flat index over an (h, w) map decomposes as
                    # row = t // w, col = t % w; the old t // h / t % h was
                    # only correct for square maps.
                    cls_score_h_w.append([cls_index, heatmap[0, cls_index, t.long()], t // w, t % w])

        result = []
        for cshw in cls_score_h_w:
            stride_w = cshw[3].long()
            stride_h = cshw[2].long()
            score = cshw[1]
            cls = torch.tensor(cshw[0], device=stride_w.device)
            offset_x = offset[:, 0, stride_h, stride_w]
            offset_y = offset[:, 1, stride_h, stride_w]
            w_box = wh[:, 0, stride_h, stride_w]
            h_box = wh[:, 1, stride_h, stride_w]

            # Back to input-image coordinates (stride 4, wh normalized).
            x1 = (stride_w + offset_x) * 4 - w_box * self.image_size[0] / 2
            x2 = (stride_w + offset_x) * 4 + w_box * self.image_size[0] / 2
            y1 = (stride_h + offset_y) * 4 - h_box * self.image_size[1] / 2
            y2 = (stride_h + offset_y) * 4 + h_box * self.image_size[1] / 2

            res = torch.tensor([[x1, y1, x2, y2, cls, score, 1]]).unsqueeze(-1)
            result.append(res)

        # BUG FIX: torch.cat on an empty list raised when nothing fired.
        if not result:
            return [None for _ in range(n)]

        return OutputDecode.nms_self_define(torch.cat(result, dim=-1), self.conf_thresh,
                                            self.nms_thresh)

    @staticmethod
    def nms_self_define(prediction, conf_thres=0.1, nms_thres=0.9):
        """Per-class greedy non-maximum suppression.

        :param prediction: (N, 7, num) tensor laid out per candidate as
                           [x1, y1, x2, y2, cls, class_score, obj_conf].
        :return: list of length N; each entry is an (M, 7) tensor
                 [x1, y1, x2, y2, class_score, cls, obj_conf] or None.
        """
        # =========> to (N, num, 7); boxes are already x1y1x2y2
        prediction = prediction.permute(0, 2, 1)

        output = [None for _ in range(len(prediction))]
        for image_i, image_pred in enumerate(prediction):
            # BUG FIX: these columns were read from the whole batch
            # (prediction[:, :, 5]) inside the per-image loop, which was
            # only correct for batch size 1; read them from image_pred.
            class_conf = image_pred[:, 5:6]
            class_pred = image_pred[:, 4:5]
            conf = image_pred[:, 6:7]
            # First pass: drop candidates below the combined-confidence threshold.
            # (squeeze(-1) rather than squeeze() so a single candidate keeps its dim)
            conf_mask = ((conf * class_conf) >= conf_thres).squeeze(-1)
            image_pred = image_pred[conf_mask]
            class_conf = class_conf[conf_mask]
            class_pred = class_pred[conf_mask]
            conf = conf[conf_mask]
            if not image_pred.size(0):
                continue
            # detections: x1y1x2y2 + class_score + class_id + obj_conf
            detections = torch.cat((image_pred[:, :4], class_conf.float(), class_pred.float(), conf.float()), 1)
            unique_labels = detections[:, 5].cpu().unique()

            if prediction.is_cuda:
                unique_labels = unique_labels.cuda()
                detections = detections.cuda()

            for c in unique_labels:
                # All surviving predictions of this class.
                detections_class = detections[detections[:, 5] == c]
                # Sort by obj_conf * class_score, best first.
                _, conf_sort_index = torch.sort(detections_class[:, -1] * detections_class[:, -3], descending=True)
                detections_class = detections_class[conf_sort_index]
                # Greedy suppression: keep the best candidate, drop everything
                # overlapping it above nms_thres, repeat.
                max_detections = []
                while detections_class.size(0):
                    max_detections.append(detections_class[0].unsqueeze(0))
                    if len(detections_class) == 1:
                        break
                    ious = bbox_iou(max_detections[-1], detections_class[1:])
                    detections_class = detections_class[1:][ious < nms_thres]
                max_detections = torch.cat(max_detections).data

                # Accumulate per-class keeps into this image's output.
                output[image_i] = max_detections if output[image_i] is None else torch.cat(
                    (output[image_i], max_detections))

        return output


class CenterLoss(nn.Module):
    """CenterNet loss: focal heatmap loss + L1 offset loss + L1 wh loss."""

    def __init__(self, class_num):
        super(CenterLoss, self).__init__()
        self.wh_loss = nn.L1Loss(reduction="none")
        self.offset_loss = nn.L1Loss(reduction="none")
        self.heat_map_l1loss = nn.L1Loss(reduction="none")
        self.class_num = class_num

    def forward(self, output, target, mask, hm_mask):
        """
        :param output: (N, class_num + 2 offset + 2 wh, h, w) predictions;
                       heatmap channels are treated as probabilities.
        :param target: same layout as output.
        :param mask: (N, 1, h, w) positive-center mask (1 at object centers).
        :param hm_mask: unused; kept for interface compatibility.
        :return: (total, heatmap_loss, offset_loss, wh_loss).
        """
        # BUG FIX: clamp the positive count so a batch without any object
        # no longer divides by zero (inf/NaN loss).
        N_pos = torch.clamp(torch.sum(mask), min=1)
        # heatmap (focal), normalized by the number of positives
        loss_heatmap = (1 / N_pos) * torch.sum(
            self.focal_loss_from_logits(output[:, :self.class_num, ...],
                                        target[:, :self.class_num, ...])
        )
        # offset: L1 only at positive centers
        loss_offset = (1 / N_pos) * torch.sum(
            self.offset_loss(output[:, -4:-2, ...], target[:, -4:-2, ...])[
                mask.expand(-1, 2, -1, -1).bool()])
        # wh: L1 only at positive centers
        loss_wh = (1 / N_pos) * torch.sum(
            self.wh_loss(output[:, -2:, ...], target[:, -2:, ...])[
                mask.expand(-1, 2, -1, -1).bool()])

        # wh term is down-weighted by 0.1.
        return loss_heatmap + 1 * loss_offset + 0.1 * loss_wh, loss_heatmap, loss_offset, loss_wh

    @staticmethod
    def BCELoss(pred, target):
        """Element-wise binary cross entropy on probabilities (no reduction)."""
        output = -target * torch.log(pred) - (1.0 - target) * torch.log(1.0 - pred)
        return output

    @staticmethod
    def focal_loss_from_logits(preds, targets, gamma=4, alpha=2):
        """Penalty-reduced focal loss on probability maps (no reduction).

        preds: (n, class_num, h, w) probabilities in (0, 1)
        targets: (n, class_num, h, w) gaussian-splatted ground truth
        """
        # BUG FIX: clamp like the sibling focal_loss so saturated
        # predictions don't produce log(0) -> NaN.
        preds = torch.clamp(preds, 1e-6, 1 - 1e-6)
        pt = preds * targets + (1.0 - preds) * (1.0 - targets)
        # Weight: 1 at exact centers, (1 - target)^gamma elsewhere
        # (down-weights negatives near a center). Equivalent to the old
        # "(1-t)^gamma + where(t==1,1,0)^gamma" without the no-op power.
        w = torch.where(targets == 1, torch.ones_like(targets), (1.0 - targets) ** gamma)
        loss = -w * torch.pow((1.0 - pt), alpha) * pt.log()
        return loss

    @staticmethod
    def focal_loss(pred, target, mask):
        """Alternative CenterNet focal loss, already normalized by positives."""
        # -------------------------------------------------------------------------#
        #   Positive samples are the exact centers (target == 1);
        #   every other pixel is a (possibly down-weighted) negative.
        # -------------------------------------------------------------------------#
        pos_inds = target.eq(1).float()
        neg_inds = target.lt(1).float()
        # Negatives near a positive center receive smaller weights.
        neg_weights = torch.pow(1 - target, 4)

        pred = torch.clamp(pred, 1e-6, 1 - 1e-6)
        # Focal terms: hard examples weigh more, easy ones less.
        pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
        neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds

        # Normalize by the number of positives (negatives-only fallback).
        num_pos = pos_inds.float().sum()
        pos_loss = pos_loss.sum()
        neg_loss = neg_loss.sum()

        if num_pos == 0:
            loss = -neg_loss
        else:
            loss = -(pos_loss + neg_loss) / num_pos
        return loss


def denormalize(im, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Undo ImageNet-style channel normalization: im * std + mean.

    :param im: channel-last array whose trailing dimension matches mean/std.
    :param mean: per-channel mean added back (defaults: ImageNet).
    :param std: per-channel std multiplied back (defaults: ImageNet).
    """
    # Tuples instead of the old mutable list defaults (shared across calls).
    return im * np.asarray(std) + np.asarray(mean)


def get_image(generator):
    """Convert a normalized NCHW tensor batch into an RGB PIL image of its
    first element, pasted onto a white canvas of the same size."""
    # NCHW -> NHWC, de-normalize, scale to 0-255 bytes.
    pixels = (denormalize(generator.permute((0, 2, 3, 1)).detach().to("cpu").numpy()) * 255).astype('uint8')
    image = Image.fromarray(pixels[0]).convert("RGB")
    canvas = Image.new("RGB", (image.width, image.height), (255, 255, 255))
    canvas.paste(image, (0, 0))
    return canvas


def pool_nms(heat, kernel=3):
    """Suppress non-peak heatmap responses: a value survives only where it
    equals the maximum of its kernel x kernel neighborhood."""
    padding = (kernel - 1) // 2
    local_max = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=padding)
    peak_mask = (local_max == heat).float()
    return heat * peak_mask


def gaussian_radius(det_size, min_overlap=0.7):
    """Gaussian radius for a (height, width) box so a box shifted within that
    radius keeps at least `min_overlap` IoU; returns the tightest of the
    three quadratic cases.

    NOTE(review): each root is divided by 2 rather than 2*a, which appears to
    follow the reference CornerNet implementation — preserved as-is.
    """
    height, width = det_size

    # Case 1 quadratic.
    coef_b = height + width
    coef_c = width * height * (1 - min_overlap) / (1 + min_overlap)
    root1 = (coef_b + np.sqrt(coef_b ** 2 - 4 * 1 * coef_c)) / 2

    # Case 2 quadratic.
    coef_b = 2 * (height + width)
    coef_c = (1 - min_overlap) * width * height
    root2 = (coef_b + np.sqrt(coef_b ** 2 - 4 * 4 * coef_c)) / 2

    # Case 3 quadratic.
    coef_a = 4 * min_overlap
    coef_b = -2 * min_overlap * (height + width)
    coef_c = (min_overlap - 1) * width * height
    root3 = (coef_b + np.sqrt(coef_b ** 2 - 4 * coef_a * coef_c)) / 2

    return min(root1, root2, root3)


# Predictions should not start out too large, hence the small init gain.
def weights_init(net, init_type='normal', init_gain=0.001):
    """Initialize all Conv weights of `net` per `init_type`, and BatchNorm2d
    layers to weight ~ N(1, 0.02), bias = 0.

    :param net: the module tree to initialize (applied recursively).
    :param init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'.
    :param init_gain: std / gain passed to the chosen initializer.
    :raises NotImplementedError: if init_type is unknown (on first Conv hit).
    """
    conv_initializers = {
        'normal': lambda weight: torch.nn.init.normal_(weight, 0.0, init_gain),
        'xavier': lambda weight: torch.nn.init.xavier_normal_(weight, gain=init_gain),
        'kaiming': lambda weight: torch.nn.init.kaiming_normal_(weight, a=0, mode='fan_in'),
        'orthogonal': lambda weight: torch.nn.init.orthogonal_(weight, gain=init_gain),
    }

    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and 'Conv' in classname:
            if init_type not in conv_initializers:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            conv_initializers[init_type](m.weight.data)
        elif 'BatchNorm2d' in classname:
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s type' % init_type)
    net.apply(init_func)
