'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: yolo_loss.py
@time: 2020-06-18 12:27:55
@desc: 
'''
import torch
from .loss_zoo import LOSS_ZOO
import numpy as np
from ELib.utils.utils import bboxes_iou
from ELib.utils.utils_asff import GIOU, DIOU, iou_calc3, focalloss
from jjzhk.device import device


@LOSS_ZOO.register()
def yolo_loss(cfg, priors):
    """Registry factory for the standard YOLO loss.

    ``priors`` is part of the registry call signature but is not used
    by this loss, which derives its anchors internally from ``cfg``.
    """
    criterion = YOLO_LOSS(cfg)
    return criterion

@LOSS_ZOO.register()
def asff_yolov3_loss(cfg, priors):
    """Registry factory for the ASFF-YOLOv3 loss.

    ``priors`` is part of the registry call signature but is not used
    here; the ASFF loss reads everything it needs from ``cfg``.
    """
    criterion = YOLO_LOSS_ASFF(cfg)
    return criterion


class YOLO_LOSS(torch.nn.Module):
    """Multi-scale YOLO loss (BCE for xy/objectness/class, MSE for wh).

    Per-scale grid offsets and anchor tensors are precomputed in
    ``__init__`` (sized for the configured batch size and image size),
    so ``forward`` only decodes the raw head outputs and assigns
    targets before computing the losses.
    """

    def __init__(self, cfg):
        super(YOLO_LOSS, self).__init__()
        self.device = device
        self.cfg = cfg
        self.strides = [8, 16, 32]  # downsampling factor of each of the 3 output scales
        image_size = self.cfg.BASE.IMAGE_SIZE[0]
        self.n_classes = self.cfg.BASE.NUM_CLASSES
        batch = self.cfg.TRAIN.BATCH_SIZE
        self.n_anchors = 3  # anchors per scale

        # Anchor (w, h) in input-image pixels; anch_masks picks 3 per scale.
        self.anchors = [[12, 16], [19, 36], [40, 28], [36, 75], [76, 55], [72, 146], [142, 110], [192, 243], [459, 401]]
        self.anch_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        # Predictions overlapping a GT above this IoU (but not assigned to it)
        # are excluded from the objectness loss.
        self.ignore_thre = 0.5

        self.masked_anchors, self.ref_anchors, self.grid_x, self.grid_y, self.anchor_w, self.anchor_h = [], [], [], [], [], []

        for i in range(3):
            # Anchors rescaled into this scale's grid units.
            all_anchors_grid = [(w / self.strides[i], h / self.strides[i]) for w, h in self.anchors]
            masked_anchors = np.array([all_anchors_grid[j] for j in self.anch_masks[i]], dtype=np.float32)
            # Reference boxes (0, 0, w, h) used to match each GT to its best anchor.
            ref_anchors = np.zeros((len(all_anchors_grid), 4), dtype=np.float32)
            ref_anchors[:, 2:] = np.array(all_anchors_grid, dtype=np.float32)
            ref_anchors = torch.from_numpy(ref_anchors)
            # Grid offsets / anchor sizes broadcast to (batch, n_anchors, fsize, fsize)
            # so predictions can be decoded with pure elementwise ops in forward().
            fsize = image_size // self.strides[i]
            grid_x = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).to(device)
            grid_y = torch.arange(fsize, dtype=torch.float).repeat(batch, 3, fsize, 1).permute(0, 1, 3, 2).to(device)
            anchor_w = torch.from_numpy(masked_anchors[:, 0]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
                device)
            anchor_h = torch.from_numpy(masked_anchors[:, 1]).repeat(batch, fsize, fsize, 1).permute(0, 3, 1, 2).to(
                device)

            self.masked_anchors.append(masked_anchors)
            self.ref_anchors.append(ref_anchors)
            self.grid_x.append(grid_x)
            self.grid_y.append(grid_y)
            self.anchor_w.append(anchor_w)
            self.anchor_h.append(anchor_h)

    def build_target(self, pred, labels, batchsize, fsize, n_ch, output_id):
        """Assign ground-truth boxes to grid cells for one output scale.

        :param pred: decoded predictions (batch, n_anchors, fsize, fsize, 4),
                     boxes in (cx, cy, w, h) grid units.
        :param labels: (batch, max_boxes, 5) tensor of (x1, y1, x2, y2, class)
                       in input-image pixels; all-zero rows are padding.
        :param output_id: scale index (0..2) selecting stride/anchors.
        :returns: (obj_mask, tgt_mask, tgt_scale, target) tensors used to
                  mask and weight the per-element losses in forward().
        """
        # target assignment
        tgt_mask = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 4 + self.n_classes).to(device=self.device)
        obj_mask = torch.ones(batchsize, self.n_anchors, fsize, fsize).to(device=self.device)
        tgt_scale = torch.zeros(batchsize, self.n_anchors, fsize, fsize, 2).to(self.device)
        target = torch.zeros(batchsize, self.n_anchors, fsize, fsize, n_ch).to(self.device)

        # Number of real (non-padding) boxes per image.
        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects

        # GT centers/sizes converted from input pixels to grid units.
        truth_x_all = (labels[:, :, 2] + labels[:, :, 0]) / (self.strides[output_id] * 2)
        truth_y_all = (labels[:, :, 3] + labels[:, :, 1]) / (self.strides[output_id] * 2)
        truth_w_all = (labels[:, :, 2] - labels[:, :, 0]) / self.strides[output_id]
        truth_h_all = (labels[:, :, 3] - labels[:, :, 1]) / self.strides[output_id]
        truth_i_all = truth_x_all.to(torch.int16).cpu().numpy()
        truth_j_all = truth_y_all.to(torch.int16).cpu().numpy()

        # Hoisted out of the per-GT loop: anchors of this scale as a tensor.
        masked_anchors_t = torch.Tensor(self.masked_anchors[output_id])

        for b in range(batchsize):
            n = int(nlabel[b])
            if n == 0:
                continue
            truth_box = torch.zeros(n, 4).to(self.device)
            truth_box[:n, 2] = truth_w_all[b, :n]
            truth_box[:n, 3] = truth_h_all[b, :n]
            truth_i = truth_i_all[b, :n]
            truth_j = truth_j_all[b, :n]

            # calculate iou between truth and reference anchors (size-only match)
            anchor_ious_all = bboxes_iou(truth_box.cpu(), self.ref_anchors[output_id])
            best_n_all = anchor_ious_all.argmax(dim=1)
            best_n = best_n_all % 3
            # keep only GTs whose best anchor belongs to this scale
            best_n_mask = ((best_n_all == self.anch_masks[output_id][0]) |
                           (best_n_all == self.anch_masks[output_id][1]) |
                           (best_n_all == self.anch_masks[output_id][2]))

            if best_n_mask.sum() == 0:
                continue

            truth_box[:n, 0] = truth_x_all[b, :n]
            truth_box[:n, 1] = truth_y_all[b, :n]

            pred_ious = bboxes_iou(pred[b].view(-1, 4), truth_box, xyxy=False)
            pred_best_iou, _ = pred_ious.max(dim=1)
            pred_best_iou = (pred_best_iou > self.ignore_thre)
            pred_best_iou = pred_best_iou.view(pred[b].shape[:3])
            # set mask to zero (ignore) if pred matches truth
            obj_mask[b] = ~ pred_best_iou

            for ti in range(best_n.shape[0]):
                if best_n_mask[ti] == 1:
                    i, j = truth_i[ti], truth_j[ti]
                    a = best_n[ti]
                    obj_mask[b, a, j, i] = 1
                    tgt_mask[b, a, j, i, :] = 1
                    # xy targets: fractional offset within the cell
                    target[b, a, j, i, 0] = truth_x_all[b, ti] - truth_x_all[b, ti].to(torch.int16).to(torch.float)
                    target[b, a, j, i, 1] = truth_y_all[b, ti] - truth_y_all[b, ti].to(torch.int16).to(torch.float)
                    # wh targets: log-ratio relative to the matched anchor
                    target[b, a, j, i, 2] = torch.log(
                        truth_w_all[b, ti] / masked_anchors_t[best_n[ti], 0] + 1e-16)
                    target[b, a, j, i, 3] = torch.log(
                        truth_h_all[b, ti] / masked_anchors_t[best_n[ti], 1] + 1e-16)
                    target[b, a, j, i, 4] = 1
                    target[b, a, j, i, 5 + labels[b, ti, 4].to(torch.int16).cpu().numpy()] = 1
                    # small boxes get larger weight (2 - relative area)
                    tgt_scale[b, a, j, i, :] = torch.sqrt(2 - truth_w_all[b, ti] * truth_h_all[b, ti] / fsize / fsize)
        return obj_mask, tgt_mask, tgt_scale, target

    def forward(self, xin, labels=None):
        """Compute the total loss over all three output scales.

        :param xin: list of raw head outputs, each
                    (batch, n_anchors*(5+n_classes), fsize, fsize).
        :param labels: sequence of per-image numpy arrays of
                       (x1, y1, x2, y2, class) rows, stackable to a
                       (batch, max_boxes, 5) array.
        :returns: scalar loss = xy + wh + obj + cls terms.
        """
        labels = torch.from_numpy(np.stack(labels, axis=0))
        loss, loss_xy, loss_wh, loss_obj, loss_cls = 0, 0, 0, 0, 0
        for output_id, output in enumerate(xin):
            batchsize = output.shape[0]
            fsize = output.shape[2]
            n_ch = 5 + self.n_classes

            output = output.view(batchsize, self.n_anchors, n_ch, fsize, fsize)
            output = output.permute(0, 1, 3, 4, 2)  # .contiguous()

            # logistic activation for xy, obj, cls (wh channels 2:4 stay raw)
            output[..., np.r_[:2, 4:n_ch]] = torch.sigmoid(output[..., np.r_[:2, 4:n_ch]])

            # decode predictions into grid-unit boxes for IoU-based matching
            pred = output[..., :4].clone()
            pred[..., 0] += self.grid_x[output_id]
            pred[..., 1] += self.grid_y[output_id]
            pred[..., 2] = torch.exp(pred[..., 2]) * self.anchor_w[output_id]
            pred[..., 3] = torch.exp(pred[..., 3]) * self.anchor_h[output_id]

            obj_mask, tgt_mask, tgt_scale, target = self.build_target(pred, labels, batchsize, fsize, n_ch, output_id)

            # loss calculation: zero out ignored/unassigned elements on both sides
            output[..., 4] *= obj_mask
            output[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
            output[..., 2:4] *= tgt_scale

            target[..., 4] *= obj_mask
            target[..., np.r_[0:4, 5:n_ch]] *= tgt_mask
            target[..., 2:4] *= tgt_scale

            # reduction='sum' replaces the deprecated size_average=False
            loss_xy += torch.nn.functional.binary_cross_entropy(input=output[..., :2], target=target[..., :2],
                                              weight=tgt_scale * tgt_scale, reduction='sum')
            loss_wh += torch.nn.functional.mse_loss(input=output[..., 2:4], target=target[..., 2:4], reduction='sum') / 2
            loss_obj += torch.nn.functional.binary_cross_entropy(input=output[..., 4], target=target[..., 4], reduction='sum')
            loss_cls += torch.nn.functional.binary_cross_entropy(input=output[..., 5:], target=target[..., 5:], reduction='sum')
            # NOTE(review): the original also computed an L2 loss over the full
            # output here, but it never contributed to the returned loss, so
            # the dead computation was removed.

        loss = loss_xy + loss_wh + loss_obj + loss_cls

        return loss


class YOLO_LOSS_ASFF(torch.nn.Module):
    """Loss for ASFF-YOLOv3: a box loss (GIoU / DIoU / smooth-L1 / KL,
    selected by ``cfg.MODEL.BOXLOSS``), a focal confidence loss, and a
    focal classification loss, summed over three detection scales and
    normalized by the batch size."""

    def __init__(self, cfg):
        super(YOLO_LOSS_ASFF, self).__init__()
        self.cfg = cfg
        self.strides = [8, 16, 32]  # downsampling factor of each scale

    def forward(self, pred, targets):
        """Combine per-scale losses for the small/medium/large heads.

        :param pred: (conv_s, conv_m, conv_l, pred_s, pred_m, pred_l) —
                     raw conv outputs and decoded predictions per scale.
        :param targets: (label_s, label_m, label_l, sboxes, mboxes, lboxes).
        :returns: scalar total loss.
        """
        conv_sbbox, conv_mbbox, conv_lbbox, pred_sbbox, pred_mbbox, pred_lbbox = pred
        label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = targets

        GIOUloss_s, conf_loss_s, probloss_s = self.loss_per_scale(conv_sbbox, pred_sbbox, label_sbbox, sbboxes,
                                                             self.strides[0], cfg=self.cfg)
        GIOUloss_m, conf_loss_m, probloss_m = self.loss_per_scale(conv_mbbox, pred_mbbox, label_mbbox, mbboxes,
                                                             self.strides[1], cfg=self.cfg)
        GIOUloss_l, conf_loss_l, probloss_l = self.loss_per_scale(conv_lbbox, pred_lbbox, label_lbbox, lbboxes,
                                                             self.strides[2], cfg=self.cfg)
        GIOUloss = GIOUloss_s + GIOUloss_m + GIOUloss_l
        conf_loss = conf_loss_s + conf_loss_m + conf_loss_l
        probloss = probloss_s + probloss_m + probloss_l

        # normalize each term by batch size
        GIOUloss = GIOUloss.sum() / self.cfg.TRAIN.BATCH_SIZE
        conf_loss = conf_loss.sum() / self.cfg.TRAIN.BATCH_SIZE
        probloss = probloss.sum() / self.cfg.TRAIN.BATCH_SIZE

        totalloss = GIOUloss + conf_loss + probloss

        return totalloss

    def loss_per_scale(self, conv, pred, label, bboxes, stride, cfg):
        """Compute (box, confidence, class) losses for one detection scale.

        :param conv: raw output of the YOLO conv layer,
            shape (batch_size, anchor_per_scale * (5 + num_class), output_size, output_size)
            before the permute below.
        :param pred: decoded prediction (x, y, w, h, conf, prob) where
            (x, y, w, h) are relative to input_size (e.g. with
            input_size=416, (x, y, w, h) = (120, 200, 50, 70)),
            shape (batch_size, output_size, output_size, anchor_per_scale, 5 + num_class).
        :param label: shape (batch_size, output_size, output_size, anchor_per_scale, 6 + num_classes).
            Only positions responsible for a GT hold
            (xmin, ymin, xmax, ymax, 1, classes, mixup_weight);
            all other positions hold (0, 0, 0, 0, 0, 0..., 1).
        :param bboxes: shape (batch_size, max_bbox_per_scale, 4), boxes as
            (xmin, ymin, xmax, ymax); used for the IoU between predictions
            and every GT box this detector is responsible for.
        :param stride: stride of this detector.
        :returns: (bbox_loss, conf_loss, prob_loss) scalar sums.
        """
        bcelogit_loss = torch.nn.BCEWithLogitsLoss(reduction='none')
        smooth_loss = torch.nn.SmoothL1Loss(reduction='none')

        conv = conv.permute(0, 2, 3, 1)
        conv_shape = conv.shape
        batch_size = conv_shape[0]
        output_size = conv_shape[1]
        input_size = stride * output_size
        numanchor = cfg.BASE.GT_PER_GRID
        conv = conv.view(batch_size, output_size, output_size, numanchor, -1)
        if self.cfg.MODEL.BOXLOSS == 'KL':
            # KL head carries 4 extra variance channels before conf/prob.
            conv_raw_conf = conv[..., 8:9]
            conv_raw_prob = conv[..., 9:]

            pred_coor = pred[..., 0:4]
            pred_vari = pred[..., 4:8]
            pred_conf = pred[..., 8:9]
            pred_prob = pred[..., 9:]
        else:
            conv_raw_conf = conv[..., 4:5]
            conv_raw_prob = conv[..., 5:]

            pred_prob = pred[..., 5:]
            pred_coor = pred[..., 0:4]
            pred_conf = pred[..., 4:5]

        label_coor = label[..., 0:4]
        respond_bbox = label[..., 4:5]  # 1 where a GT is assigned, else 0
        label_prob = label[..., 5:-1]
        label_mixw = label[..., -1:]  # mixup weight, multiplies every term
        # (1) box regression loss, weighted so small boxes count more
        bbox_wh = label_coor[..., 2:] - label_coor[..., :2]
        bbox_loss_scale = 2.0 - 1.0 * bbox_wh[..., 0:1] * bbox_wh[..., 1:2] / (input_size ** 2)
        if self.cfg.MODEL.BOXLOSS == 'iou':
            giou = GIOU(pred_coor, label_coor).unsqueeze(-1)
            giou_loss = respond_bbox * bbox_loss_scale * (1.0 - giou)
            bbox_loss = giou_loss
        elif self.cfg.MODEL.BOXLOSS == 'l1':
            l1_loss = respond_bbox * bbox_loss_scale * smooth_loss(target=label_coor, input=pred_coor) * cfg.l1scale
            bbox_loss = l1_loss
        elif self.cfg.MODEL.BOXLOSS == 'KL':
            # KL regression: attenuate by predicted log-variance (He et al.).
            l1_loss = respond_bbox * bbox_loss_scale * (
                    torch.exp(-pred_vari) * smooth_loss(target=label_coor,
                                                        input=pred_coor) + 0.5 * pred_vari) * cfg.l1scale
            bbox_loss = l1_loss
        elif self.cfg.MODEL.BOXLOSS == 'diou':
            diou = DIOU(pred_coor, label_coor).unsqueeze(-1)
            diou_loss = respond_bbox * bbox_loss_scale * (1.0 - diou)
            bbox_loss = diou_loss
        else:
            raise NotImplementedError
        bbox_loss = bbox_loss * label_mixw
        # (2) confidence loss: background = unassigned cells with max IoU < 0.5
        iou = iou_calc3(pred_coor.unsqueeze(4), bboxes.unsqueeze(1).unsqueeze(1).unsqueeze(1))
        max_iou, _ = torch.max(iou, dim=-1)

        max_iou = max_iou.unsqueeze(-1)
        respond_bgd = (1.0 - respond_bbox) * (max_iou < 0.5).float()

        conf_focal = focalloss(respond_bbox, pred_conf)

        conf_loss = conf_focal * (
                respond_bbox * bcelogit_loss(target=respond_bbox, input=conv_raw_conf)
                +
                respond_bgd * bcelogit_loss(target=respond_bbox, input=conv_raw_conf)
        )
        conf_loss = conf_loss * label_mixw
        # (3) classification loss (skipped for single-class heads with no
        # prob channels); the original's dead `if True:` branch was removed.
        if conv_raw_prob.shape[-1] != 0:
            cls_focal = focalloss(label_prob, pred_prob)
            prob_loss = cls_focal * respond_bbox * bcelogit_loss(target=label_prob, input=conv_raw_prob)
        else:
            prob_loss = torch.zeros_like(label_prob)
        prob_loss = prob_loss * label_mixw
        return bbox_loss.sum(), conf_loss.sum(), prob_loss.sum()
