import torch
import torch.nn as nn
import numpy as np
from utils.utils import *
from utils.bbox import *


class YoloLayer(nn.Module):
    """
    YOLO detection layer for a single scale.

    Decodes the raw convolutional output of one detection head. In training
    mode the reshaped raw prediction tensor is returned so the loss can be
    computed externally; in eval mode box centers/sizes are decoded from the
    grid back to image-pixel coordinates.
    """

    def __init__(self, anchors, num_classes, img_size=(416, 416)):
        """
        Construct YOLO layer by params
        :param anchors:         list, [[w, h], [w, h], ... ,[w, h]] in image pixels
        :param num_classes:     int
        :param img_size:        int (square image), or sequence ([w, h])
        """
        super(YoloLayer, self).__init__()

        self.ngx = 0        # grid cells along x (set lazily in forward)
        self.ngy = 0        # grid cells along y (set lazily in forward)
        self.stridex = 0    # image pixels per grid cell along x
        self.stridey = 0    # image pixels per grid cell along y
        self.anchors = anchors
        self.na = len(anchors)   # number of anchors
        self.nc = num_classes    # number of classes
        # Set the image size handled by this module from the input
        # (accepts an int, a 1-element sequence, or a [w, h] sequence).
        self.img_w, self.img_h = self._parse_size(img_size)

    @staticmethod
    def _parse_size(img_size):
        """Normalize an int / 1-element / [w, h] size spec to a (w, h) pair."""
        if isinstance(img_size, (int, float)):
            return img_size, img_size
        if len(img_size) == 1:
            # BUG FIX: original assigned the sequence itself, not its element.
            return img_size[0], img_size[0]
        return img_size[0], img_size[1]

    def compute_grid_offsets(self, imgw, imgh, ngy, ngx, cuda=True):
        """
        Cache per-cell offsets, grid-scaled anchors and strides for an
        (ngy, ngx) grid over an imgw x imgh image.
        """
        self.ngy = ngy
        self.ngx = ngx
        self.img_w = imgw
        self.img_h = imgh

        FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        self.stridex = imgw / self.ngx
        self.stridey = imgh / self.ngy
        # Calculate offsets for each grid cell, broadcastable against
        # prediction tensors of shape [nb, na, ngy, ngx].
        self.grid_x = torch.arange(ngx).repeat(ngy, 1).view([1, 1, ngy, ngx]).type(FloatTensor)
        self.grid_y = torch.arange(ngy).repeat(ngx, 1).t().view([1, 1, ngy, ngx]).type(FloatTensor)
        # Anchors rescaled from image pixels to grid units.
        self.scaled_anchors = FloatTensor([(a_w / self.stridex, a_h / self.stridey) for a_w, a_h in self.anchors])
        self.anchor_w = self.scaled_anchors[:, 0:1].view((1, self.na, 1, 1))
        self.anchor_h = self.scaled_anchors[:, 1:2].view((1, self.na, 1, 1))
        # Per-coordinate stride [x, y, w, h] used to map decoded boxes from
        # grid units back to image pixels (x/w use stridex, y/h use stridey).
        self.stride_vec = FloatTensor([self.stridex, self.stridey, self.stridex, self.stridey])

    def forward(self, x, img_size):
        """
        :param x:        raw head output, [nb, (nc + 5) * na, ngy, ngx]
        :param img_size: int (square image) or [w, h]
        :return: training -> raw predictions [nb, na, ngy, ngx, nc + 5]
                 eval     -> decoded boxes   [nb, na * ngy * ngx, nc + 5],
                             box coords in image pixels (cx, cy, w, h)
        """
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        nb, ngy, ngx = x.shape[0], x.shape[-2], x.shape[-1]
        imgw, imgh = self._parse_size(img_size)
        # Recompute cached grid tensors only when the grid size changes.
        if self.ngx != ngx or self.ngy != ngy:
            self.compute_grid_offsets(imgw, imgh, ngy, ngx, cuda=x.is_cuda)
        # grid_x:   [1, 1, ngy, ngx]
        # grid_y:   [1, 1, ngy, ngx]
        # anchor_w: [1, na, 1, 1]
        # anchor_h: [1, na, 1, 1]

        # [nb, (nc + 5) * na, ngy, ngx] -> [nb, na, nc + 5, ngy, ngx] -> [nb, na, ngy, ngx, nc + 5]
        prediction = (x.view(nb, self.na, self.nc + 5, ngy, ngx).permute(0, 1, 3, 4, 2).contiguous())
        if self.training:  # Training mode, return raw prediction for loss computation
            return prediction
        else:
            # Get outputs, sigmoid() compress to [0, 1]
            px = torch.sigmoid(prediction[..., 0])  # predicted center x : [nb, na, ngy, ngx]
            py = torch.sigmoid(prediction[..., 1])  # predicted center y : [nb, na, ngy, ngx]
            pw = prediction[..., 2]  # predicted width : [nb, na, ngy, ngx]
            ph = prediction[..., 3]  # predicted height : [nb, na, ngy, ngx]
            pconf = torch.sigmoid(prediction[..., 4])  # predicted confidence : [nb, na, ngy, ngx]
            pcls = torch.sigmoid(prediction[..., 5:])  # predicted class : [nb, na, ngy, ngx, nc]
            # Add cell offset and scale with anchors (still in grid units here)
            pred_boxes = FloatTensor(prediction[..., :4].shape)  # predicted boxes : [nb, na, ngy, ngx, 4]
            pred_boxes[..., 0] = px + self.grid_x
            pred_boxes[..., 1] = py + self.grid_y
            pred_boxes[..., 2] = torch.exp(pw) * self.anchor_w
            pred_boxes[..., 3] = torch.exp(ph) * self.anchor_h
            # BUG FIX: original multiplied by undefined `self.stride`
            # (AttributeError); use the per-axis stride vector so x/w scale
            # by stridex and y/h by stridey, correct for non-square inputs.
            # output: [nb, na * ngy * ngx, nc + 5]
            output = torch.cat((pred_boxes.view(nb, -1, 4) * self.stride_vec,
                                pconf.view(nb, -1, 1),
                                pcls.view(nb, -1, self.nc)), -1)
            return output


    #
    #         iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf = self._build_targets(
    #             pred_boxes=pred_boxes,
    #             pred_cls=pcls,
    #             target=targets,
    #             anchors=self.scaled_anchors,
    #             ignore_thres=self.ignore_thres)
    #         # iou_scores: [nb, na, ng, ng], GroundTruth框与预测框的交并比
    #         # class_mask: [nb, na, ng, ng], 预测的类别结果正确的掩模
    #         # obj_mask: [nb, na, ng, ng], 正确的目标cell的掩模
    #         # noobj_mask: [nb, na, ng, ng], 正确的非目标cell的掩模
    #         # tx， ty, tw, th: [nb, na, ng, ng], 正确的目标位置与尺寸
    #         # tcls: [nb, na, ng, ng, nc], 正确的目标类别掩模
    #         # tconf: [nb, na, ng, ng], 正确的目标cell置信度，同 obj_mask
    #
    #         # 计算损失，使用obj_mask索引进行计算，从而忽略不存在的目标（置信度损失除外）
    #         # 位置损失，使用MSE(误差平方均值）
    #         loss_x = self.mse_loss(px[obj_mask], tx[obj_mask])
    #         loss_y = self.mse_loss(py[obj_mask], ty[obj_mask])
    #         loss_w = self.mse_loss(pw[obj_mask], tw[obj_mask])
    #         loss_h = self.mse_loss(ph[obj_mask], th[obj_mask])
    #         # 类别损失，使用BCE或CE
    #         loss_cls = self.bce_loss(pcls[obj_mask], tcls[obj_mask])
    #         # 置信度损失，使用BCE
    #         loss_conf_obj = self.bce_loss(pconf[obj_mask], tconf[obj_mask])
    #         loss_conf_noobj = self.bce_loss(pconf[noobj_mask], tconf[noobj_mask])
    #         # 计算损失和,使用超参数作为权重系数
    #         total_loss = self.hyp['k'] * (self.hyp['xy'] * (loss_x + loss_y) +
    #                                       self.hyp['wh'] * (loss_w + loss_h) +
    #                                       self.hyp['obj_conf'] * loss_conf_obj +
    #                                       self.hyp['noobj_conf'] * loss_conf_noobj +
    #                                       self.hyp['cls'] * loss_cls)
    #
    #         # Metrics
    #         cls_acc = 100 * class_mask[obj_mask].mean()
    #         conf50 = (pconf > 0.5).float()
    #         iou50 = (iou_scores > 0.5).float()
    #         iou75 = (iou_scores > 0.75).float()
    #         detected_mask = conf50 * class_mask * tconf
    #         precision = torch.sum(iou50 * detected_mask) / (conf50.sum() + 1e-16)
    #         recall50 = torch.sum(iou50 * detected_mask) / (obj_mask.sum() + 1e-16)
    #         recall75 = torch.sum(iou75 * detected_mask) / (obj_mask.sum() + 1e-16)
    #
    #         self.metrics = {
    #             "num_grid": self.ng,
    #             "total_loss": to_cpu(total_loss).item(),
    #             "xy_loss": to_cpu(self.hyp['xy'] * (loss_x + loss_y) * self.hyp['k']).item(),
    #             "wh_loss": to_cpu(self.hyp['wh'] * (loss_w + loss_h) * self.hyp['k']).item(),
    #             "conf_obj_loss": to_cpu((self.hyp['obj_conf'] * loss_conf_obj) * self.hyp['k']).item(),
    #             "conf_noobj_loss": to_cpu((self.hyp['noobj_conf'] * loss_conf_noobj) * self.hyp['k']).item(),
    #             "cls_loss": to_cpu(self.hyp['cls'] * loss_cls * self.hyp['k']).item(),
    #             "cls_acc": to_cpu(cls_acc).item(),
    #             "recall50": to_cpu(recall50).item(),
    #             "recall75": to_cpu(recall75).item(),
    #             "precision": to_cpu(precision).item(),
    #         }
    #
    #         return output, total_loss
    #
    # def _build_targets(self, pred_boxes, pred_cls, target, anchors, ignore_thres):
    #     BoolTensor = torch.cuda.BoolTensor if pred_boxes.is_cuda else torch.BoolTensor
    #     FloatTensor = torch.cuda.FloatTensor if pred_boxes.is_cuda else torch.FloatTensor
    #     nB = pred_boxes.size(0)  # num of batch
    #     nA = pred_boxes.size(1)  # num of anchors
    #     nC = pred_cls.size(-1)  # num of classes
    #     nG = pred_boxes.size(2)  # num of grids
    #
    #     # Output tensors
    #     obj_mask = BoolTensor(nB, nA, nG, nG).fill_(False)
    #     noobj_mask = BoolTensor(nB, nA, nG, nG).fill_(True)
    #     class_mask = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     iou_scores = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     tx = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     ty = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     tw = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     th = FloatTensor(nB, nA, nG, nG).fill_(0)
    #     tcls = FloatTensor(nB, nA, nG, nG, nC).fill_(0)
    #
    #     # Convert to position relative to box
    #     target_boxes = target[:, 2:6] * nG  # target_boxes : [nt, 4]
    #     gxy = target_boxes[:, :2]  # gxy : [nt, 2]
    #     gwh = target_boxes[:, 2:]  # gwh : [nt, 2]
    #     # Get anchors with best iou
    #     ious = torch.stack([bbox_wh_iou(anchor, gwh) for anchor in anchors])  # ious : [na, nt]
    #     best_ious, best_n = ious.max(0)  # best_ious : [nt], best_n : [nt], 计算第0维的最大值
    #     # Separate target values
    #     b, target_labels = target[:, :2].long().t()  # b: batch_id, target_labels: class_id
    #     gx, gy = gxy.t()  # gx, gy : [nt]
    #     gw, gh = gwh.t()  # gw, gh : [nt]
    #     gi = torch.clamp(gx.long(), min=0, max=nG - 1)  # gi : [nt], 向下取整，获得所属cell的横纵坐标
    #     gj = torch.clamp(gy.long(), min=0, max=nG - 1)  # gj : [nt], 向下取整，获得所属cell的横纵坐标
    #     # Set masks
    #     obj_mask[b, best_n, gj, gi] = True  # 设置目标掩模
    #     noobj_mask[b, best_n, gj, gi] = False  # 设置非目标掩模
    #
    #     # Set noobj mask to zero where iou exceeds ignore threshold
    #     # 与GroundTruth的iou超过忽略阈值的，标记为目标
    #     for i, anchor_ious in enumerate(ious.t()):
    #         noobj_mask[b[i], anchor_ious > ignore_thres, gj[i], gi[i]] = False
    #
    #     # Coordinates
    #     tx[b, best_n, gj, gi] = gx - gx.floor()  # 计算有效的GroundTruth与对应cell的x坐标偏移量
    #     ty[b, best_n, gj, gi] = gy - gy.floor()  # 计算有效的GroundTruth与对应cell的y坐标偏移量
    #     # Width and height
    #     tw[b, best_n, gj, gi] = torch.log(gw / anchors[best_n, 0] + 1e-16)
    #     th[b, best_n, gj, gi] = torch.log(gh / anchors[best_n, 1] + 1e-16)
    #     # One-hot encoding of label
    #     tcls[b, best_n, gj, gi, target_labels] = 1
    #     # Compute label correctness and iou at best anchor
    #     class_mask[b, best_n, gj, gi] = (pred_cls[b, best_n, gj, gi].argmax(-1) == target_labels).float()
    #     iou_scores[b, best_n, gj, gi] = bbox_iou(pred_boxes[b, best_n, gj, gi], target_boxes, x1y1x2y2=False)
    #
    #     tconf = obj_mask.float()
    #     return iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf
