import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class yoloLoss(nn.Module):
    """YOLOv1 loss.

    The prediction / target tensors are laid out as [batch, S, S, 30] with the
    last dimension = [x, y, w, h, conf] * 2 boxes + 20 class scores.
    NOTE: the 30-channel layout hard-codes 2 boxes per cell; ``B`` is stored
    but not yet used by the slicing below (TODO: generalize the 4/9, :10
    indices to B boxes).

    Args:
        S: grid size (cells per side), e.g. 14.
        B: boxes per cell (currently must be 2 to match the tensor layout).
        l_coord: weight of the localization (box regression) loss.
        l_noobj: weight of the no-object confidence loss.
    """

    def __init__(self, S, B, l_coord, l_noobj):
        super(yoloLoss, self).__init__()
        self.S = S
        self.B = B
        self.l_coord = l_coord
        self.l_noobj = l_noobj

    def compute_iou(self, box1, box2):
        '''Pairwise IoU between two sets of corner-format boxes.

        Args:
            box1: [N, 4] boxes as (x1, y1, x2, y2).
            box2: [M, 4] boxes as (x1, y1, x2, y2).
        Return:
            iou, sized [N, M].
        '''
        N = box1.size(0)
        M = box2.size(0)

        # Intersection rectangle: element-wise max of top-left corners,
        # min of bottom-right corners, broadcast to all N*M pairs.
        lt = torch.max(
            box1[:, :2].unsqueeze(1).expand(N, M, 2),
            box2[:, :2].unsqueeze(0).expand(N, M, 2)
        )
        rb = torch.min(
            box1[:, 2:].unsqueeze(1).expand(N, M, 2),
            box2[:, 2:].unsqueeze(0).expand(N, M, 2)
        )

        # Negative extents mean the boxes do not overlap -> clamp to 0.
        wh = (rb - lt).clamp(min=0)
        inter = wh[..., 0] * wh[..., 1]

        area1 = ((box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])).unsqueeze(1).expand_as(inter)
        area2 = ((box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])).unsqueeze(0).expand_as(inter)

        iou = inter / (area1 + area2 - inter)
        return iou

    def forward(self, pred_tensor, target_tensor):
        """Compute the weighted YOLOv1 loss, averaged over the batch.

        Args:
            pred_tensor: [b, S, S, 30] network output.
            target_tensor: [b, S, S, 30] encoded ground truth; a cell with an
                object has conf (channel 4) > 0, both box slots filled with the
                same ground-truth box, and a one-hot class vector in 10:30.
                (Layout inferred from the slicing below — confirm against the
                dataset encoder.)
        Returns:
            Scalar loss tensor.
        """
        N = pred_tensor.size(0)
        S = float(self.S)  # was hard-coded 14.; use the configured grid size

        # 1. Object / no-object cell masks, expanded over the channel dim.
        coo_mask = (target_tensor[..., 4] > 0).unsqueeze(-1).expand_as(target_tensor)
        noo_mask = (target_tensor[..., 4] == 0).unsqueeze(-1).expand_as(target_tensor)

        # 2. No-object loss: push both predicted confidences toward 0
        # in cells that contain no object.
        noo_pred = pred_tensor[noo_mask].view(-1, 30)
        noo_target = target_tensor[noo_mask].view(-1, 30)
        noo_pred_c = noo_pred[:, [4, 9]].flatten()
        noo_target_c = noo_target[:, [4, 9]].flatten()
        # reduction='sum' replaces the deprecated size_average=False.
        noobj_loss = F.mse_loss(noo_pred_c, noo_target_c, reduction='sum')

        # 3. Object cells: split box predictions ([*, 5] rows) from class scores.
        coo_pred = pred_tensor[coo_mask].view(-1, 30)
        box_pred = coo_pred[:, :10].contiguous().view(-1, 5)
        class_pred = coo_pred[:, 10:]
        coo_target = target_tensor[coo_mask].view(-1, 30)
        box_target = coo_target[:, :10].contiguous().view(-1, 5)
        class_target = coo_target[:, 10:]

        # 3.1 Classification loss on object cells.
        class_loss = F.mse_loss(class_pred, class_target, reduction='sum')

        # 4. Responsible-box selection: of the 2 predicted boxes per object
        # cell, the one with the highest IoU against the ground truth is
        # "responsible" and receives the regression / confidence loss.
        # zeros_like keeps masks on the same device as the inputs.
        coo_response_mask = torch.zeros_like(box_target, dtype=torch.bool)
        box_target_iou = torch.zeros_like(box_target)
        for i in range(0, box_target.size(0), 2):  # 2 box rows per object cell
            box1 = box_pred[i:i + 2]  # the two predicted boxes of this cell
            # Convert (cx/S-relative) center+size to corner format for IoU.
            box1_xy = torch.cat([box1[:, :2] / S - 0.5 * box1[:, 2:4],
                                 box1[:, :2] / S + 0.5 * box1[:, 2:4]], dim=1)
            box2 = box_target[i].view(-1, 5)  # the single ground-truth box
            box2_xy = torch.cat([box2[:, :2] / S - 0.5 * box2[:, 2:4],
                                 box2[:, :2] / S + 0.5 * box2[:, 2:4]], dim=1)
            iou = self.compute_iou(box1_xy[:, :4], box2_xy[:, :4])
            max_iou, max_index = iou.max(0)  # best of the 2 predicted boxes
            coo_response_mask[i + max_index] = True
            # The IoU is the confidence *target*: detach so no gradient flows
            # through it back into the predicted coordinates.
            box_target_iou[i + max_index, 4] = max_iou.detach()

        # 4.1 Losses on the responsible boxes only.
        box_pred_response = box_pred[coo_response_mask].view(-1, 5)
        box_target_response = box_target[coo_response_mask].view(-1, 5)
        box_target_response_iou = box_target_iou[coo_response_mask].view(-1, 5)
        # 4.1.1 Confidence loss: predicted conf should match the achieved IoU.
        contain_loss = F.mse_loss(box_pred_response[:, 4], box_target_response_iou[:, 4], reduction='sum')
        # 4.1.2 Localization loss: xy directly, wh through sqrt (YOLOv1 paper).
        # BUGFIX: the original sliced [:, 2:], which wrongly pulled the
        # confidence column (index 4) into the sqrt regression; use [:, 2:4].
        loc_loss = F.mse_loss(box_pred_response[:, :2], box_target_response[:, :2], reduction='sum') + \
                   F.mse_loss(torch.sqrt(box_pred_response[:, 2:4]), torch.sqrt(box_target_response[:, 2:4]),
                              reduction='sum')

        # Weighted sum, averaged over the batch.
        return (self.l_noobj * noobj_loss + class_loss + 2 * contain_loss + self.l_coord * loc_loss) / N


if __name__ == '__main__':
    # Smoke test. BUGFIX: use B=2 — the 30-channel layout is 2*5 + 20, so the
    # original B=8 was inconsistent (it only ran because B is unused).
    # Use rand() (not randn()) so predicted w/h are non-negative and the
    # sqrt() in the localization loss cannot produce NaN.
    torch.manual_seed(0)
    pred_tensor = torch.rand(2, 14, 14, 30)
    # Encode a single ground-truth object per image at cell (3, 3):
    # both box slots hold (x=0.5, y=0.5, w=0.2, h=0.2, conf=1), class 0.
    target_tensor = torch.zeros(2, 14, 14, 30)
    target_tensor[:, 3, 3, :5] = torch.tensor([0.5, 0.5, 0.2, 0.2, 1.0])
    target_tensor[:, 3, 3, 5:10] = target_tensor[:, 3, 3, :5]
    target_tensor[:, 3, 3, 10] = 1.0
    yolo_loss = yoloLoss(14, 2, 5, 0.5)
    loss = yolo_loss(pred_tensor, target_tensor)
    print(loss)
