import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

import warnings
warnings.filterwarnings("ignore", category=UserWarning)


class YOLOLoss(nn.Module):
    """Training loss for YOLO v1 (Redmon et al., 2016).

    The network output and the target share one layout per grid cell:
    ``[x, y, w, h, conf] * B`` followed by ``C`` class probabilities, i.e. a
    tensor of shape ``[batch, S, S, B*5 + C]``.

    Args:
        fp_size: grid size S (the feature map is S x S cells).
        bbox_num: number B of boxes predicted per cell.
        cls_num: number C of object classes.
        lambda_coord: weight on the coordinate (x, y, w, h) loss.
        lambda_noobj: weight on the confidence loss of object-free cells.
    """

    def __init__(self, fp_size=7, bbox_num=2, cls_num=20,
                 lambda_coord=5.0, lambda_noobj=0.5):
        super(YOLOLoss, self).__init__()
        self.S = fp_size
        self.B = bbox_num
        self.C = cls_num
        self.lambda_coord = lambda_coord
        self.lambda_noobj = lambda_noobj

    def compute_iou(self, boxes_1, boxes_2):
        """Compute pairwise IoU between two sets of corner-format boxes.

        Args:
            boxes_1: (tensor) boxes ``[[x1, y1, x2, y2], ...]``, size [N, 4].
            boxes_2: (tensor) boxes ``[[x1, y1, x2, y2], ...]``, size [M, 4].
        Returns:
            (tensor) IoU matrix, size [N, M].
        """
        N = boxes_1.size(0)
        # BUG FIX: this previously read boxes_2.size(1), i.e. the coordinate
        # dimension (always 4), not the number of boxes in dim 0.
        M = boxes_2.size(0)

        # Top-left corner of every pairwise intersection: [N, M, 2].
        left_top = torch.max(
            boxes_1[:, :2].unsqueeze(1).expand(N, M, 2),  # [N, 2] -> [N, 1, 2] -> [N, M, 2]
            boxes_2[:, :2].unsqueeze(0).expand(N, M, 2),  # [M, 2] -> [1, M, 2] -> [N, M, 2]
        )
        # Bottom-right corner of every pairwise intersection: [N, M, 2].
        right_bottom = torch.min(
            boxes_1[:, 2:].unsqueeze(1).expand(N, M, 2),
            boxes_2[:, 2:].unsqueeze(0).expand(N, M, 2),
        )

        # Negative extents mean the boxes are disjoint -> zero overlap.
        w_h = (right_bottom - left_top).clamp(min=0)  # [N, M, 2]
        intersect = w_h[:, :, 0] * w_h[:, :, 1]       # [N, M]

        area_1 = (boxes_1[:, 2] - boxes_1[:, 0]) * (boxes_1[:, 3] - boxes_1[:, 1])
        area_2 = (boxes_2[:, 2] - boxes_2[:, 0]) * (boxes_2[:, 3] - boxes_2[:, 1])
        area_1 = area_1.unsqueeze(1).expand_as(intersect)  # [N] -> [N, M]
        area_2 = area_2.unsqueeze(0).expand_as(intersect)  # [M] -> [N, M]

        return intersect / (area_1 + area_2 - intersect)

    def forward(self, pred_tensor, label_tensor):
        """Compute the YOLO v1 loss.

        Args:
            pred_tensor: predictions, size [batch, S, S, B*5 + C],
                where 5 = len([x, y, w, h, conf]).
            label_tensor: targets in the same layout as ``pred_tensor``.
        Returns:
            (tensor) scalar loss, averaged over the batch.
        """
        n = self.B * 5 + self.C
        batch_size = pred_tensor.size(0)
        # Keep all bookkeeping tensors on the input's device (CPU or CUDA)
        # instead of hard-coding torch.cuda.* as before.
        device = pred_tensor.device

        # Cells that do / do not contain an object, read off the first box's
        # confidence channel of the label.  [batch, S, S]
        coord_mask = label_tensor[:, :, :, 4] > 0
        noobj_mask = label_tensor[:, :, :, 4] == 0
        coord_mask = coord_mask.unsqueeze(-1).expand_as(label_tensor)  # -> [batch, S, S, n]
        noobj_mask = noobj_mask.unsqueeze(-1).expand_as(label_tensor)

        # Predictions/labels restricted to object cells.
        # n_coord = number of cells containing an object.
        coord_pred = pred_tensor[coord_mask].view(-1, n)                 # [n_coord, n]
        bbox_pred = coord_pred[:, :5 * self.B].contiguous().view(-1, 5)  # [n_coord*B, 5]
        cls_pred = coord_pred[:, 5 * self.B:]                            # [n_coord, C]

        coord_label = label_tensor[coord_mask].view(-1, n)
        bbox_label = coord_label[:, :5 * self.B].contiguous().view(-1, 5)
        cls_label = coord_label[:, 5 * self.B:]

        # ---- Confidence loss for cells with no object ----------------------
        noobj_pred = pred_tensor[noobj_mask].view(-1, n)   # [n_noobj, n]
        noobj_label = label_tensor[noobj_mask].view(-1, n)
        # Select only the confidence columns (4, 9, ...).
        noobj_conf_mask = torch.zeros(noobj_pred.size(), dtype=torch.bool, device=device)
        for b in range(self.B):
            noobj_conf_mask[:, 4 + b * 5] = True
        noobj_pred_conf = noobj_pred[noobj_conf_mask]    # [n_noobj * B]
        noobj_label_conf = noobj_label[noobj_conf_mask]
        loss_noobj = F.mse_loss(noobj_pred_conf, noobj_label_conf, reduction='sum')

        # ---- Pick the "responsible" predictor per object cell --------------
        coord_response_mask = torch.zeros(bbox_label.size(), dtype=torch.bool, device=device)
        coord_not_response_mask = torch.ones(bbox_label.size(), dtype=torch.bool, device=device)
        bbox_label_iou = torch.zeros(bbox_label.size(), device=device)

        # IoU is used only for box selection and as a (detached) confidence
        # target, so keep it out of the autograd graph.
        with torch.no_grad():
            for i in range(0, bbox_label.size(0), self.B):
                pred = bbox_pred[i:i + self.B]  # B predicted boxes at this cell, [B, 5]
                # (x, y) are cell-relative, (w, h) image-relative; convert to
                # corner form at image scale before computing IoU.
                # NOTE(review): dividing (x, y) by S rescales only the offset,
                # not the cell's grid position; pred and label are shifted
                # identically, so the IoU ranking is unaffected.
                pred_xyxy = torch.zeros(pred.size(0), 4, device=device)
                pred_xyxy[:, :2] = pred[:, :2] / float(self.S) - 0.5 * pred[:, 2:4]
                pred_xyxy[:, 2:] = pred[:, :2] / float(self.S) + 0.5 * pred[:, 2:4]

                label = bbox_label[i].view(-1, 5)  # the target box for this cell, [1, 5]
                label_xyxy = torch.zeros(label.size(0), 4, device=device)
                label_xyxy[:, :2] = label[:, :2] / float(self.S) - 0.5 * label[:, 2:4]
                label_xyxy[:, 2:] = label[:, :2] / float(self.S) + 0.5 * label[:, 2:4]

                iou = self.compute_iou(pred_xyxy, label_xyxy)  # [B, 1]
                max_iou, max_idx = iou.max(0)  # best-overlapping predictor

                coord_response_mask[i + max_idx] = True
                coord_not_response_mask[i + max_idx] = False
                # The confidence target of the responsible box is its IoU with
                # the ground-truth box.
                bbox_label_iou[i + max_idx, 4] = max_iou

        # ---- Localization + object-confidence loss (responsible boxes) -----
        bbox_pred_response = bbox_pred[coord_response_mask].contiguous().view(-1, 5)
        bbox_label_response = bbox_label[coord_response_mask].contiguous().view(-1, 5)
        label_conf = bbox_label_iou[coord_response_mask].view(-1, 5)

        loss_x_y = F.mse_loss(bbox_pred_response[:, :2],
                              bbox_label_response[:, :2], reduction='sum')
        # clamp(min=0) guards sqrt against slightly-negative predicted w/h,
        # which would otherwise yield NaNs early in training.
        loss_w_h = F.mse_loss(torch.sqrt(bbox_pred_response[:, 2:4].clamp(min=0)),
                              torch.sqrt(bbox_label_response[:, 2:4]),
                              reduction='sum')
        loss_obj = F.mse_loss(bbox_pred_response[:, 4], label_conf[:, 4], reduction='sum')

        # Class-probability loss for object cells.
        loss_class = F.mse_loss(cls_pred, cls_label, reduction='sum')

        # Non-responsible boxes in object cells get no localization loss, but
        # their confidence is pushed toward zero (object vs. background).
        bbox_pred_not_response = bbox_pred[coord_not_response_mask].contiguous().view(-1, 5)
        bbox_label_not_response = bbox_label[coord_not_response_mask].contiguous().view(-1, 5)
        bbox_label_not_response[:, 4] = 0
        loss_no_response = F.mse_loss(bbox_pred_not_response[:, 4],
                                      bbox_label_not_response[:, 4],
                                      reduction='sum')

        # Weighted sum, averaged over the batch.
        # NOTE(review): the object-confidence term is weighted 2x here (the
        # paper uses 1) -- kept as-is to preserve training behavior.
        loss = (self.lambda_coord * (loss_x_y + loss_w_h)
                + 2 * loss_obj
                + loss_no_response
                + self.lambda_noobj * loss_noobj
                + loss_class)
        return loss / float(batch_size)

