# -*- coding: utf-8 -*-
"""


Created on Sun Jan  5 17:59:17 2020
@author: Lim
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

EPSILON = 1e-5


def _neg_loss(pred, gt):
    ''' Modified focal loss. Exactly the same as CornerNet.
        Runs faster and costs a little bit more memory
      Arguments:
        pred (batch x c x h x w)
        gt_regr (batch x c x h x w)
    '''

    pos_inds = gt.eq(1).float()
    # pos_inds torch.Size([6, 1, 152, 152])
    neg_inds = gt.lt(1).float()
    neg_weights = torch.pow(1 - gt, 4)
    loss = 0
    #     print('pos_inds',pos_inds.shape)
    #     print('pred',pred.shape)
    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    # pos_loss torch.Size([6, 1, 152, 152])

    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds

    num_pos = pos_inds.float().sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()

    if num_pos == 0:
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos
    return loss


class FocalLoss(nn.Module):
    """nn.Module wrapper around the CornerNet-style focal loss.

    Delegates straight to the module-level _neg_loss so the criterion can be
    used like any other loss module.
    """

    def __init__(self):
        super(FocalLoss, self).__init__()
        # Kept as an attribute so the underlying function stays swappable.
        self.neg_loss = _neg_loss

    def forward(self, pred_tensor, target_tensor):
        """Apply _neg_loss to (pred_tensor, target_tensor) heatmaps."""
        return self.neg_loss(pred_tensor, target_tensor)


def _gather_feat(feat, ind, mask=None):
    dim = feat.size(2)
    #     print('dim',dim)
    #     print('ind',ind.shape)#([2, 128])
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    #     print('newing',ind.shape)#([2, 128, 2])
    feat = feat.gather(1, ind)  # torch.Size([2, 128, 2])
    #     print('ind_feat',feat.size())#
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        feat = feat.view(-1, dim)
    return feat


def _transpose_and_gather_feat(feat, ind):
    """Gather per-object features from a (batch, c, h, w) map.

    Moves channels last, flattens the spatial dims to (batch, h*w, c), then
    selects the rows at the flattened indices in ind via _gather_feat.
    Returns (batch, K, c).
    """
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    batch, height, width, channels = channels_last.shape
    flat = channels_last.view(batch, height * width, channels)
    return _gather_feat(flat, ind)


class RegL1Loss(nn.Module):
    """Masked L1 regression loss over features gathered at object centers.

    Used for the wh / offset / angle regression heads: predictions are read
    out of the dense map at each ground-truth center index, masked to the
    valid objects, and averaged over the number of valid entries.
    """

    def __init__(self):
        super(RegL1Loss, self).__init__()

    def forward(self, pred, mask, ind, target):
        """Compute the masked L1 loss.

        Arguments:
          pred:   (batch, c, h, w) dense regression map
          mask:   (batch, K) 1 for real objects, 0 for padding
          ind:    (batch, K) flattened center indices into h*w
          target: (batch, K, c) regression targets
        """
        gathered = _transpose_and_gather_feat(pred, ind)       # (batch, K, c)
        valid = mask.unsqueeze(2).expand_as(gathered).float()  # (batch, K, c)
        # Zero out padded slots on both sides, then sum the absolute error.
        total = F.l1_loss(gathered * valid, target * valid, reduction='sum')
        # Average over valid entries; epsilon guards the no-object case.
        return total / (valid.sum() + 1e-4)


def _sigmoid(x):
    y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
    return y


def _relu(x):
    y = torch.clamp(x.relu_(), min=0., max=179.99)
    return y


#################################################################################################c

class CtdetLoss(torch.nn.Module):
    """Combined detection loss for rotated-box CenterNet heads.

    Computes four terms — heatmap focal loss plus masked L1 losses for
    width/height, center offset, and angle — and returns them separately
    (the stored loss_weight dict is not applied here; weighting is left to
    the caller).
    """

    def __init__(self, loss_weight):
        """loss_weight: dict of per-term weights, e.g.
        {'hm_weight': 1, 'wh_weight': 0.1, 'reg_weight': 0.1}."""
        super(CtdetLoss, self).__init__()
        self.crit = FocalLoss()
        self.crit_reg = RegL1Loss()
        self.crit_wh = RegL1Loss()
        self.loss_weight = loss_weight

    def forward(self, pred_tensor, target_tensor):
        """Return (hm_loss, wh_loss, off_loss, ang_loss).

        Note: pred_tensor['hm'] and pred_tensor['ang'] are re-assigned with
        their activated values before the losses are computed.
        """
        pred_tensor['hm'] = _sigmoid(pred_tensor['hm'])
        pred_tensor['ang'] = _relu(pred_tensor['ang'])  # original activation

        hm_loss = self.crit(pred_tensor['hm'], target_tensor['hm'])
        ang_loss = self.crit_wh(pred_tensor['ang'], target_tensor['reg_mask'],
                                target_tensor['ind'], target_tensor['ang'])
        wh_loss = self.crit_wh(pred_tensor['wh'], target_tensor['reg_mask'],
                               target_tensor['ind'], target_tensor['wh'])
        off_loss = self.crit_reg(pred_tensor['reg'], target_tensor['reg_mask'],
                                 target_tensor['ind'], target_tensor['reg'])
        return hm_loss, wh_loss, off_loss, ang_loss


class CtdetPiouLoss(torch.nn.Module):
    """Detection loss variant that adds a PIoU term for rotated boxes.

    Returns (hm_loss, wh_loss, off_loss, piou_loss); weighting is left to
    the caller (see the commented-out combination below).

    Fixes: the original forward referenced undefined names pred_tensor /
    target_tensor (its parameters are named outputs / batch), raising
    NameError on every call, and used wh_loss without initializing it.
    """

    def __init__(self, opt):
        super(CtdetPiouLoss, self).__init__()
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss()
        self.crit_wh = RegL1Loss()
        self.crit_piou = PIoULoss()

    def forward(self, outputs, batch):
        """outputs: dict of prediction maps; batch: dict of targets."""
        # Alias to the names the body uses (bug fix: these were undefined).
        pred_tensor, target_tensor = outputs, batch
        # Bug fix: wh_loss was missing from the initialization.
        hm_loss, wh_loss, off_loss, piou_loss = 0, 0, 0, 0

        pred_tensor['hm'] = _sigmoid(pred_tensor['hm'])
        pred_tensor['ang'] = _relu(pred_tensor['ang'])  # original activation

        hm_loss += self.crit(pred_tensor['hm'], target_tensor['hm'])
        wh_loss += self.crit_wh(pred_tensor['wh'], target_tensor['reg_mask'],
                                target_tensor['ind'], target_tensor['wh'])
        off_loss += self.crit_reg(pred_tensor['reg'], target_tensor['reg_mask'],
                                  target_tensor['ind'], target_tensor['reg'])

        # NOTE(review): pred uses key 'ang' above but 'angle' here, and the
        # two cats use different dims — verify the intended PIoU input layout.
        loc_t = torch.cat((target_tensor['cxcy'], target_tensor['wh'],
                           target_tensor['angle']), 2)
        loc_p = torch.cat((pred_tensor['wh'], pred_tensor['angle']), 1)

        piou_loss += self.crit_piou(loc_p, target_tensor['reg_mask'],
                                    target_tensor['ind'], loc_t)

        # loss = opt.hm_weight * hm_loss + opt.piou_weight * piou_loss + \
        #        opt.off_weight * off_loss
        return hm_loss, wh_loss, off_loss, piou_loss


def template_w_pixels(width):
    """Return pixel-center x coordinates covering [-100, width + 100).

    Produces a float32 tensor of length width + 200 holding -99.5, -98.5, …
    (the +0.5 shifts integer indices to pixel centers). The 100-pixel margin
    on each side presumably absorbs boxes that extend past the image border.

    Fix: torch.arange already returns a tensor; wrapping it in torch.tensor()
    made a needless copy and triggers a UserWarning in modern PyTorch
    ("To copy construct from a tensor…").
    """
    x = torch.arange(-100, width + 100)
    return x.float() + 0.5


class PIoULoss(nn.Module):
    """Pixel-IoU loss for rotated boxes, computed at ground-truth centers.

    Fix: forward called the misspelled _tranpose_and_gather_feat, which does
    not exist (NameError on every call); the helper defined in this file is
    _transpose_and_gather_feat.
    """

    def __init__(self):
        super(PIoULoss, self).__init__()
        self.template = template_w_pixels(800)
        # NOTE(review): Pious is not defined in this file chunk — confirm it
        # is imported or defined elsewhere in the module.
        self.PIoU = Pious(10, False)

    # output: (N, 3, h, w) prediction map; target: (N, K, 5) boxes
    def forward(self, output, mask, ind, target):
        """Compute the PIoU loss over positives selected by mask.

        target rows are assumed to be (cx, cy, w, h, angle) in feature-map
        coordinates — TODO confirm against the dataset encoding.
        """
        # Bug fix: was _tranpose_and_gather_feat (typo → NameError).
        pred = _transpose_and_gather_feat(output, ind)
        # Prediction reuses the ground-truth centers so both boxes share them.
        cxcy = target[:, :, 0:2].clone()
        pred_loc = torch.cat((cxcy, pred), -1)
        pos_index = mask > 0.5
        loc_p = pred_loc[pos_index].view(-1, 5)
        loc_t = target[pos_index].view(-1, 5)
        # Scale cx, cy, w, h from stride-4 feature-map coords to pixels;
        # the angle (index 4) stays unscaled.
        loc_p[:, 0:4] = loc_p[:, 0:4] * 4
        loc_t[:, 0:4] = loc_t[:, 0:4] * 4
        # loc_t.data detaches the targets from the graph.
        pious = self.PIoU(loc_p, loc_t.data, self.template.cuda(loc_p.get_device()))
        # Clamp before log so the loss stays finite at piou -> 0.
        pious = torch.clamp(pious, 0.1, 1.0)
        pious = -2.0 * torch.log(pious)
        loss = torch.sum(pious)
        # Average per positive; epsilon guards the zero-positive case.
        loss = loss / (loc_p.size(0) + 1e-9)
        return loss
