import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


def _neg_loss(pred, gt):

    pos_inds = gt.eq(1).float()  
    neg_inds = gt.lt(1).float()
    neg_weights = torch.pow(1 - gt, 4)
    loss = 0
    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
    num_pos  = pos_inds.float().sum() 
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()
    if num_pos == 0:
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos 
    return loss


class FocalLoss(nn.Module):
    """nn.Module wrapper around the CornerNet-style focal loss `_neg_loss`."""

    def __init__(self):
        super(FocalLoss, self).__init__()
        # Keep the underlying function as an attribute so it can be
        # inspected or swapped without changing callers.
        self.neg_loss = _neg_loss

    def forward(self, pred_tensor, target_tensor):
        """Return the focal loss between predicted and target heatmaps."""
        loss = self.neg_loss(pred_tensor, target_tensor)
        return loss


def _gather_feat(feat, ind, mask=None):
    dim  = feat.size(2)
    ind  = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    feat = feat.gather(1,ind)
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        feat = feat.view(-1, dim)
    return feat

def _transpose_and_gather_feat(feat, ind):
    """Flatten a (B, C, H, W) feature map to (B, H*W, C) and gather rows at `ind`.

    `permute` returns a non-contiguous view, and `view()` requires a
    contiguous memory layout, so `.contiguous()` is called in between.

    Args:
        feat: (B, C, H, W) feature map.
        ind:  (B, K) flat spatial indices (e.g. object center locations).

    Returns:
        (B, K, C) features gathered at the requested positions.
    """
    channels_last = feat.permute(0, 2, 3, 1).contiguous()
    flat = channels_last.view(channels_last.size(0), -1, channels_last.size(3))
    return _gather_feat(flat, ind)

class RegL1Loss(nn.Module):
    """Masked L1 regression loss evaluated at object center indices."""

    def __init__(self):
        super(RegL1Loss, self).__init__()

    def forward(self, pred, mask, ind, target):
        """Average L1 loss over the valid (masked) regression entries.

        Args:
            pred:   (B, C, H, W) regression feature map.
            mask:   (B, K) validity mask (1 for real objects, 0 for padding).
            ind:    (B, K) flat spatial indices of object centers.
            target: (B, K, C) regression targets.
        """
        gathered = _transpose_and_gather_feat(pred, ind)
        weight = mask.unsqueeze(2).expand_as(gathered).float()
        total = F.l1_loss(gathered * weight, target * weight, reduction='sum')
        # Average loss per valid entry; the epsilon guards against a
        # division by zero when the batch contains no objects.
        return total / (weight.sum() + 1e-4)

def _sigmoid(x):
    y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
    return y

def _softmax(x):
    y = nn.Softmax(dim=1)(x)
    return y


class CSLloss_reg(nn.Module):
    """Soft-label cross-entropy loss for one discretized box-side offset.

    The continuous offset target is quantized into ``reg_max / omega`` bins
    and the network predicts a per-bin distribution. Two soft label sets are
    provided for each target — ``target_high`` (bins above the true value)
    and ``target_low`` (bins below) — and their cross-entropies are blended
    by the target's fractional position within its bin. Mean/variance
    regularizer terms from :class:`MeanVarianceLoss` are returned alongside.
    """
    def __init__(self):
        super(CSLloss_reg, self).__init__()

        # Mean/variance regularizer over the predicted bin distribution.
        self.mv_loss = MeanVarianceLoss()
    def forward(self,pred, mask, ind, target_high,target_low,target,target_cls,args):
        # NOTE(review): ``target_cls`` is accepted but never used in this body.
        cls_num = int(args.reg_max/args.omega)

        # Per-bin probabilities gathered at the object-center indices.
        pred = _softmax(pred)
        pred = _transpose_and_gather_feat(pred, ind)
        # Count of valid objects, taken before the mask is expanded.
        mask_old = mask.sum()
        target = target.unsqueeze(2)
        mask_target = mask.unsqueeze(2).expand_as(target).float()
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # Zero out entries that belong to padded (invalid) objects.
        labels_high = target_high*mask
        labels_low = target_low*mask
        # NOTE(review): ``label`` is computed but not used afterwards.
        label = target*mask_target
        pred = pred*mask

############################################################
        # Flatten everything to (num_positions, ...) for the regularizer.
        mask_var =  mask_target.view(-1,1)
        target = target.view(-1,1)
        pred = pred.view(-1,cls_num+1)
        labels_high = labels_high.view(-1,cls_num+1)
        labels_low = labels_low.view(-1,cls_num+1)
        mean_loss,variance_loss = self.mv_loss(pred,target, mask_var,args)
############################################################

        # Blend weights from the target's fractional offset within its bin.
        weight_high= target%args.omega/args.omega
        weight_low = 1-weight_high

#------------------------- one-sided cross-entropy (active) -------------------------
        per_cross_ent_high = - labels_high * torch.log(pred + 1e-5)
        per_cross_ent_low = - labels_low * torch.log(pred + 1e-5)
#------------------------------------------------------------------------------------

#------------------------- two-sided cross-entropy (disabled) -----------------------
#         per_cross_ent_high = - labels_high * torch.log(pred + 1e-5)-(1 - labels_high) * torch.log(1 - pred+1e-5) #*indices
#         per_cross_ent_low = - labels_low * torch.log(pred + 1e-5) -(1 - labels_low) * torch.log(1 - pred+1e-5) #*indices
#------------------------------------------------------------------------------------
        weight_loss = weight_high*per_cross_ent_high +weight_low*per_cross_ent_low

        # Average soft cross-entropy per valid object.
        loss = weight_loss.sum()/mask_old
#         loss = loss + mean_loss+ variance_loss

        return loss,mean_loss,variance_loss


class Loss(torch.nn.Module):
    """Aggregate detection loss.

    Combines (1) focal loss on the center heatmap, (2) four CSL soft-label
    losses for the top/bottom/left/right side offsets, each paired with
    mean/variance regularizer terms, and (3) focal loss on the corner
    heatmap. An L1 refinement criterion is constructed but its use is
    currently commented out.
    """
    def __init__(self, args):
        super(Loss, self).__init__()

        self.args = args
        # Bins per box side: reg_max/omega quantization steps, +1 boundary bin.
        self.cls_num = int(args.reg_max/args.omega)+1
        self.crit = FocalLoss()        # center heatmap
        self.crit_t = CSLloss_reg()    # top offset
        self.crit_b = CSLloss_reg()    # bottom offset
        self.crit_l = CSLloss_reg()    # left offset
        self.crit_r = CSLloss_reg()    # right offset
        self.crit_cor = FocalLoss()    # corner heatmap
        self.crit_reg = RegL1Loss()    # L1 refinement (disabled below)
    def forward(self, pred_tensor, target_tensor):
        """Return (hm_loss, tblr_loss, mean_loss, var_loss, cor_loss).

        pred_tensor: model output; pred_tensor[0] is indexable with
          [0] center-heatmap logits, [1] concatenated per-side bin logits
          (4 * cls_num channels), [2] corner-heatmap logits.
          (Presumably also [3] for the disabled L1 head — TODO confirm.)
        target_tensor: dict with keys 'hmap', 'cor', 'inds', 'ind_masks',
          'tblr_high', 'tblr_low', 'tblr_cls', 'tblr_reg'.
        """
        hm_loss, t_loss, b_loss, l_loss, r_loss, cor_loss,reg_loss = 0, 0, 0, 0, 0, 0,0
        pred_tensor = pred_tensor[0]
        # Squash heatmap logits into clamped (0, 1) probabilities.
        pred_tensor[0] = _sigmoid(pred_tensor[0])
        pred_tensor[2] = _sigmoid(pred_tensor[2])
        # Split the regression head channel-wise into the four sides.
        pred_t = pred_tensor[1][:, :self.cls_num, :, :]
        pred_b = pred_tensor[1][:, self.cls_num:2 * self.cls_num, :, :]
        pred_l = pred_tensor[1][:, 2 * self.cls_num:3 * self.cls_num, :, :]
        pred_r = pred_tensor[1][:, 3 * self.cls_num:4 * self.cls_num, :, :]

        # Soft labels for the bin just above each target, per side.
        target_t_high = target_tensor['tblr_high'][:, :, :self.cls_num]
        target_b_high = target_tensor['tblr_high'][:, :, self.cls_num:2 * self.cls_num]
        target_l_high = target_tensor['tblr_high'][:, :, 2 * self.cls_num:3 * self.cls_num]
        target_r_high = target_tensor['tblr_high'][:, :, 3 * self.cls_num:4 * self.cls_num]

        # Soft labels for the bin just below each target, per side.
        target_t_low = target_tensor['tblr_low'][:, :, :self.cls_num]
        target_b_low = target_tensor['tblr_low'][:, :, self.cls_num:2 * self.cls_num]
        target_l_low = target_tensor['tblr_low'][:, :, 2 * self.cls_num:3 * self.cls_num]
        target_r_low = target_tensor['tblr_low'][:, :, 3 * self.cls_num:4 * self.cls_num]

        # Hard bin labels per side (passed through but unused downstream).
        target_t_cls = target_tensor['tblr_cls'][:, :, :self.cls_num]
        target_b_cls = target_tensor['tblr_cls'][:, :, self.cls_num:2 * self.cls_num]
        target_l_cls = target_tensor['tblr_cls'][:, :, 2 * self.cls_num:3 * self.cls_num]
        target_r_cls = target_tensor['tblr_cls'][:, :, 3 * self.cls_num:4 * self.cls_num]

        # Continuous regression targets per side.
        target_t_reg = target_tensor['tblr_reg'][:, :, 0]
        target_b_reg = target_tensor['tblr_reg'][:, :, 1]
        target_l_reg = target_tensor['tblr_reg'][:, :, 2]
        target_r_reg = target_tensor['tblr_reg'][:, :, 3]


        hm_loss += self.crit(pred_tensor[0], target_tensor['hmap'])
        t_loss,t_mean_loss,t_var_loss = self.crit_t(pred_t, target_tensor['ind_masks'], target_tensor['inds'], target_t_high, target_t_low,target_t_reg,target_t_cls,self.args)  # tblr_loss
        b_loss,b_mean_loss,b_var_loss = self.crit_b(pred_b, target_tensor['ind_masks'], target_tensor['inds'], target_b_high, target_b_low,target_b_reg,target_b_cls,self.args)
        l_loss,l_mean_loss,l_var_loss = self.crit_l(pred_l, target_tensor['ind_masks'], target_tensor['inds'], target_l_high, target_l_low,target_l_reg,target_l_cls,self.args)
        r_loss,r_mean_loss,r_var_loss = self.crit_r(pred_r, target_tensor['ind_masks'], target_tensor['inds'], target_r_high, target_r_low,target_r_reg,target_r_cls,self.args)
        # Sum the four sides into single scalars for logging/backprop.
        tblr_loss = t_loss+b_loss+l_loss+r_loss
        mean_loss = t_mean_loss+b_mean_loss+l_mean_loss+r_mean_loss
        var_loss = t_var_loss+b_var_loss+l_var_loss+r_var_loss
#         reg_loss += self.crit_reg(pred_tensor[3], target_tensor['ind_masks'],target_tensor['inds'], target_tensor['tblr_reg'])

        cor_loss += self.crit_cor(pred_tensor[2], target_tensor['cor'])

        return hm_loss, tblr_loss,mean_loss,var_loss, cor_loss

from torch import nn
import math
import torch
import torch.nn.functional as F


# class MeanVarianceLoss(nn.Module):

#     def __init__(self):
#         super().__init__()
#         self.lambda_1 = 0.2
#         self.lambda_2 = 0.0000001#0.05
#         self.start_age = 0
#         self.end_age = 99

#     def forward(self, input, target,mask_var):

#         N = input.size()[0]
#         target = target.type(torch.FloatTensor).cuda()
#         m = nn.Softmax(dim=1)
#         p = m(input)
#         # mean loss
#         a = torch.arange(self.start_age, self.end_age + 1, dtype=torch.float32).cuda()
#         mean = (p * a).sum(1, keepdim=True)*mask_var
# #         mean1 = torch.squeeze((p * a).sum(1, keepdim=True), dim=1)
# #         print(mean.shape)
# #         print(mean1.shape)
# #         return
# #         print("target",target)
# #         print("mean",mean)
#         mse = (mean - target)**2
#         mean_loss = mse.mean() / 2.0
# #         print("mse",mse)
# #         print("mean_loss",mean_loss)
# #         return
#         # variance loss
#         b = (a[None, :] - mean[:, None])**2
#         variance_loss = ((p * b).sum(1, keepdim=True)*mask_var).mean()
        
#         return self.lambda_1 * mean_loss, self.lambda_2 * variance_loss


# Disabled variant: L1 mean loss + standard-deviation variance loss.
# class MeanVarianceLoss(nn.Module):

#     def __init__(self):
#         super().__init__()
#         self.lambda_1 = 0.1
#         self.lambda_2 = 0.01#0.05
#         self.start_age = 0
#         self.end_age = 99

#     def forward(self, input, target,mask):

#         N = input.size()[0]
#         target = target.type(torch.FloatTensor).cuda()
# #         m = nn.Softmax(dim=1)
#         p = input
#         # mean loss
#         a = torch.arange(self.start_age, self.end_age + 1, dtype=torch.float32).cuda()
        
#         mean = (p * a).sum(1, keepdim=True)
# #         print("a",a.shape)# torch.Size([100])
# #         print("mean",(p * a).sum(1, keepdim=True).shape)#torch.Size([1200, 1])
# #         print("mask1",mask.shape)#torch.Size([1200, 1])
#         len_mask = mask.sum()
# #         mse = (mean - target)**2
# #         mean_loss = mse.mean() / 2.0
# #         print(len_mask)
#         L1_loss = (torch.abs(mean-target)*mask).sum()/len_mask
#         mean_loss = L1_loss
#         # variance loss
# #         b = (a[None, :] - mean[:, None])**2
# #         a = a.unsqueeze(0).expand_as(mean).float() 
#         b = (a[None, :] - mean[:, None])**2
# #         print(b)
#         b = b.squeeze(1)
# #         print("b",b.shape)#[1200, 1, 100]
# #         var = p*b
# #         print("VVVVVVVVVVVVVVV",var.shape)
#         var = (p * b).squeeze(1).sum(1, keepdim=True)+1e-7      
#         #print("0000000",var) 833左右
# #         print(var.shape)
# #        print("var",var.shape)#var torch.Size([1200, 1, 100])
#         var = torch.pow(var,1/2)
#         #print("powwwwwwwwwwwww",var) 28左右
# #         print(0)
# #         print(p)
#         variance_loss = (var*mask).sum()/len_mask
# #         print("111111111",(var*mask).sum())
# #         print("22222222",variance_loss)
# #         return
# #         print((var*mask).shape)
# #         variance_loss = var.mean()
# #         print(mean_loss.item(),variance_loss.item())
#         return mean_loss, variance_loss


# Active variant: Smooth-L1 mean loss + standard-deviation variance loss.
class MeanVarianceLoss(nn.Module):
    """Mean/variance regularizers over a predicted bin distribution.

    Given per-bin probabilities (already softmax-normalized by the caller),
    this penalizes:
      * mean loss — Smooth-L1 distance between the distribution's expected
        bin index (rescaled by ``args.omega``) and the continuous target;
      * variance loss — the distribution's standard deviation around its
        own mean.
    Both terms are summed over the rows selected by ``mask`` and divided by
    the number of valid rows.

    Fixed: the original hard-coded ``.cuda()`` / ``torch.FloatTensor``,
    which crashed on CPU-only runs; tensors now follow ``input.device``.
    The mask-count divisor is also epsilon-guarded (matching RegL1Loss)
    so an empty mask yields 0 instead of NaN.
    """

    def __init__(self):
        super().__init__()
        # Summed, then normalized by the number of valid (masked) rows.
        self.sl1 = torch.nn.SmoothL1Loss(reduction='sum')

    def forward(self, input, target, mask, args):
        """Return (mean_loss, variance_loss) scalar tensors.

        Args:
            input:  (N, cls_num) per-bin probabilities.
            target: (N, 1) continuous regression targets.
            mask:   (N, 1) validity mask; 1.0 for real rows, 0.0 for padding.
            args:   namespace providing ``reg_max`` and ``omega``.
        """
        cls_num = int(args.reg_max / args.omega) + 1
        # Follow the prediction's device/dtype instead of hard-coding CUDA.
        target = target.float().to(input.device)
        p = input
        # Bin indices 0..cls_num-1, used as the support of the distribution.
        a = torch.arange(0, cls_num, dtype=torch.float32, device=input.device)

        # Expected bin index per row, shape (N, 1).
        mean = (p * a).sum(1, keepdim=True)
        # Epsilon keeps the normalization finite when no rows are valid.
        len_mask = mask.sum() + 1e-4
        # Smooth-L1 between the rescaled expectation and the target,
        # averaged over valid rows (invalid rows are zeroed by the mask).
        mean_loss = self.sl1(mean * args.omega * mask, target * mask) / len_mask

        # Squared distance of every bin from the mean: (N, cls_num).
        b = (a[None, :] - mean[:, None]) ** 2
        b = b.squeeze(1)
        # Distribution variance per row; epsilon keeps sqrt differentiable
        # (and finite) when the variance is exactly zero.
        var = (p * b).squeeze(1).sum(1, keepdim=True) + 1e-7
        std = torch.pow(var, 1 / 2)
        variance_loss = (std * mask).sum() / len_mask
        return mean_loss, variance_loss