import torch
import torch.nn.functional as F


# def distill_loss_common(new_pred_, old_pred_, nc_old, temperature = 3.0):
#     distill_loss = 0
#     for new_pred, old_pred in zip(new_pred_, old_pred_):
#         #print('\n\n\n\n\n\n\n\n',type(new_pred))
#         old_cls = old_pred[..., 5 : 5 + nc_old]   
#         new_cls = new_pred[..., 5 : 5 + nc_old]    
    
#         old_conf = old_pred[..., 4]   
#         new_conf = new_pred[..., 4]   
        
#         old_probs = F.softmax(old_cls / temperature, dim = -1).detach() #总之就是一再防范旧模型不小心又梯度更新
#         new_log_probs = F.log_softmax(new_cls / temperature, dim = -1)
#         distill_loss += F.kl_div(
#             new_log_probs, 
#             old_probs, 
#             reduction = "batchmean", 
#             log_target = False
#         ) * (temperature ** 2)  # 温度缩放补偿


#     return distill_loss

def distill_loss(new_pred_, old_pred_, nc_old, temperature = 3.0):
    """Knowledge-distillation loss between old- and new-model predictions.

    For each pair of prediction tensors, the class logits belonging to the
    old model's classes (channels 5 : 5 + nc_old; presumably a YOLO-style
    layout of 4 box coords + 1 objectness + class scores — confirm against
    the detector head) are compared with temperature-scaled KL divergence.

    Args:
        new_pred_: iterable of prediction tensors from the new (student) model.
        old_pred_: iterable of matching prediction tensors from the old
            (teacher) model; must pair up one-to-one with ``new_pred_``.
        nc_old: number of classes the old model was trained on.
        temperature: softmax temperature; > 1 softens the target distribution.

    Returns:
        Scalar tensor: sum over all prediction levels of
        ``KL(old || new) * temperature**2`` (the ``T**2`` factor compensates
        for the gradient scale reduction caused by the softened softmax).
    """
    total = 0
    for new_pred, old_pred in zip(new_pred_, old_pred_):
        # Slice out only the class logits the old model knows about.
        old_cls = old_pred[..., 5 : 5 + nc_old]
        new_cls = new_pred[..., 5 : 5 + nc_old]

        # detach() ensures no gradient ever flows back into the old model.
        old_probs = F.softmax(old_cls / temperature, dim=-1).detach()
        new_log_probs = F.log_softmax(new_cls / temperature, dim=-1)
        total += F.kl_div(
            new_log_probs,
            old_probs,
            reduction="batchmean",
            log_target=False,
        ) * (temperature ** 2)  # compensate for temperature scaling

    return total




