# Distillation losses for aligning the student's distribution with the teacher's
from torch.nn import functional as F
def mse_loss(teacher_output, student_output):
    """Mean-squared-error distillation loss between student and teacher outputs.

    Args:
        teacher_output: teacher tensor (treated as the regression target).
        student_output: student tensor of the same shape.

    Returns:
        Scalar tensor: mean of the element-wise squared differences.
    """
    return F.mse_loss(input=student_output, target=teacher_output)

def l1_loss(teacher_output, student_output):
    """Mean-absolute-error (L1) distillation loss between student and teacher.

    Args:
        teacher_output: teacher tensor (treated as the regression target).
        student_output: student tensor of the same shape.

    Returns:
        Scalar tensor: mean of the element-wise absolute differences.
    """
    return F.l1_loss(input=student_output, target=teacher_output)

def kl_divergence(teacher_logits, student_logits, temperature=1.0):
    """Temperature-scaled KL-divergence distillation loss (Hinton et al.).

    ``F.kl_div(input, target)`` expects ``input`` to be LOG-probabilities and
    only propagates gradients through ``input``, so the student's log-softmax
    must be the first argument and the teacher's softmax the ``target``.
    The original code had these swapped, training the wrong side.

    Args:
        teacher_logits: raw teacher logits, softmaxed over the last dim.
        student_logits: raw student logits, same shape as ``teacher_logits``.
        temperature: softening temperature; the loss is scaled by T**2 so
            its gradient magnitude is independent of T (standard KD practice).

    Returns:
        Scalar tensor: batch-mean KL(teacher || student) * temperature**2.
    """
    student_log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction='batchmean') * (temperature ** 2)

def loss_fct(vision_hidden_states, img_token_position, llm_outputs):
    """Combined training loss: distillation KL term plus the LM loss.

    Bug fix: the original computed ``loss = kl_loss + lm_loss`` but then
    returned ``lm_loss``, silently dropping the distillation term.

    Args:
        vision_hidden_states: vision-encoder hidden states; position 0 along
            dim 1 is used as the teacher representation (presumably a CLS
            token — TODO confirm against the vision model).
        img_token_position: index of the image token in the LLM sequence;
            the student representation is taken one position AFTER it
            (``img_token_position + 1`` — verify this offset against the
            prompt layout).
        llm_outputs: model output object exposing ``hidden_states`` (tuple of
            layer activations, last entry used) and ``loss`` (the LM loss).

    Returns:
        Scalar tensor: ``kl_divergence(teacher, student) + llm_outputs.loss``.
    """
    # Teacher: vision feature at sequence position 0.
    teacher_logits = vision_hidden_states[:, 0, :]
    # Student: last-layer LLM hidden state right after the image token.
    student_logits = llm_outputs.hidden_states[-1][:, img_token_position + 1, :]

    # Distillation alignment loss
    kl_loss = kl_divergence(teacher_logits, student_logits)

    # Language-modeling loss
    lm_loss = llm_outputs.loss

    return kl_loss + lm_loss
    