import torch
from torch import nn

#--------------------------------------#
#   Compute distillation losses
#-------------------------------------#
def compute_distillation_output_loss(p, t_p, model, d_weight=1):
    """Compute the output-level distillation loss between student and teacher.

    Each prediction tensor is laid out along the last dimension as
    (4 box coords, 1 objectness logit, model.nc class scores).  Every term is
    an element-wise MSE between student and teacher, weighted by the teacher's
    objectness confidence (sigmoid of its objectness logit) so that cells the
    teacher is confident about contribute more.  Reduction is always mean.

    Args:
        p: list of student prediction tensors, one per detection layer.
        t_p: list of teacher prediction tensors, same shapes as ``p``.
        model: detection model; only ``model.hyp`` (dict with 'box', 'obj',
            'cls' gains) and ``model.nc`` (number of classes) are read.
        d_weight: scalar weight applied to the summed distillation loss.

    Returns:
        A 1-element tensor holding the weighted distillation loss.
    """
    # Device-aware accumulators replace the deprecated
    # `torch.cuda.FloatTensor` / `torch.Tensor` constructor dispatch.
    device = t_p[0].device
    t_lcls = torch.zeros(1, device=device)
    t_lbox = torch.zeros(1, device=device)
    t_lobj = torch.zeros(1, device=device)
    h = model.hyp  # hyperparameters

    # One stateless criterion is enough; reduction='none' so every element
    # can be scaled by the teacher objectness before taking the mean.
    mse = nn.MSELoss(reduction="none")

    for pi, t_pi in zip(p, t_p):  # per detection layer
        t_obj_scale = t_pi[..., 4].sigmoid()

        # BBox: weight all 4 coordinates by the teacher objectness.
        b_obj_scale = t_obj_scale.unsqueeze(-1).repeat(1, 1, 1, 1, 4)
        t_lbox += torch.mean(mse(pi[..., :4], t_pi[..., :4]) * b_obj_scale)

        # Class distillation only makes sense with more than one class.
        if model.nc > 1:
            c_obj_scale = t_obj_scale.unsqueeze(-1).repeat(1, 1, 1, 1, model.nc)
            t_lcls += torch.mean(mse(pi[..., 5:], t_pi[..., 5:]) * c_obj_scale)

        # Objectness.
        t_lobj += torch.mean(mse(pi[..., 4], t_pi[..., 4]) * t_obj_scale)

    t_lbox *= h['box']
    t_lobj *= h['obj']
    t_lcls *= h['cls']
    return (t_lobj + t_lbox + t_lcls) * d_weight

# Distillation method 1 (output-level)
def distillation_output_MSEloss1111(outs, soft_outs):
    """Output distillation (method 1): objectness-weighted MSE on raw heads.

    Expects ``outs[0]`` / ``soft_outs[0]`` to hold per-layer tensors of shape
    (B, 3*13, H, W): 3 anchors x (4 box + 1 objectness + 8 class) channels.
    Box, class and objectness MSE terms are each weighted element-wise by the
    teacher's objectness confidence; the summed loss is scaled by a fixed
    gain of 10.
    """
    gain = 10
    criterion = nn.MSELoss(reduction="none")
    lbox = lcls = lobj = 0
    for layer_idx, s_raw in enumerate(outs[0]):
        t_raw = soft_outs[0][layer_idx]
        gh, gw = s_raw.size(2), s_raw.size(3)
        # (B, 39, H, W) -> (B, 3, H, W, 13): anchors out, channels last.
        s_pred = s_raw.view(-1, 3, 13, gh, gw).permute(0, 1, 3, 4, 2).contiguous()
        t_pred = t_raw.view(-1, 3, 13, gh, gw).permute(0, 1, 3, 4, 2).contiguous()

        obj_conf = t_pred[..., 4].sigmoid()

        # Box: weight each of the 4 coordinates by teacher objectness.
        lbox = lbox + (criterion(s_pred[..., :4], t_pred[..., :4])
                       * obj_conf.unsqueeze(-1).repeat(1, 1, 1, 1, 4)).mean()
        # Class: weight each of the 8 class channels the same way.
        lcls = lcls + (criterion(s_pred[..., 5:], t_pred[..., 5:])
                       * obj_conf.unsqueeze(-1).repeat(1, 1, 1, 1, 8)).mean()
        # Objectness.
        lobj = lobj + (criterion(s_pred[..., 4], t_pred[..., 4]) * obj_conf).mean()
    return gain * (lbox + lcls + lobj)


# Distillation method 2 (feature-level)
def distillation_feature_MSEloss(s_f, t_f):
    """Feature distillation: mean MSE between the first three student and
    teacher feature maps, summed and multiplied by a fixed gain of 2."""
    # MSELoss is stateless, so a single instance serves all three pairs.
    criterion = nn.MSELoss(reduction="mean")
    total = sum(criterion(student, teacher)
                for student, teacher in zip(s_f[:3], t_f[:3]))
    return total * 2


# Standard distillation
def distillation_output_MSEloss(outs, soft_outs):
    """Standard output distillation loss (objectness-weighted MSE).

    Each element of ``outs`` / ``soft_outs`` is a raw detection head output of
    shape (B, 3*C, H, W), with C channels per anchor laid out as
    (4 box + 1 objectness + C-5 class scores).  Every MSE term is weighted
    element-wise by the teacher's objectness confidence and the total is
    scaled by a fixed gain of 10.

    Bug fixed: the class weight tensor was repeated to 8 channels even though
    this head has 6 channels per anchor (i.e. 1 class) — a copy-paste remnant
    from the 13-channel variant.  Broadcasting of the (...,1) MSE against the
    (...,8) weight hid the error (the mean is unchanged), but it was wasteful
    and misleading.  The channel/class counts are now derived from the tensor
    itself, which also generalizes the function to any per-anchor layout.
    """
    lambda_pi = 10
    mse = nn.MSELoss(reduction="none")
    t_lcls, t_lbox, t_lobj = 0, 0, 0
    for index in range(len(outs)):
        num_grid_h = outs[index].size(2)
        num_grid_w = outs[index].size(3)
        # Channels per anchor (6 for the original 1-class head: 4+1+1).
        num_ch = outs[index].size(1) // 3
        num_cls = num_ch - 5

        pi = outs[index].view(-1, 3, num_ch, num_grid_h, num_grid_w).permute(0, 1, 3, 4, 2).contiguous()
        t_pi = soft_outs[index].view(-1, 3, num_ch, num_grid_h, num_grid_w).permute(0, 1, 3, 4, 2).contiguous()
        t_obj_scale = t_pi[..., 4].sigmoid()

        # BBox: weight all 4 coordinates by the teacher objectness.
        b_obj_scale = t_obj_scale.unsqueeze(-1).repeat(1, 1, 1, 1, 4)
        t_lbox += torch.mean(mse(pi[..., :4], t_pi[..., :4]) * b_obj_scale)

        # Class: repeat to the actual class count (was hard-coded to 8).
        c_obj_scale = t_obj_scale.unsqueeze(-1).repeat(1, 1, 1, 1, num_cls)
        t_lcls += torch.mean(mse(pi[..., 5:], t_pi[..., 5:]) * c_obj_scale)

        # Objectness.
        t_lobj += torch.mean(mse(pi[..., 4], t_pi[..., 4]) * t_obj_scale)
    return lambda_pi * (t_lbox + t_lcls + t_lobj)
