                if change == "change1":
                    # Original adversarial loss: generator maximizes the
                    # student/teacher logit disagreement (negated L1).
                    loss_G = -F.l1_loss(s_logit, t_logit)
                    # Activation loss: reward large mean absolute teacher
                    # feature activations.
                    loss_G -= features1_t.abs().mean()
                    loss_G -= features2_t.abs().mean()
                    # Information-entropy loss on the batch-mean class
                    # distribution: encourages class balance across the batch.
                    # NOTE(review): sum(p * log10(p)) is the negative base-10
                    # entropy, so "+=" rewards high entropy; log10 of a zero
                    # probability would yield NaN — confirm this is intended.
                    softmax_class=F.softmax(t_logit, dim=1)
                    softmax_o_T = softmax_class.mean(dim=0)
                    loss_G += 5*(softmax_o_T * torch.log10(softmax_o_T)).sum()
                    # Per-sample class-balance variant (currently disabled):
                    # softmax_class = F.softmax(t_logit, dim=1)
                    # loss_G += (softmax_class * torch.log10(softmax_class)).sum(dim=1).mean(dim=0)
                elif change == "paper":
                    # Adversarial (disagreement) loss, as in "change1".
                    loss_G = -F.l1_loss(s_logit, t_logit)
                    # Boundary loss: add small Gaussian noise to the generated
                    # images and penalize teacher-logit sensitivity to it.
                    # NOTE(review): assumes fake is a (self.bs, 3, 32, 32)
                    # CUDA tensor (CIFAR-like input) — confirm against caller.
                    attack = torch.randn(self.bs, 3, 32, 32, requires_grad=False).cuda()
                    attack = attack*0.001
                    fake_new = fake.detach() + attack
                    t_logit_new = self.teacher(fake_new)
                    loss_G += -0.001 * F.l1_loss(t_logit, t_logit_new)

                    # Reality loss: cross-entropy of the teacher logits against
                    # the teacher's own argmax pseudo-labels (confidence term).
                    labels_pseudo = t_logit.data.max(1)[1]
                    loss_G += 5 * F.cross_entropy(t_logit, labels_pseudo)
                    
                    # Diversity loss: batch-level class-balance entropy plus a
                    # per-sample entropy term (same log10 / NaN caveat as above).
                    softmax_class=F.softmax(t_logit, dim=1)
                    softmax_o_T = softmax_class.mean(dim=0)
                    loss_G += 5*(softmax_o_T * torch.log10(softmax_o_T)).sum()
                    loss_G += 0.001*(softmax_class * torch.log10(softmax_class)).sum(dim=1).mean(dim=0)
                elif change == "change3":
                    # Original boundary (disagreement) loss.
                    loss_G = -F.l1_loss(s_logit, t_logit)
                    # Attention loss: gap between teacher and student mean
                    # absolute feature activations at two feature levels.
                    loss_G += (features1_t.abs().mean() - features1_s.abs().mean())
                    loss_G += (features2_t.abs().mean() - features2_s.abs().mean())
                    # Information-entropy loss on the batch-mean class
                    # distribution (class balance; same log10 caveat as above).
                    softmax_class=F.softmax(t_logit, dim=1)
                    softmax_o_T = softmax_class.mean(dim=0)
                    loss_G += 5*(softmax_o_T * torch.log10(softmax_o_T)).sum()
                elif change == "paper2":
                    # Variant of "paper" WITHOUT the base disagreement loss:
                    #loss_G = -F.l1_loss(s_logit, t_logit)
                    # Boundary loss (see "paper" branch): small random
                    # perturbation of the generated batch; here it initializes
                    # loss_G instead of being added to the disagreement term.
                    attack = torch.randn(self.bs, 3, 32, 32, requires_grad=False).cuda()
                    attack = attack*0.001
                    fake_new = fake.detach() + attack
                    t_logit_new = self.teacher(fake_new)
                    loss_G = -0.001 * F.l1_loss(t_logit, t_logit_new)

                    # Reality loss: here the STUDENT logits are scored against
                    # the teacher's argmax pseudo-labels, with weight 0.1 (the
                    # "paper" branch scores the teacher's own logits with
                    # weight 5) — the asymmetry looks deliberate, but worth
                    # confirming against the experiment notes.
                    labels_pseudo = t_logit.data.max(1)[1]
                    loss_G += 0.1 * F.cross_entropy(s_logit, labels_pseudo)
                    
                    # Diversity loss: batch-level class-balance entropy plus a
                    # per-sample entropy term (same log10 / NaN caveat as above).
                    softmax_class=F.softmax(t_logit, dim=1)
                    softmax_o_T = softmax_class.mean(dim=0)
                    loss_G += 5*(softmax_o_T * torch.log10(softmax_o_T)).sum()
                    loss_G += 0.001*(softmax_class * torch.log10(softmax_class)).sum(dim=1).mean(dim=0)
                     