from mindspore import nn, ops
from .KL import KLLoss

"""Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks via Attention Transfer"""
class ATLoss(nn.LossBase):
    """Attention-transfer (AT) distillation loss.

    From "Paying More Attention to Attention: Improving the Performance of
    Convolutional Neural Networks via Attention Transfer" (Zagoruyko &
    Komodakis, ICLR 2017).

    The student and teacher feature maps are L2-normalized along axis 1,
    then the reduced squared difference between them is returned. If the two
    maps disagree in size, the larger one is adaptively average-pooled down
    to match the smaller one before normalization.
    """

    def __init__(self, reduction="mean"):
        """
        Args:
            reduction (str): element-wise loss reduction mode
                ('mean', 'sum' or 'none'); forwarded to nn.LossBase.
        """
        super().__init__(reduction)
        self.l2_normalize = ops.L2Normalize(axis=1)

    def construct(self, f_s, f_t):
        """Compute the AT loss between student map `f_s` and teacher map `f_t`."""
        s_H, t_H = f_s.shape[1], f_t.shape[1]

        # NOTE(review): the sizes compared here come from axis 1, but
        # AdaptiveAvgPool2D((None, t_H)) resizes only the *last* spatial
        # axis. This is consistent only if the trailing spatial axes are
        # square — confirm against the feature-map layout the networks emit.
        if s_H > t_H:
            f_s = ops.AdaptiveAvgPool2D((None, t_H))(f_s)
        elif s_H < t_H:
            f_t = ops.AdaptiveAvgPool2D((None, s_H))(f_t)

        f_s = self.l2_normalize(f_s)
        f_t = self.l2_normalize(f_t)
        # Squared error between normalized maps, reduced per `reduction`.
        return self.get_loss((f_s - f_t) ** 2)

class ATLossCell(nn.Cell):
    """Training cell for attention-transfer distillation.

    Combines three terms into a single scalar loss:

        CE(student_logits, label)
        + KL(student_logits, teacher_logits)
        + beta * AT(student_fm, teacher_fm)
    """

    def __init__(self, student_net, beta, temperature=4):
        """
        Args:
            student_net: network returning ``(feature_map, logits)`` when
                called with ``need_feature_map=True``.
            beta (float): weight of the attention-transfer term.
            temperature (int): softening temperature passed to KLLoss.
                Defaults to 4, preserving the previously hard-coded value.
        """
        super().__init__(auto_prefix=False)
        self.student_net = student_net
        self.kl_loss = KLLoss(temperature)
        self.ce_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        self.at_loss = ATLoss()
        self.beta = beta

    def construct(self, teacher_fm, teacher_logits, data, label):
        """Return the combined distillation loss for one batch."""
        student_fm, student_logits = self.student_net(data, need_feature_map=True)
        ce = self.ce_loss(student_logits, label)
        kl = self.kl_loss(student_logits, teacher_logits)
        at = self.at_loss(student_fm, teacher_fm)
        return ce + kl + self.beta * at

    @property
    def backbone_network(self):
        """Expose the student network as the backbone (MindSpore convention)."""
        return self.student_net
