from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import torch

class SoftTargetsLoss(torch.nn.Module):
    """Knowledge-distillation loss combining hard- and soft-target terms.

    The hard term is (optionally label-smoothed) cross-entropy between the
    student logits and the integer labels. The soft term is the
    temperature-scaled cross-entropy between the teacher's softened
    distribution and the student's softened distribution, weighted by
    ``temperature ** 2`` as in Hinton et al.'s distillation formulation.
    """

    def __init__(self, smoothing=0.0, normalize=True):
        """
        Args:
            smoothing: label-smoothing mass spread over the non-target
                classes. 0.0 disables smoothing (and the soft-target term).
            normalize: if True, subtract the entropy of the smoothed target
                distribution so a perfect prediction scores ~0.
        """
        # NOTE: original code passed a wrong class name to super(), which
        # raised NameError at construction time.
        super(SoftTargetsLoss, self).__init__()
        self.smoothing = smoothing
        self.normalize = normalize

    def forward(self, tlogits, slogits, labels, temperature=5, p_soft=0.5):
        """Return the per-example blended loss, shaped like ``labels``.

        Args:
            tlogits: teacher logits, shape ``labels.shape + (num_classes,)``.
            slogits: student logits, same shape as ``tlogits``.
            labels: integer class indices.
            temperature: softening temperature for the soft-target term.
            p_soft: blend weight of the soft term (hard term gets 1 - p_soft).
        """
        shape = labels.shape
        # Flatten any leading batch dims to [N, num_classes] / [N].
        tlogits = torch.reshape(tlogits, [-1, tlogits.shape[-1]])
        slogits = torch.reshape(slogits, [-1, slogits.shape[-1]])
        labels = torch.reshape(labels, [-1])

        # --- Hard-target loss -------------------------------------------
        hard_log_probs = torch.nn.functional.log_softmax(slogits, dim=-1)
        batch_idx = torch.arange(labels.shape[0], device=slogits.device)
        # Log-probability of the true class for each example (negative).
        hloss = hard_log_probs[batch_idx, labels]

        # Without smoothing (or at eval time) fall back to plain NLL.
        if not self.smoothing or not self.training:
            return -torch.reshape(hloss, shape)

        n = slogits.shape[-1] - 1.0  # number of non-target classes
        p = 1.0 - self.smoothing     # mass kept on the true class
        q = self.smoothing / n       # mass given to each other class

        if hard_log_probs.dtype != torch.float16:
            sum_probs = torch.sum(hard_log_probs, dim=-1)
            hloss = p * hloss + q * (sum_probs - hloss)
        else:
            # Prevent FP16 overflow: accumulate the sum in FP32.
            sum_probs = torch.sum(hard_log_probs.to(torch.float32), dim=-1)
            hloss = hloss.to(torch.float32)
            hloss = p * hloss + q * (sum_probs - hloss)
            hloss = hloss.to(torch.float16)

        hloss = -torch.reshape(hloss, shape)

        if self.normalize:
            # Subtract the entropy of the smoothed target distribution so
            # that a perfect prediction yields a loss of ~0.
            normalizing = -(p * math.log(p) + n * q * math.log(q + 1e-20))
            hloss = hloss - normalizing

        # --- Soft-target loss -------------------------------------------
        # Cross-entropy between the teacher's softened distribution and the
        # student's softened distribution, computed per example via
        # log_softmax for numerical stability. (The original code
        # instantiated torch.nn.CrossEntropyLoss with tensors as
        # constructor arguments and then negated the result — both wrong.)
        t_soft_probs = torch.nn.functional.softmax(tlogits / temperature, dim=-1)
        stu_log_probs = torch.nn.functional.log_softmax(slogits / temperature, dim=-1)
        sloss = -torch.sum(t_soft_probs * stu_log_probs, dim=-1)

        # Scale by T^2 so soft-target gradients keep a comparable magnitude
        # to the hard-target gradients (standard distillation practice).
        sloss = temperature ** 2 * sloss
        sloss = torch.reshape(sloss, shape)

        return sloss * p_soft + hloss * (1 - p_soft)