import torch
import torch.nn.functional as F
import torch.nn as nn


# Global device used by the loss modules below.
# NOTE(review): "DIVECE" looks like a typo for DEVICE, and "cuda:1" hard-codes
# the *second* GPU — confirm intent before renaming/changing, since other code
# in this module references this name.
DIVECE = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")


class SCE(nn.Module):
    """Symmetric Cross Entropy loss: ``alpha * CE + beta * RCE``.

    The reverse term uses the clipped-log form ``-A * (1 - p_y)``, where
    ``p_y`` is the predicted probability of the true class and ``A`` stands in
    for log(0) (typically a negative constant such as -4 — TODO confirm the
    sign convention used in ``config.A``).

    ``config`` must expose scalar attributes ``alpha``, ``beta`` and ``A``.
    """

    def __init__(self, config):
        super(SCE, self).__init__()
        # Store plain Python floats instead of tensors pinned to a hard-coded
        # device: scalars broadcast onto whatever device the inputs live on,
        # so the loss works on CPU and on any GPU without cross-device errors.
        self.alpha = float(config.alpha)
        self.beta = float(config.beta)
        self.A = float(config.A)
        self.criteon = nn.CrossEntropyLoss()

    def forward(self, input, target):
        """Compute the loss.

        Args:
            input: ``(batch, num_classes)`` raw logits.
            target: ``(batch,)`` integer class indices.

        Returns:
            Scalar tensor ``alpha * CE + beta * RCE``.
        """
        lce = self.criteon(input, target)
        probs = F.softmax(input, dim=1)
        # Vectorized replacement for the previous per-sample Python loop:
        # select the predicted probability of each sample's true class.
        p_true = probs[torch.arange(target.size(0), device=target.device), target]
        lrce = torch.mean(-self.A * (1.0 - p_true))
        return self.alpha * lce + self.beta * lrce


class SCE_FGM(nn.Module):
    """Symmetric cross entropy between two sets of logits (soft labels).

    Both arguments to :meth:`forward` are raw logits; each side is softmaxed
    to act as the target distribution for the other, and the two soft cross
    entropies are combined as ``alpha * forward + beta * reverse``.
    """

    def __init__(self, config):
        super(SCE_FGM, self).__init__()
        self.alpha = config.semi_alpha
        self.beta = config.semi_beta

    @staticmethod
    def _soft_ce(logits, soft_target_logits):
        # Batch-mean cross entropy of softmax(soft_target_logits) against
        # log-softmax(logits).
        weights = F.softmax(soft_target_logits, dim=1)
        log_probs = F.log_softmax(logits, dim=1)
        return -torch.sum(weights * log_probs) / logits.shape[0]

    def forward(self, input, target):
        forward_term = self._soft_ce(input, target)
        reverse_term = self._soft_ce(target, input)
        # tried (alpha, beta) pairs: [0.5, 0.1], [0.4, 0.1], [0.3, 0.1], [0.35, 0.1]
        return self.alpha * forward_term + self.beta * reverse_term

