
from pcnets.models.builder import LOSSES
import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = [
    'CrossEntropy',
    'LabelSmoothCE'
]

@LOSSES.register_module()
class CrossEntropy:
    """Standard cross-entropy loss; a thin wrapper over ``F.cross_entropy``.

    Args:
        reduction (str): reduction mode forwarded verbatim to
            ``F.cross_entropy`` ('mean', 'sum' or 'none').
    """

    def __init__(self, reduction='mean'):
        self.reduction = reduction

    def __call__(self, input, target):
        """Return cross-entropy between logits *input* and indices *target*."""
        loss = F.cross_entropy(input, target, reduction=self.reduction)
        return loss

@LOSSES.register_module()
class LabelSmoothCE:
    """Cross-entropy with label smoothing.

    The one-hot target is softened: the true class receives probability
    ``1 - eps`` and the remaining mass ``eps`` is spread uniformly over
    the other ``n_class - 1`` classes.

    Args:
        reduction (str): 'mean', 'sum' or 'none'.
        eps (float): smoothing factor. Was previously hard-coded to 0.2
            inside ``__call__``; now a parameter with the same default,
            so existing configs are unaffected.
    """

    def __init__(self, reduction='mean', eps=0.2):
        self.reduction = reduction
        self.eps = eps

    def __call__(self, input, target):
        """Compute the smoothed cross-entropy.

        Args:
            input: (N, C) raw logits.
            target: (N,) integer class indices in [0, C).
        """
        eps = self.eps
        n_class = input.size(1)  # C: number of output channels / classes
        one_hot = F.one_hot(target, n_class)
        # Guard the degenerate single-class case to avoid division by zero.
        off_value = eps / (n_class - 1) if n_class > 1 else 0.0
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * off_value
        log_prb = F.log_softmax(input, dim=1)
        loss = -(one_hot * log_prb).sum(dim=1)
        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction != 'none':
            # 'none' keeps the per-sample loss, consistent with FocalLoss
            # below; anything else is still rejected as before.
            raise ValueError(f'unknown reduction type: {self.reduction}')
        return loss

@LOSSES.register_module()
class FocalLoss(nn.Module):
    """Multi-class focal loss: ``-alpha * (1 - pt)^gamma * log(pt)``.

    Args:
        alpha: class weighting. A 1-D tensor of per-class weights is
            folded into ``NLLLoss`` as its ``weight``; a plain scalar
            (the 0.75 default) uniformly scales the loss; ``None``
            disables the weighting entirely.
        gamma (float): focusing parameter; 0 recovers cross-entropy.
        reduction (str): 'mean', 'sum' or 'none' (per-element).
    """

    def __init__(self, alpha=0.75, gamma=2, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        # Bug fix: forward() referenced self.nll_loss but it was never
        # created, so every call raised AttributeError. A per-class
        # tensor alpha becomes the NLLLoss weight here; a scalar alpha
        # cannot be a weight and is applied explicitly in forward().
        weight = alpha if isinstance(alpha, torch.Tensor) else None
        self.nll_loss = nn.NLLLoss(weight=weight, reduction='none')

    def forward(self, inputs, targets):
        """Compute the focal loss.

        Args:
            inputs: (N, C) or (N, C, d1, ..., dK) raw logits.
            targets: (N,) or (N, d1, ..., dK) integer class indices.
        """
        if inputs.ndim > 2:
            # (N, C, d1, d2, ..., dK) --> (N * d1 * ... * dK, C)
            c = inputs.shape[1]
            inputs = inputs.permute(0, *range(2, inputs.ndim), 1).reshape(-1, c)
            # (N, d1, d2, ..., dK) --> (N * d1 * ... * dK,)
            targets = targets.view(-1)

        # compute weighted cross entropy term: -alpha_t * log(pt)
        # (a per-class tensor alpha is already part of self.nll_loss)
        log_p = F.log_softmax(inputs, dim=-1)
        ce = self.nll_loss(log_p, targets)
        # A scalar alpha cannot be expressed as an NLLLoss weight,
        # so apply it here as a uniform scale.
        if isinstance(self.alpha, (int, float)):
            ce = self.alpha * ce

        # get true class column from each row.
        # Bug fix: the index tensor must live on the same device as the
        # logits, otherwise indexing fails for CUDA inputs.
        all_rows = torch.arange(len(inputs), device=inputs.device)
        log_pt = log_p[all_rows, targets]

        # compute focal term: (1 - pt)^gamma
        pt = log_pt.exp()
        focal_term = (1 - pt) ** self.gamma

        # the full loss: -alpha * ((1 - pt)^gamma) * log(pt)
        # focal_term is detached so gradients flow only through the
        # cross-entropy factor (keeps the original code's intent).
        loss = focal_term.detach() * ce

        if self.reduction == 'mean':
            loss = loss.mean()
        elif self.reduction == 'sum':
            loss = loss.sum()

        return loss
