import torch
import torch.nn.functional as F


def CB_Weight(distr, beta: float = 0.999):
    """Compute class-balanced weights from per-class sample counts.

    Implements the "effective number of samples" re-weighting
    ``(1 - beta) / (1 - beta ** n_c)`` and L2-normalizes the result
    along dim 0.

    Args:
        distr: per-class sample counts; array-like or tensor.
            NOTE(review): presumably shape [Num_cls]; the original comment
            said [B, Num_cls] — normalization runs along dim 0 either way.
        beta: smoothing hyper-parameter in [0, 1); 0.999 is the value
            suggested in the class-balanced-loss paper.

    Returns:
        Tensor of the same shape as ``distr`` (float64), L2-normalized
        along dim 0. Zero counts produce non-finite weights — callers
        should ensure every class has at least one sample.
    """
    # as_tensor avoids the copy-construction warning that torch.tensor()
    # emits when `distr` is already a tensor; float64 matches the original
    # `dtype=float` behavior.
    counts = torch.as_tensor(distr, dtype=torch.float64)
    effective_num = 1.0 - torch.pow(beta, counts)
    weights = (1.0 - beta) / effective_num
    return F.normalize(weights, dim=0)


class MultiCBFocalLoss(torch.nn.Module):
    """Multi-class focal loss with per-class (class-balanced) alpha weights.

    For each sample i with true class y_i and p = softmax(predict):

        loss_i = -alpha[y_i] * (1 - p_{i, y_i}) ** gamma * log(p_{i, y_i})

    and the batch mean is returned.
    """

    def __init__(self, num_cls: int, gamma: float = 2, alpha=None):
        """
        Args:
            num_cls: number of classes.
            gamma: focusing parameter; larger values down-weight
                well-classified (easy) examples.
            alpha: 1-D tensor of per-class weights of length ``num_cls``,
                already placed on the target device (e.g. the output of
                ``CB_Weight``).

        Raises:
            ValueError: if ``alpha`` is not provided.
        """
        super().__init__()
        if alpha is None:
            # ValueError is a subclass of Exception, so existing
            # `except Exception` callers still catch this.
            raise ValueError("Need input alpha tensor in device")
        self.alpha = alpha
        self.gamma = gamma
        self.num_cls = num_cls

    def forward(self, predict, target):
        """Compute the batch-mean focal loss.

        Args:
            predict: raw logits, shape [Batch, Num_cls]. Softmax is applied
                here (the original comment claimed the input was already
                softmaxed, but the code applied softmax again — logits are
                the correct input).
            target: integer class labels in ``range(num_cls)``; shape
                [Batch] or [Batch, 1].

        Returns:
            Scalar tensor: mean focal loss over the batch.
        """
        # log_softmax is numerically stable: softmax(...).log() returns
        # -inf (then nan gradients) when a true-class probability
        # underflows to 0, which log_softmax avoids.
        log_prob = F.log_softmax(predict, dim=1)

        flat_target = target.view(-1)
        # Per-sample alpha weight, indexed by the true class -- [Batch].
        alpha = self.alpha[flat_target]

        # Gather log p_t of the true class for each sample -- [Batch].
        # Equivalent to the original one-hot mask + sum, without building
        # the [Batch, Num_cls] mask.
        log_pt = log_prob.gather(1, flat_target.unsqueeze(1)).squeeze(1)
        pt = log_pt.exp()

        loss = -alpha * torch.pow(1.0 - pt, self.gamma) * log_pt
        return loss.mean()
