'''
Function:
    Define the focal loss
Author:
    Zhenchao Jin
'''
import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import nn, ops, Parameter, Tensor
from luojianet.ops import operations as P



'''
Function:
    SigmoidFocalLoss
Arguments:
    --prediction: prediction of the network
    --target: ground truth
    --scale_factor: scales the loss for loss balancing
    --lowest_loss_value: flooding level, inspired by the ICML 2020 paper "Do We Need Zero Training Loss After Achieving Zero Training Error?", https://arxiv.org/pdf/2002.08709.pdf
'''

class MSigmoidFocalLoss(nn.Module):
    """Sigmoid focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Args:
        scale_factor (float | None): used as ``avg_factor`` — when given and
            ``reduction == 'mean'``, the summed loss is divided by this value
            instead of the element count.
        gamma (float): focusing parameter; larger values down-weight easy examples.
        alpha (float): balance factor applied to the positive class.
        weight: optional per-sample or per-element loss weight; converted to a Tensor.
        reduction (str): one of 'mean', 'sum', 'none'.
    """

    def __init__(self, scale_factor=None, gamma=2.0, alpha=0.25, weight=None, reduction='mean'):
        super(MSigmoidFocalLoss, self).__init__()
        self.sigmoid = ops.Sigmoid()
        self.alpha = alpha
        self.gamma = gamma
        self.weight = luojianet.Tensor(weight) if weight is not None else weight
        self.reduction = reduction
        self.avg_factor = scale_factor
        self.binary_cross_entropy_with_logits = nn.BCEWithLogitsLoss(reduction="none")
        self.is_weight = (weight is not None)

    def reduce_loss(self, loss):
        """Reduce an elementwise loss tensor per ``self.reduction``.

        Args:
            loss (Tensor): Elementwise loss tensor.
        Return:
            Tensor: mean / sum of ``loss``, or ``loss`` unchanged for 'none'.
        """
        if self.reduction == "mean":
            return loss.mean()
        if self.reduction == "sum":
            return loss.sum()
        return loss

    def weight_reduce_loss(self, loss):
        """Reduce ``loss``, honouring ``self.avg_factor`` when it is set.

        With an avg_factor and reduction='mean', the loss is summed and divided
        by avg_factor instead of the element count.

        Raises:
            ValueError: if avg_factor is combined with reduction='sum'.
        """
        # if avg_factor is not specified, just reduce the loss
        if self.avg_factor is None:
            return self.reduce_loss(loss)
        # if reduction is mean, then average the loss by avg_factor
        if self.reduction == 'mean':
            return loss.sum() / self.avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        if self.reduction == 'none':
            return loss
        raise ValueError('avg_factor can not be used with reduction="sum"')

    def forward(self, pred, target):
        """Compute the focal loss of logits ``pred`` against binary ``target``."""
        pred_sigmoid = self.sigmoid(pred)
        target = ops.cast(target, pred.dtype)
        # pt is the probability mass on the WRONG class: small for easy examples,
        # large for hard ones; pt**gamma focuses the loss on the hard examples.
        pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
        focal_weight = (self.alpha * target + (1 - self.alpha) * (1 - target)) * ops.pow(pt, self.gamma)

        loss = self.binary_cross_entropy_with_logits(pred, target) * focal_weight
        if self.is_weight:
            weight = self.weight
            if weight.shape != loss.shape:
                if weight.shape[0] == loss.shape[0]:
                    # For most cases, weight is of shape (num_priors, ),
                    #  which means it does not have the second axis num_class
                    weight = weight.view(-1, 1)
                elif weight.size == loss.size:
                    # Sometimes, weight per anchor per class is also needed. e.g.
                    #  in FSAF. But it may be flattened of shape
                    #  (num_priors x num_class, ), while loss is still of shape
                    #  (num_priors, num_class).
                    weight = weight.view(loss.shape[0], -1)
                # BUGFIX: validate the (possibly reshaped) weight rather than the
                # raw self.weight, and do it unconditionally — previously an
                # irreconcilable weight could silently broadcast into the loss.
                if weight.ndim != loss.ndim:
                    raise ValueError(f"weight shape {self.weight.shape} is not match to loss shape {loss.shape}")
            loss = loss * weight
        loss = self.weight_reduce_loss(loss)
        return loss

def SigmoidFocalLoss(prediction, target, scale_factor=1.0, gamma=2, alpha=0.25, weight=None, reduction='mean', ignore_index=None, lowest_loss_value=None):
    """Functional entry point for the sigmoid focal loss.

    Args:
        prediction: network logits of shape (..., num_classes).
        target: ground-truth labels.
        scale_factor: forwarded as avg_factor — with reduction='mean' the summed
            loss is divided by this value instead of the element count.
        gamma, alpha: focal-loss hyper-parameters.
        weight: optional per-element loss weight.
        reduction: 'mean', 'sum' or 'none'.
        ignore_index: label value excluded from the loss, or None to keep all.
        lowest_loss_value: flooding level b from "Do We Need Zero Training Loss
            After Achieving Zero Training Error?" (ICML 2020,
            https://arxiv.org/pdf/2002.08709.pdf); when set, returns |loss - b| + b.

    Return:
        Tensor: the (reduced) focal loss.
    """
    # filter out entries whose label equals ignore_index
    if ignore_index is not None:
        num_classes = prediction.shape[-1]
        mask = (target != ignore_index)
        # NOTE(review): the boolean mask is cast to int32 before indexing —
        # presumably a luojianet bool-indexing limitation; verify this selects
        # masked rows rather than gathering rows 0/1.
        mask = mask.astype(luojianet.int32)
        prediction, target = prediction[mask].view(-1, num_classes), target[mask].view(-1)
    # calculate the loss
    loss = MSigmoidFocalLoss(scale_factor, gamma, alpha, weight, reduction)(prediction, target.long())
    # apply "flooding": keep the training loss above lowest_loss_value.
    # BUGFIX: compare against None so that an explicit flooding level of 0.0
    # is not silently treated as disabled (0.0 is falsy).
    if lowest_loss_value is not None:
        return ops.abs(loss - lowest_loss_value) + lowest_loss_value
    return loss