from mind3d.utils.sim_center_utils import _transpose_and_gather_feat
from mindspore import nn, ops, Tensor


class FastFocalLoss(nn.Cell):
    '''
    Reimplemented focal loss, exactly the same as the CornerNet version.
    Faster and costs much less memory.

    Inputs to construct:
        out, target: B x C x H x W  (predicted / ground-truth heatmaps;
            `out` is assumed to be in (0, 1), e.g. post-sigmoid)
        ind, mask: B x M  (flattened peak indices and validity mask)
        cat (category id for peaks): B x M

    Returns a scalar loss: -(pos_loss + neg_loss) / num_pos, or -neg_loss
    when there are no positive (valid) peaks.
    '''

    def __init__(self):
        super(FastFocalLoss, self).__init__()
        # Instantiate operators once here instead of per call in construct:
        # this is the idiomatic MindSpore pattern and avoids rebuilding
        # primitive ops on every forward pass in graph mode.
        self.pow = ops.Pow()
        self.log = ops.Log()
        self.expand_dims = ops.ExpandDims()
        self.gather_d = ops.GatherD()

    def construct(self, out, target, ind, mask, cat):
        # Negative term over the whole heatmap. At ground-truth peaks
        # target == 1, so the (1 - target)^4 weight zeroes their
        # contribution — no explicit peak exclusion is needed.
        neg_weights = self.pow(1 - target, 4)
        neg_loss = (self.pow(out, 2) * neg_weights * self.log(1 - out)).sum()

        # Positive term: gather predictions at the peak pixels, then pick
        # the channel of each peak's category.
        pos_pred_pix = _transpose_and_gather_feat(out, ind)      # B x M x C
        cat_idx = self.expand_dims(cat, 2).astype('int32')       # B x M x 1
        pos_pred = self.gather_d(pos_pred_pix, 2, cat_idx)       # B x M x 1

        num_pos = mask.sum()
        # mask zeroes the padded (invalid) peak slots.
        pos_loss = (self.log(pos_pred) * self.pow(1 - pos_pred, 2)
                    * self.expand_dims(mask, 2)).sum()
        if num_pos == 0:
            return -neg_loss
        return - (pos_loss + neg_loss) / num_pos


class RegLoss(nn.Cell):
    '''
    Regression loss (masked L1) for an output tensor.

    Inputs to construct:
        output (batch x dim x h x w): dense regression map
        mask (batch x max_objects): 1 for valid objects, 0 for padding
        ind (batch x max_objects): flattened pixel index of each object
        target (batch x max_objects x dim): regression targets; NaN entries
            are treated as "no annotation" and contribute zero loss

    Returns a per-dimension loss vector of shape (dim,).
    '''

    def __init__(self):
        super(RegLoss, self).__init__()
        # Create operators and the L1Loss cell once in __init__ rather than
        # on every construct call — the idiomatic MindSpore pattern, and
        # required for efficient graph-mode execution.
        self.expand_dims = ops.ExpandDims()
        self.is_nan = ops.IsNan()
        self.l1_loss = nn.L1Loss(reduction='none')

    def construct(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)   # B x M x dim
        mask = self.expand_dims(mask, 2)                 # B x M x 1
        # Where the target is NaN (unannotated), substitute the prediction
        # itself so the L1 difference there is exactly zero.
        nan_mask = self.is_nan(target)
        target = pred.select(nan_mask, target)
        loss = self.l1_loss(pred * mask, target * mask)
        # Normalize by the number of valid objects (epsilon guards the
        # all-padding case).
        loss = loss / (mask.sum() + 1e-4)
        # Reduce over batch and objects, keeping one value per dim.
        loss = loss.transpose((2, 1, 0)).sum(axis=2).sum(axis=1)
        return loss