from mxnet.gluon.loss import Loss, SoftmaxCrossEntropyLoss
# from gluonnlp.model.attention_cell import MultiHeadAttentionCell


class SeqBinaryCrossEntropyLoss(Loss):
    """Element-wise binary cross-entropy loss for (multi-label) sequence outputs.

    Parameters
    ----------
    from_sigmoid : bool, default False
        If False, `preds` are raw logits and the numerically stable
        BCE-with-logits formulation is used. If True, `preds` are already
        sigmoid probabilities in (0, 1).
    weight : float or None
        Global scalar weight, forwarded to the Gluon ``Loss`` base class.
    batch_axis : int, default 0
        Batch axis, forwarded to the Gluon ``Loss`` base class.
    """

    def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
        super(SeqBinaryCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_sigmoid = from_sigmoid

    def hybrid_forward(self, F, preds, labels, mask=None, weight=1.0, sample_weight=None):
        """Compute the mean binary cross-entropy over all (valid) elements.

        Parameters
        ----------
        F : module
            ``mxnet.ndarray`` or ``mxnet.symbol`` (supplied by HybridBlock).
        preds : NDArray/Symbol
            Logits (``from_sigmoid=False``) or probabilities (``from_sigmoid=True``).
        labels : NDArray/Symbol
            Binary targets, same shape as `preds`.
        mask : NDArray/Symbol or None
            Optional 0/1 validity mask broadcastable to `preds`. When given,
            the loss is averaged over valid elements only.
        weight : float
            Unused; kept for signature compatibility with existing callers.
        sample_weight : NDArray/Symbol or None
            Unused; kept for signature compatibility with existing callers.

        Returns
        -------
        NDArray/Symbol
            Scalar mean loss.
        """
        if not self._from_sigmoid:
            # Numerically stable BCE with logits:
            #   max(x, 0) - x * z + log(1 + exp(-|x|))
            # 'softrelu' is log(1 + exp(.)), applied to -|x| to avoid overflow.
            loss = F.relu(preds) - preds * labels + F.Activation(-F.abs(preds), act_type='softrelu')
        else:
            # preds are probabilities; the 1e-12 epsilon guards against log(0).
            loss = -(F.log(preds + 1e-12) * labels + F.log(1. - preds + 1e-12) * (1. - labels))
        if mask is not None:
            # Zero out padded positions and average over valid elements only;
            # the max(.., 1) guard avoids division by zero on an all-zero mask.
            loss = loss * mask
            return F.sum(loss) / F.maximum(F.sum(mask), 1.)
        return F.mean(loss)


