import torch

class BCE_L1(torch.nn.Module):
    '''
        loss = -1/pos * log(pt) - 1/neg * log(1-pt)
    '''
    def __init__(self, gamma=0, alpha=[1.,1.]):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.log = torch.nn.BCELoss(reduction='none')

    def forward(self, y_pred, y_true):
        if y_pred.shape[1] != y_true.shape[1]:
            y_true = torch.cat((y_true, 1-y_true), dim=1)
        assert (y_true.shape == y_pred.shape), "predict & target shape don't match"
        batch = y_pred.shape[0]
        y_pred = y_pred.view(batch, -1)
        y_true = y_true.view(batch, -1)

        logpt = self.log(y_pred, y_true)
        n_pos = y_true.sum(dim=1) 
        n_neg = (1-y_true).sum(dim=1) 
        
        pos_loss = y_true*logpt
        neg_loss = (1-y_true)*logpt
        
        loss = pos_loss.sum(dim=1)/n_pos + neg_loss.sum(dim=1)/n_neg 
        return loss.mean()

class mBCELoss(torch.nn.Module):
    """Weighted binary cross-entropy loss.

    The positive-class term is scaled by ``beta``; the log is taken with a
    small epsilon and floored at -100 so a prediction of exactly 0 or 1
    never produces -inf.
    """
    def __init__(self, beta=1):
        super().__init__()
        self.beta = beta

    def forward(self, y_pred, y_true):
        # Expand a single-channel target into (target, 1-target) when the
        # prediction carries both foreground and background channels.
        if y_pred.shape[1] != y_true.shape[1]:
            y_true = torch.cat((y_true, 1 - y_true), dim=1)
        assert (y_true.shape == y_pred.shape), 'predict & target shape do not match'

        def safe_log(t, eps=1e-6):
            # Numerically safe log: epsilon inside, hard floor outside.
            return torch.clamp(torch.log(t + eps), min=-100)

        flat_pred = y_pred.view(y_pred.shape[0], -1)
        flat_true = y_true.view(y_true.shape[0], -1)
        pos_term = -flat_true * safe_log(flat_pred) * self.beta
        neg_term = -(1 - flat_true) * safe_log(1 - flat_pred)
        return torch.mean(pos_term + neg_term)