import torch
import torch.nn as nn
import torch.nn.functional as F


def lovasz_grad(gt_sorted):
    """
    Gradient of the Lovasz extension w.r.t. the sorted errors
    (Alg. 1 in the Lovasz-Softmax paper).

    gt_sorted: [P] ground-truth labels (0/1), sorted by decreasing error.
    Returns a [P] float tensor of per-position gradient weights.
    """
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    # Cumulative foreground / background counts along the sorted order.
    fg_cum = gt_sorted.float().cumsum(0)
    bg_cum = (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - (total_fg - fg_cum) / (total_fg + bg_cum)
    if n <= 1:  # 1-pixel case: nothing to difference
        return jaccard
    # First-order difference gives the gradient of the extension.
    return torch.cat([jaccard[:1], jaccard[1:] - jaccard[:-1]])


def lovasz_hinge_flat(logits, labels):
    r"""
    Binary Lovasz hinge loss on flattened predictions.

    logits: [P] tensor, raw scores in (-inf, +inf)
    labels: [P] tensor, binary ground truth (0 or 1)
    """
    if len(labels) == 0:
        # Only void pixels: return a zero that still carries gradients.
        return logits.sum() * 0.
    # Map labels {0, 1} -> {-1, +1} and compute hinge margins.
    signs = 2. * labels.float() - 1.
    margins = (1. - logits * signs)
    sorted_errors, order = torch.sort(margins, dim=0, descending=True)
    sorted_gt = labels[order.data]
    # Weight the positive part of each margin by the Lovasz gradient.
    return torch.dot(F.relu(sorted_errors), lovasz_grad(sorted_gt))


def lovasz_hinge(logits, labels, per_image=True):
    r"""
    Binary Lovasz hinge loss.

    logits: [B, H, W] tensor, raw scores in (-inf, +inf)
    labels: [B, H, W] tensor, binary ground truth masks (0 or 1)
    per_image: if True, average the loss computed per image;
               otherwise compute it once over the whole batch.
    """
    if not per_image:
        return lovasz_hinge_flat(*flatten_binary_scores(logits, labels))
    per_image_losses = (
        lovasz_hinge_flat(*flatten_binary_scores(img.unsqueeze(0), msk.unsqueeze(0)))
        for img, msk in zip(logits, labels)
    )
    return mean(per_image_losses)


def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions and labels in the batch (binary case).

    scores: prediction tensor of any shape
    labels: ground-truth tensor of the same shape
    ignore: optional label value to drop (e.g. 255 for void pixels);
            None (the default) keeps every pixel, preserving the
            original behavior.
    Returns the pair (scores, labels) as 1-D tensors.
    """
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    # Keep only the pixels whose label is not the ignore value, as the
    # docstring always promised.
    valid = labels != ignore
    return scores[valid], labels[valid]


def mean(l, ignore_nan=False):
    """
    Arithmetic mean that works with generators.

    l: iterable of numbers (or 0-dim tensors)
    ignore_nan: if True, drop NaN entries before averaging
                (the default False preserves the original behavior,
                where any NaN propagates into the result)
    Returns tensor(0.) for an empty (or fully filtered) input so the
    gradient path stays well-defined.
    """
    l = list(l)
    if ignore_nan:
        # NaN is the only value for which x != x, for floats and tensors alike.
        l = [x for x in l if x == x]
    if not l:
        return torch.tensor(0.)
    return sum(l) / len(l)

# Public API of this module: only the loss classes, not the Lovasz helpers.
__all__ = ['BCEDiceLoss', 'LovaszHingeLoss', 'SoftDiceLoss', 'FocalTverskyLoss', 'ComboLoss']


class BCEDiceLoss(nn.Module):
    """Sum of 0.5 * BCE-with-logits and a soft Dice loss.

    Expects raw logits; sigmoid is applied internally for the Dice term.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        bce = F.binary_cross_entropy_with_logits(input, target)
        smooth = 1e-5
        batch = target.size(0)
        # Flatten each sample so Dice is computed per sample, then averaged.
        probs = torch.sigmoid(input).view(batch, -1)
        flat_target = target.view(batch, -1)
        overlap = probs * flat_target
        dice_coeff = (2. * overlap.sum(1) + smooth) / (probs.sum(1) + flat_target.sum(1) + smooth)
        dice_loss = 1 - dice_coeff.sum() / batch
        return 0.5 * bce + dice_loss


class LovaszHingeLoss(nn.Module):
    """Binary Lovasz hinge loss, averaged per image.

    Expects [B, 1, H, W] logits and targets; the channel dim is squeezed
    before delegating to `lovasz_hinge`.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # [B, 1, H, W] -> [B, H, W] as lovasz_hinge expects.
        return lovasz_hinge(input.squeeze(1), target.squeeze(1), per_image=True)


class SoftDiceLoss(nn.Module):
    """1 minus the mean soft Dice coefficient over the batch.

    smooth: additive constant that keeps the ratio finite on empty masks.
    """

    def __init__(self, smooth=1e-5):
        super().__init__()
        self.smooth = smooth

    def forward(self, input, target):
        batch = target.size(0)
        probs = torch.sigmoid(input).view(batch, -1)
        gt = target.view(batch, -1)
        overlap = (probs * gt).sum(1)
        denom = probs.sum(1) + gt.sum(1)
        dice = (2. * overlap + self.smooth) / (denom + self.smooth)
        return 1 - dice.mean()


class FocalTverskyLoss(nn.Module):
    """Focal Tversky loss: (1 - Tversky index) ** gamma, averaged.

    alpha: weight on false positives
    beta:  weight on false negatives
    gamma: focal exponent sharpening hard examples
    smooth: additive constant keeping the ratio finite
    """

    def __init__(self, alpha=0.5, beta=0.5, gamma=0.75, smooth=1e-5):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.smooth = smooth

    def forward(self, input, target):
        probs = torch.sigmoid(input)
        # NOTE(review): the spatial reduction hard-codes dims (2, 3), so
        # inputs must be 4-D ([B, C, H, W]) — confirm callers never pass 3-D.
        spatial = (2, 3)
        tp = (probs * target).sum(dim=spatial)
        fp = (probs * (1 - target)).sum(dim=spatial)
        fn = ((1 - probs) * target).sum(dim=spatial)
        tversky = (tp + self.smooth) / (tp + self.alpha * fp + self.beta * fn + self.smooth)
        focal = torch.pow(1 - tversky, self.gamma)
        return focal.mean()


class ComboLoss(nn.Module):
    """Weighted combination of BCEWithLogits and SoftDice.

    bce_weight: fraction of the total assigned to the BCE term;
                the Dice term gets (1 - bce_weight).
    """

    def __init__(self, bce_weight=0.5):
        super().__init__()
        self.bce_weight = bce_weight
        self.softmaxdice = SoftDiceLoss()
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, input, target):
        w = self.bce_weight
        bce_term = self.bce(input, target)
        dice_term = self.softmaxdice(input, target)
        return w * bce_term + (1 - w) * dice_term
