import torch
import torch.nn as nn
import torch.nn.functional as F

# Public API: keep in sync with the loss classes defined below.
# Fix: MultiClassDiceLoss was defined in this module but missing from __all__.
__all__ = ['BCEDiceLoss', 'DiceLoss', 'FocalLoss', 'MultiClassDiceLoss', 'TverskyLoss']


class BCEDiceLoss(nn.Module):
    """Combined 0.5 * BCE-with-logits + soft Dice loss for binary segmentation.

    ``input`` holds raw logits; ``target`` holds same-shaped {0, 1} ground
    truth. The Dice term is computed per sample and averaged over the batch.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        eps = 1e-5
        batch = target.size(0)

        # BCE acts on logits directly, which is numerically stable.
        bce_term = F.binary_cross_entropy_with_logits(input, target)

        # Dice acts on probabilities, flattened to (batch, -1).
        probs = torch.sigmoid(input).view(batch, -1)
        truth = target.view(batch, -1)
        overlap = (probs * truth).sum(1)
        denom = probs.sum(1) + truth.sum(1)
        per_sample_dice = (2. * overlap + eps) / (denom + eps)
        dice_term = 1 - per_sample_dice.sum() / batch

        return 0.5 * bce_term + dice_term


class DiceLoss(nn.Module):
    """Soft Dice loss for multi-class segmentation.

    ``input`` is raw logits of shape (N, C, H, W). ``target`` is either class
    indices of shape (N, H, W) or an already one-hot tensor of shape
    (N, C, H, W). The per-class Dice coefficients are averaged over classes
    and batch.

    Args:
        smooth (float): Smoothing factor added to numerator and denominator.
    """

    def __init__(self, smooth=1e-5):
        super().__init__()
        self.smooth = smooth

    def forward(self, input, target):
        probs = F.softmax(input, dim=1)

        # Expand index targets into per-class channel maps.
        if target.dim() == 3:
            one_hot = F.one_hot(target, num_classes=input.shape[1])
            one_hot = one_hot.permute(0, 3, 1, 2).float()
        else:
            one_hot = target

        spatial = (2, 3)
        overlap = (probs * one_hot).sum(dim=spatial)
        total = probs.sum(dim=spatial) + one_hot.sum(dim=spatial)

        dice_per_class = (2. * overlap + self.smooth) / (total + self.smooth)
        return 1 - dice_per_class.mean()


class FocalLoss(nn.Module):
    """
    Focal Loss for dense predictions (alpha-balanced variant).

    Args:
        gamma (float): Focusing parameter. Default: 2.0
        alpha (Tensor, optional): Per-class weighting factor, applied to the
            cross-entropy term. Default: None
        reduction (str): Reduction method: 'none', 'mean', or 'sum'. Default: 'mean'
    """

    def __init__(self, gamma=2.0, alpha=None, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, input, target):
        # Per-element cross entropy; `weight` applies the per-class alpha.
        ce_loss = F.cross_entropy(input, target, reduction='none', weight=self.alpha)

        # BUG FIX: pt must be the model's probability of the target class.
        # Previously pt was derived from the alpha-WEIGHTED CE, so a non-None
        # alpha distorted the (1 - pt)**gamma focusing term (e.g. weight 2
        # turned pt into pt**2). Derive pt from the unweighted CE (= -log pt).
        if self.alpha is not None:
            log_pt = -F.cross_entropy(input, target, reduction='none')
        else:
            log_pt = -ce_loss
        pt = torch.exp(log_pt)

        # Down-weight easy examples; alpha weighting stays inside ce_loss.
        focal_loss = ((1 - pt) ** self.gamma) * ce_loss

        # Apply reduction
        if self.reduction == 'mean':
            return focal_loss.mean()
        elif self.reduction == 'sum':
            return focal_loss.sum()
        else:
            return focal_loss


class TverskyLoss(nn.Module):
    """
    Tversky Loss for binary segmentation:
        TI = TP / (TP + alpha * FN + beta * FP)

    DOC FIX: the previous docstring had alpha/beta swapped relative to the
    implementation. As the code below shows, ``alpha`` multiplies the
    false-NEGATIVE term and ``beta`` the false-POSITIVE term. Behavior is
    unchanged.

    Args:
        alpha (float): Weight for false negatives. Default: 0.5
        beta (float): Weight for false positives. Default: 0.5
        smooth (float): Smoothing factor. Default: 1e-5
    """

    def __init__(self, alpha=0.5, beta=0.5, smooth=1e-5):
        super(TverskyLoss, self).__init__()
        self.alpha = alpha
        self.beta = beta
        self.smooth = smooth

    def forward(self, input, target):
        # Probabilities from logits, then collapse everything to 1-D.
        probs = torch.sigmoid(input).view(-1)
        truth = target.view(-1)

        # Soft confusion-matrix terms.
        true_pos = (probs * truth).sum()
        false_neg = (truth * (1 - probs)).sum()
        false_pos = ((1 - truth) * probs).sum()

        denom = true_pos + self.alpha * false_neg + self.beta * false_pos + self.smooth
        tversky = (true_pos + self.smooth) / denom

        return 1 - tversky


class MultiClassDiceLoss(nn.Module):
    """
    Multi-class Dice loss averaged over the classes that actually contribute.

    ``input`` is raw logits of shape (N, C, H, W). ``target`` is either class
    indices of shape (N, H, W) or an already one-hot tensor of (N, C, H, W).

    Args:
        smooth (float): Smoothing factor. Default: 1e-5
        ignore_index (int): Class index excluded from the loss. Default: None
    """

    def __init__(self, smooth=1e-5, ignore_index=None):
        super(MultiClassDiceLoss, self).__init__()
        self.smooth = smooth
        self.ignore_index = ignore_index

    def forward(self, input, target):
        # Apply softmax to get probabilities
        probs = F.softmax(input, dim=1)

        # Convert index targets to one-hot if needed
        if target.dim() == 3:
            target_onehot = F.one_hot(target, num_classes=input.shape[1]).permute(0, 3, 1, 2).float()
        else:
            target_onehot = target

        total_loss = input.new_zeros(())
        counted = 0

        for class_idx in range(input.shape[1]):
            if self.ignore_index is not None and class_idx == self.ignore_index:
                continue

            p = probs[:, class_idx, :, :]
            t = target_onehot[:, class_idx, :, :]

            intersection = (p * t).sum()
            union = p.sum() + t.sum()

            # Softmax mass is strictly positive, so union == 0 only for
            # degenerate (empty spatial) inputs; skip those classes.
            if union > 0:
                dice = (2. * intersection + self.smooth) / (union + self.smooth)
                total_loss = total_loss + (1 - dice)
                counted += 1

        # BUG FIX: the old code divided by the TOTAL class count even when
        # classes were skipped (ignore_index / empty union), silently shrinking
        # the loss. Average only over the classes that contributed. Result is
        # identical to before when no class is skipped.
        if counted == 0:
            return total_loss  # zero tensor: nothing to penalize
        return total_loss / counted