import torch
import torch.nn as nn


class AngularPenaltySMLoss(nn.Module):
    """Angular-penalty softmax losses: ArcFace, SphereFace, CosFace.

    Also supports plain ``crossentropy`` as a pass-through.

    Args:
        loss_type (str): One of ``"arcface"``, ``"sphereface"``, ``"cosface"``,
            ``"crossentropy"``.
        eps (float): Clamp margin used before ``acos`` to keep it numerically
            stable at the domain boundaries.
        s (float): Scale factor. A falsy value (0 / None) selects the
            per-variant hard-coded default.
        m (float): Angular/cosine margin. A falsy value selects the
            per-variant hard-coded default.
    """

    def __init__(self, loss_type="cosface", eps=1e-7, s=20, m=0.0):
        super(AngularPenaltySMLoss, self).__init__()
        assert loss_type in [
            "arcface",
            "sphereface",
            "cosface",
            "crossentropy",
        ]
        # Falsy s/m (0 or None) falls back to the variant's default value.
        if loss_type == "arcface":
            self.s = 64.0 if not s else s
            self.m = 0.5 if not m else m
        if loss_type == "sphereface":
            self.s = 64.0 if not s else s
            self.m = 1.35 if not m else m
        if loss_type == "cosface":
            self.s = 20.0 if not s else s
            self.m = 0.0 if not m else m
        self.loss_type = loss_type
        self.eps = eps

        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, wf, labels):
        """Compute the loss.

        Args:
            wf: (N, C) logits; for arcface/sphereface these are assumed to be
                cosine similarities in [-1, 1] (hence the clamp) — TODO confirm
                against the feature extractor.
            labels: (N,) integer class indices.

        Returns:
            Scalar loss tensor.
        """
        if self.loss_type == "crossentropy":
            return self.cross_entropy(wf, labels)

        idx = torch.arange(wf.size(0), device=wf.device)
        # O(N) gather of each row's target logit; the previous
        # diagonal(transpose[labels]) form materialized an N x N tensor.
        target_logits = wf[idx, labels]

        if self.loss_type == "cosface":
            numerator = self.s * (target_logits - self.m)
        elif self.loss_type == "arcface":
            numerator = self.s * torch.cos(
                torch.acos(
                    torch.clamp(target_logits, -1.0 + self.eps, 1 - self.eps)
                )
                + self.m
            )
        elif self.loss_type == "sphereface":
            numerator = self.s * torch.cos(
                self.m
                * torch.acos(
                    torch.clamp(target_logits, -1.0 + self.eps, 1 - self.eps)
                )
            )
        else:
            raise ValueError(f"Unsupported loss type: {self.loss_type}")

        # Non-target logits per row, vectorized: drop each row's label column
        # via a boolean mask instead of a per-row Python concat loop.
        mask = torch.ones_like(wf, dtype=torch.bool)
        mask[idx, labels] = False
        excl = wf[mask].view(wf.size(0), -1)

        denominator = torch.exp(numerator) + torch.sum(
            torch.exp(self.s * excl), dim=1
        )
        L = numerator - torch.log(denominator)
        return -torch.mean(L)


class FocalLoss(nn.Module):
    """Focal loss for imbalanced classification.

    Scales per-sample cross entropy by ``alpha * (1 - p_t) ** gamma`` so
    that well-classified samples contribute less to the total loss.
    """

    def __init__(self, alpha=1, gamma=2, reduction="mean"):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        # Unreduced CE so the focal modulation applies per sample.
        self.ce_loss = nn.CrossEntropyLoss(reduction="none")

    def forward(self, inputs, targets):
        per_sample_ce = self.ce_loss(inputs, targets)
        # p_t: softmax probability assigned to the true class.
        prob_true = torch.exp(-per_sample_ce)
        modulated = self.alpha * (1 - prob_true) ** self.gamma * per_sample_ce

        if self.reduction == "sum":
            return modulated.sum()
        if self.reduction == "mean":
            return modulated.mean()
        return modulated


class LabelSmoothingLoss(nn.Module):
    """Cross entropy against a label-smoothed target distribution.

    The true class gets probability ``1 - smoothing``; the remaining mass is
    spread uniformly over the other ``num_classes - 1`` classes.
    """

    def __init__(self, num_classes, smoothing=0.1):
        super(LabelSmoothingLoss, self).__init__()
        self.num_classes = num_classes
        self.smoothing = smoothing
        self.confidence = 1.0 - smoothing

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=-1)
        # Build the smoothed one-hot targets; no gradient flows through them.
        with torch.no_grad():
            smooth_target = torch.full_like(
                log_probs, self.smoothing / (self.num_classes - 1)
            )
            smooth_target.scatter_(1, target.data.unsqueeze(1), self.confidence)
        per_sample = -(smooth_target * log_probs).sum(dim=-1)
        return per_sample.mean()


class DistillationLoss(nn.Module):
    """Knowledge distillation loss.

    Blends hard-label cross entropy with a KL term matching the student's
    temperature-softened distribution to the teacher's.
    """

    def __init__(self, temperature=4.0, alpha=0.5):
        super(DistillationLoss, self).__init__()
        self.temperature = temperature
        self.alpha = alpha
        self.ce_loss = nn.CrossEntropyLoss()
        self.kl_loss = nn.KLDivLoss(reduction="batchmean")

    def forward(self, student_logits, teacher_logits, targets):
        T = self.temperature

        # Soft-target term; T^2 rescales gradients to match the hard loss.
        teacher_soft = nn.functional.softmax(teacher_logits / T, dim=-1)
        student_log_soft = nn.functional.log_softmax(student_logits / T, dim=-1)
        distill = self.kl_loss(student_log_soft, teacher_soft) * (T**2)

        # Standard supervised term on the ground-truth labels.
        supervised = self.ce_loss(student_logits, targets)

        return self.alpha * supervised + (1 - self.alpha) * distill


class LossFactory:
    """Factory for constructing loss modules by name."""

    @staticmethod
    def create_loss(loss_type, **kwargs):
        """
        Build a loss function instance for the given type name.

        Args:
            loss_type (str): Type of loss function to create (case-insensitive)
            **kwargs: Additional arguments for the loss function

        Returns:
            Loss function instance

        Raises:
            ValueError: If the loss type is not recognized
        """
        name = loss_type.lower()

        if name in ("cross_entropy", "crossentropy"):
            scale = kwargs.get("scale", 1.0)
            base_loss = nn.CrossEntropyLoss()
            if scale == 1.0:
                return base_loss

            # Wrapper that multiplies plain cross entropy by a constant.
            class WeightedCrossEntropyLoss(nn.Module):
                def __init__(self, base_loss, scale):
                    super().__init__()
                    self.base_loss = base_loss
                    self.scale = scale

                def forward(self, inputs, targets):
                    return self.scale * self.base_loss(inputs, targets)

            return WeightedCrossEntropyLoss(base_loss, scale)

        if name in ("angular", "cosface", "arcface", "sphereface"):
            variant = kwargs.get("angular_type", name)
            if variant == "angular":
                variant = "cosface"  # generic "angular" maps to cosface
            return AngularPenaltySMLoss(
                loss_type=variant,
                eps=kwargs.get("eps", 1e-7),
                s=kwargs.get("scale", kwargs.get("s", 20)),
                m=kwargs.get("margin", kwargs.get("m", 0.0)),
            )

        if name == "focal":
            return FocalLoss(
                alpha=kwargs.get("alpha", 1),
                gamma=kwargs.get("gamma", 2),
                reduction=kwargs.get("reduction", "mean"),
            )

        if name == "label_smoothing":
            return LabelSmoothingLoss(
                num_classes=kwargs.get("num_classes", 10),
                smoothing=kwargs.get("smoothing", 0.1),
            )

        if name == "distillation":
            return DistillationLoss(
                temperature=kwargs.get("temperature", 4.0),
                alpha=kwargs.get("alpha", 0.5),
            )

        raise ValueError(f"Unsupported loss type: {name}")

    @staticmethod
    def get_available_losses():
        """Get list of available loss functions"""
        return [
            "cross_entropy",
            "angular",
            "cosface",
            "arcface",
            "sphereface",
            "focal",
            "label_smoothing",
            "distillation",
        ]
