import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Optional


class MoECtn(nn.Module):
    """Unified auxiliary-loss accumulator for MoV and MoVi models.

    Collects routing losses (router z-loss, load-balancing loss,
    expert-contrastive loss) and any user-registered additional losses,
    then backpropagates all of them at once via :meth:`backward`.

    The ``compute_*`` methods are shaped as ``(module, module_in, dict)``
    so they can be registered as forward hooks on router/expert modules.

    Methods:
        backward: backpropagate all the losses.
    """

    def __init__(self, loss_coef: float = 0.01):
        """
        Args:
            loss_coef: scaling factor applied to every routing-loss term.
        """
        super().__init__()
        # Updated to the device of the logits seen in forward().
        self.device = torch.device("cpu")
        self.loss_coef = loss_coef
        self.routing_losses = []
        self.additional_losses = []

        # NOTE(review): attribute name keeps the original (misspelled)
        # spelling for backward compatibility with external callers.
        self.addtional_lossfun = []

    def reset_loss(self):
        """Clear all accumulated loss terms."""
        self.routing_losses = []
        self.additional_losses = []

    def add(self, lossfun):
        """Register an additional loss function, called as ``lossfun(logits, targets)``."""
        self.addtional_lossfun.append(lossfun)

    def forward(self, logits, targets):
        """Evaluate every registered additional loss and accumulate the results."""
        self.device = logits.device
        for lossfun in self.addtional_lossfun:
            self.additional_losses.append(lossfun(logits, targets))

    def compute_zloss(
        self,
        module,
        module_in,
        router_dict: dict,
    ):
        """Accumulate the router z-loss (https://arxiv.org/abs/2202.08906).

        Encourages router logits to stay small for numerical stability.
        Hook-shaped; ``module`` and ``module_in`` are unused.
        NOTE(review): unlike the other hooks, this one does not check
        ``module.training`` and so also fires in eval mode — confirm intended.

        Args:
            router_dict: must contain "logits", a <float>[..., num_experts]
                tensor of router logits.
        """
        del module, module_in
        router_logits = router_dict["logits"]

        shape = router_logits.shape
        log_z = torch.logsumexp(router_logits, dim=-1)
        z_loss = log_z**2

        # Mean squared log-partition over all routing positions
        # (product of the leading dims), scaled by loss_coef.
        self.routing_losses.append(
            self.loss_coef
            * torch.sum(z_loss, dtype=torch.float32)
            / np.prod(shape[:-1])
        )

    def compute_balance_loss(
        self,
        module,
        module_in,
        router_dict: dict,
    ):
        """Accumulate the Switch-Transformer style load-balancing loss.

        Hook-shaped; only active when ``module.training`` is True.

        Args:
            router_dict: must contain "logits"
                (<float>[..., tokens, num_experts]) and "expert_mask"
                (same shape, or None when no hard assignment exists).
        """
        if module.training:
            del module, module_in
            expert_mask = router_dict["expert_mask"]
            router_logits = router_dict["logits"]
            router_probs = F.softmax(router_logits, dim=-1)
            num_experts = router_probs.shape[-1]
            # Mean router probability per expert over the token dimension.
            router_prob_per_expert = torch.mean(
                router_probs, dtype=torch.float32, dim=-2
            )

            if expert_mask is not None:
                # Fraction of tokens assigned to each expert.
                tokens_per_expert = torch.mean(
                    expert_mask, dtype=torch.float32, dim=-2
                )
                # Dot product of load and probability, scaled so a
                # perfectly uniform router yields a constant value.
                self.routing_losses.append(
                    self.loss_coef
                    * torch.mean(
                        tokens_per_expert * router_prob_per_expert,
                        dtype=torch.float32,
                    )
                    * num_experts**2
                )
            else:
                self.routing_losses.append(
                    self.loss_coef
                    * torch.mean(
                        router_prob_per_expert, dtype=torch.float32
                    )
                    * num_experts**2
                )

    def compute_expert_ctr_loss(
        self,
        module,
        module_in,
        expert_dict: dict,
    ):
        """Accumulate a contrastive loss over the selected experts' features.

        Features routed to the same expert are pulled together and features
        routed to different experts pushed apart via a BCE-with-logits loss
        on pairwise similarities. Hook-shaped; only active in training mode
        and when expert features are available.

        Args:
            expert_dict: must contain "expert_feats"
                (<float>[B, L, E, D], or None) and "expert_mask"
                (<float>[B, L, E] hard assignment mask).
        """
        if module.training and expert_dict["expert_feats"] is not None:
            del module
            del module_in
            device = expert_dict["expert_feats"].device
            # [B L E D]
            expert_feats = expert_dict["expert_feats"]
            expert_mask = expert_dict["expert_mask"]
            _, _, E, _ = expert_feats.shape
            # Keep only features of the selected experts:
            # [B L E D] -> [num_selected, D] (row-major order matches
            # nonzero() below).
            topk_feats = expert_feats[expert_mask.bool()]
            # Pairwise similarity logits between all selected features.
            sim_logits = topk_feats @ topk_feats.t()

            # Expert index of each selected feature -> one-hot -> pairwise
            # "same expert" label matrix.
            exp_tgt = expert_mask.nonzero()[:, -1]
            exp_tgt = exp_tgt.unsqueeze(1)
            exp_tgt_one_hot = torch.zeros(len(exp_tgt), E).to(device)
            exp_tgt_one_hot.scatter_(1, exp_tgt, 1)
            labels = exp_tgt_one_hot @ exp_tgt_one_hot.t()

            # Drop the diagonal (self-similarity pairs).
            mask = torch.eye(labels.shape[0], dtype=torch.bool).to(
                device
            )
            labels = labels[~mask].view(labels.shape[0], -1)
            # /2 acts as a fixed temperature on the similarity logits.
            sim_logits = (
                sim_logits[~mask].view(sim_logits.shape[0], -1) / 2
            )

            loss_fn = nn.BCEWithLogitsLoss()
            self.routing_losses.append(
                2.0 * self.loss_coef * loss_fn(sim_logits, labels)
            )

    def backward(self, *args):
        """Backpropagate the accumulated losses plus any extra loss tensors.

        Args:
            *args: extra scalar loss tensors to add to the total; each must
                carry a grad_fn.

        Raises:
            AssertionError: if an arg is not a tensor or has no grad_fn
                (leaf tensors cannot be backpropagated through).
        """
        loss_total = torch.tensor(0.0).to(self.device)

        for argloss in args:
            assert isinstance(
                argloss, torch.Tensor
            ), "loss must be a tensor"
            assert argloss.grad_fn, "loss must be backpropagatable"
            loss_total += argloss

        # Accumulated routing/additional losses; cleared afterwards so the
        # next step starts fresh.
        loss_total += self.value
        loss_total.backward()
        self.reset_loss()

    @property
    def value(self) -> torch.Tensor:
        """Mean routing loss plus mean additional loss (0 when empty)."""
        routing_loss = (
            torch.stack(self.routing_losses).mean()
            if self.routing_losses
            else torch.tensor(0.0).to(self.device)
        )
        additional_loss = (
            torch.stack(self.additional_losses).mean()
            if self.additional_losses
            else torch.tensor(0.0).to(self.device)
        )
        return routing_loss + additional_loss

    @property
    def route_value(self) -> torch.Tensor:
        """Mean of the accumulated routing losses (0 when empty)."""
        routing_loss = (
            torch.stack(self.routing_losses).mean()
            if self.routing_losses
            # Place the fallback on self.device, consistent with `value`.
            else torch.tensor(0.0).to(self.device)
        )
        return routing_loss

    @property
    def additional_value(self) -> torch.Tensor:
        """Mean of the accumulated additional losses (0 when empty)."""
        additional_loss = (
            torch.stack(self.additional_losses).mean()
            if self.additional_losses
            # Place the fallback on self.device, consistent with `value`.
            else torch.tensor(0.0).to(self.device)
        )
        return additional_loss


def _load_balancing_loss(
    router_probs: torch.Tensor,
    expert_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Compute load balancing loss.

    Args:
      router_probs: <float>[batch_size, length, num_experts] router
        probabilities.
      expert_mask: <float>[batch_size, length, num_experts] expert mask.

    Returns:
      Scalar load balancing loss.
    """
    num_experts = router_probs.shape[-1]

    router_prob_per_expert = torch.mean(
        router_probs, dtype=torch.float32, dim=-2
    )

    if expert_mask is not None:
        # portion of tokens assigned to each expert
        tokens_per_expert = torch.mean(
            expert_mask, dtype=torch.float32, dim=-2
        )
        # compute load balancing loss
        return (
            torch.mean(
                tokens_per_expert * router_prob_per_expert,
                dtype=torch.float32,
            )
            * num_experts**2
        )
    else:
        return (
            torch.mean(router_prob_per_expert, dtype=torch.float32)
            * num_experts**2
        )


def _router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
    """Compute router z-loss.

     The router z-loss was introduced in Designing Effective Sparse Expert Models
     (https://arxiv.org/abs/2202.08906). It encourages router logits to remain
     small in an effort to improve stability.

    Args:
      router_logits: <float>[batch_size, length, num_experts] router
        logits.

    Returns:
      Scalar router z-loss.
    """
    batch_size, length, _ = router_logits.shape
    log_z = torch.logsumexp(router_logits, dim=-1)
    z_loss = log_z**2
    return torch.sum(z_loss, dtype=torch.float32) / (
        batch_size * length
    )
