"""Router implementation."""

from typing import (
    Any,
    Iterable,
    Optional,
    Sequence,
    Tuple,
    Union,
    Callable,
)

import torch.nn as nn
import torch
from functools import partial
import torch.nn.functional as F

RouterOutput = Any
default_kernel_init = partial(nn.init.normal_, mean=0, std=2e-2)
default_bias_init = partial(nn.init.zeros_)


class RouterWeights(nn.Module):
    """Router module converting token inputs to router logits.

    Attributes:
      use_bias: Whether or not to use the bias term in computing the logits.
      dtype: Numerical float type for router logit computation.
      kernel_init: Initialization scheme for kernel.
      bias_init: Initialization scheme for bias.
      precision: XLA precision for array computations.
      axis: Axes along which to apply the dense router weights transformation.
        Defaults to final axis (typically the "hidden dimension").
      kernel_axis_names: Logical axis names to use for kernel sharding.
      reshape_kernel: Whether to reshape the kernel parameter to 2D for Adafactor.
    """

    def __init__(
        self,
        use_bias: bool = True,
        dtype: torch.dtype = torch.float16,
        kernel_init: Callable = default_kernel_init,
        bias_init: Callable = default_bias_init,
        axis: Union[Iterable[int], int] = -1,
    ):
        self.use_bias = use_bias
        self.dtype = dtype
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self.axis = axis

    def forward(
        self, token_inputs: torch.Tensor, num_experts: int
    ) -> torch.Tensor:
        """Applies RouterWeights module.

        Args:
          token_inputs: Flattened batch of tokens with shape <float>[num_groups,
            group_size, hidden_dim].
          num_experts: Number of experts.

        Returns:
          Router logits with shape <float>[num_groups, group_size, num_experts].
        """
        batch_size, _, hidden_dim = token_inputs.shape
        dense_layer = nn.Linear(
            in_features=hidden_dim,  # Parameter set dynamically based on input size
            out_features=num_experts,  # Parameter set dynamically based on num_experts
            bias=self.use_bias,
        )
        self.kernel_init(dense_layer.weight)
        if self.use_bias:
            self.bias_init(dense_layer.weightbias)

        token_inputs = token_inputs.view(-1, hidden_dim)
        output = dense_layer(token_inputs)
        output = output.view(batch_size, -1, num_experts)
        return output


class Router(nn.Module):
    """Abstract base router class, defining router API and inner workings.

    Attributes:
      router_weights: Configurable module used to compute router logits from token
        inputs.
      jitter_noise: Amplitude of jitter noise applied to router logits.
      ignore_padding_tokens: Whether to ignore padding tokens during routing. Note
        that some routers (e.g. TokensChooseMaskedRouter) will completely ignore
        padding tokens, while others (e.g. TokensChooseScatterRouter and
        ExpertsChooseMaskedRouter) will simply down-weight the probability of
        selecting padding tokens.
    """

    def __init__(
        self,
        num_experts: int,
        router_weights: RouterWeights,
        jitter_noise: float,
        ignore_padding_tokens: bool = True,
        input_axis_names: Sequence[str] = ("batch", "length", "mlp"),
        top_k: int = 2,
        load_balancing_loss: bool = False,
    ):
        super().__init__()
        self.router_weights = router_weights
        self.jitter_noise = jitter_noise
        self.ignore_padding_tokens = ignore_padding_tokens
        self.input_axis_names = input_axis_names
        self.top_k = top_k
        self.load_balancing_loss = load_balancing_loss

    def forward(
        self,
        token_inputs: torch.Tensor,
        num_experts: int,
        apply_jitter: bool = True,
    ) -> torch.Tensor:
        """Computes dispatch and combine arrays for routing to experts.

        Args:
          token_inputs: [batch, seq_len, hidden_dim] inputs to send to experts.
          num_experts: Number of experts.
          apply_jitter: If true, apply jitter noise during routing.

        Returns:
          Router indices or mask arrays (depending on router type).
        """
        token_inputs = self._with_sharding_constraint(token_inputs)

        router_probs, router_logits = (
            self._compute_router_probabilities(
                token_inputs, num_experts, apply_jitter
            )
        )

        if self.ignore_padding_tokens:
            padding_mask = (
                torch.sum(torch.abs(token_inputs), dim=-1) > 0
            )
            router_logits *= padding_mask.unsqueeze(-1)
        else:
            padding_mask = None

        return router_probs

    def _with_sharding_constraint(self, tensor):
        # Placeholder function for partitioning mechanism, not needed in PyTorch
        return tensor

    def _compute_router_probabilities(
        self,
        token_inputs: torch.Tensor,
        num_experts: int,
        apply_jitter: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Computes router probabilities from input tokens.

        Args:
          token_inputs: [batch, seq_len, hidden_dim] from which router probabilities
            are computed.
          num_experts: Number of experts.
          apply_jitter: If true, apply jitter noise.

        Returns:
          - [batch, seq_len, num_experts] probabilities for each token and expert.
            Used for routing tokens to experts.
          - [batch, seq_len, num_experts] raw router logits. Used for computing
            router z-loss.
        """
        if apply_jitter and self.jitter_noise > 0:
            token_inputs *= torch.empty_like(token_inputs).uniform_(
                1.0 - self.jitter_noise, 1.0 + self.jitter_noise
            )

        router_logits = self.router_weights(token_inputs, num_experts)

        router_probabilities = F.softmax(router_logits, dim=-1)

        if self.top_k is not None:
            topk_mask, _ = self._top_k_mask(
                router_probabilities, self.top_k
            )

        return (
            router_probabilities * topk_mask
            if self.top_k is not None
            else router_probabilities
        ), router_logits

    def _top_k_mask(
        self, array: torch.Tensor, k: int
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        _, indices = torch.topk(array, k, dim=-1)
        mask = torch.zeros_like(array)
        mask.scatter_(-1, indices, 1)
        return mask, indices

    def _compute_routing_instructions(
        self,
        router_probs: torch.Tensor,
        padding_mask: Optional[torch.Tensor],
        expert_capacity: int,
    ) -> torch.Tensor:
        """Computes instructions for routing inputs to experts."""
        raise NotImplementedError(
            "Router is an abstract class that should be subclassed."
        )


def _load_balancing_loss(
    router_probs: torch.Tensor,
    expert_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Compute load balancing loss."""
    num_experts = router_probs.shape[-1]

    router_prob_per_expert = router_probs.mean(dim=-2)

    if expert_mask is not None:
        tokens_per_expert = expert_mask.mean(dim=-2)
        return (
            tokens_per_expert * router_prob_per_expert
        ).mean() * num_experts**2
    else:
        return router_prob_per_expert.mean() * num_experts**2


def _router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
    """Compute router z-loss."""
    num_groups, tokens_per_group, _ = router_logits.shape
    log_z = torch.logsumexp(router_logits, dim=-1)
    z_loss = log_z**2
    return z_loss.sum() / (num_groups * tokens_per_group)


def _favor_one_hot_slices() -> bool:
    """Returns true iff running on TPUs."""
    return False  # Placeholder for TPU specific check, not needed in PyTorch


def _take_along_axis(
    array: torch.Tensor, indices: torch.Tensor, axis: int
) -> torch.Tensor:
    """Takes values from the input array by matching 1D index and data slices."""
    if axis != -1 and axis != array.ndim - 1:
        raise ValueError("Only last dimension slicing is supported.")
    return torch.gather(array, axis, indices)
