"""Router implementation."""

from typing import (
    Iterable,
    Optional,
    Tuple,
    Union,
    Callable,
)

import torch.nn as nn
import torch
from functools import partial
import torch.nn.functional as F

# Default in-place initializers for router parameters.
# NOTE: nn.init.zeros_ is used directly — wrapping it in functools.partial
# with no bound arguments was a no-op.
default_kernel_init = nn.init.kaiming_uniform_
default_bias_init = nn.init.zeros_


class RouterWeights(nn.Module):
    """Create router weights for the soft router.

    Applies a learned linear transformation (kernel plus optional bias) to
    token features, producing one logit per expert for every token.

    Attributes:
        input_dim: The input dimension.
        num_experts: Number of experts.
        kernel_init: In-place initialization function for the kernel.
        bias_init: In-place initialization function for the bias.
        use_bias: Whether or not to use the bias term in computing the logits.
        dtype: Numerical float type for router logit computation.
        axis: Axes along which to apply router prob computation.
            Defaults to final axis (typically the "hidden dimension").

        kernel: <dtype>[input_dim, num_experts] linear projection weight.
        bias: <dtype>[num_experts] bias parameter, or None when use_bias
            is False.

    Methods:
        _init: Initializes the router weights.
        forward: Applies RouterWeights module to input features.
    """

    def __init__(
        self,
        input_dim: int,
        num_experts: int = 10,
        kernel_init: Callable = default_kernel_init,
        bias_init: Callable = default_bias_init,
        use_bias: bool = True,
        dtype: torch.dtype = torch.float32,
        axis: Union[Iterable[int], int] = -1,
    ) -> None:
        super().__init__()
        self.use_bias = use_bias
        self.dtype = dtype
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self.axis = axis
        self.num_experts = num_experts
        self._init(input_dim, num_experts)

    def _init(self, input_dim: int, num_experts: int) -> None:
        """Creates and initializes the kernel and (optional) bias parameters."""
        # torch.empty suffices: the initializer overwrites the values in place.
        self.kernel = nn.Parameter(
            torch.empty(input_dim, num_experts, dtype=self.dtype)
        )
        self.kernel_init(self.kernel)
        if self.use_bias:
            self.bias = nn.Parameter(
                torch.empty(num_experts, dtype=self.dtype)
            )
            self.bias_init(self.bias)
        else:
            # Register explicitly so `self.bias` always exists (as None),
            # following the nn.Module convention for optional parameters.
            self.register_parameter("bias", None)

    def forward(self, token_inputs: torch.Tensor) -> torch.Tensor:
        """Applies RouterWeights module to input features.

        Args:
            token_inputs: Flattened batch of tokens with shape
                <float>[batch size, length, hidden_dim].

        Returns:
            Router logits with shape <float>[batch size, length, num_experts].
        """
        # Contract the last (hidden) dim of the inputs with the first dim of
        # the kernel: [b, l, h] x [h, e] -> [b, l, e].
        logits = torch.tensordot(
            token_inputs, self.kernel, dims=([-1], [0])  # type: ignore
        )
        if self.use_bias:
            logits += self.bias
        return logits


class ClsRouterWeights(nn.Module):
    """Create router weights for the soft router, routing on the first token.

    Unlike RouterWeights, the logits are computed from the first token of the
    sequence only (presumably a CLS token — confirm against the caller) and
    then broadcast to every sequence position, so all tokens in a sequence
    share the same routing decision.

    Attributes:
        input_dim: The input dimension.
        num_experts: Number of experts.
        kernel_init: In-place initialization function for the kernel.
        bias_init: In-place initialization function for the bias.
        use_bias: Whether or not to use the bias term in computing the logits.
        dtype: Numerical float type for router logit computation.
        axis: Axes along which to apply router prob computation.
            Defaults to final axis (typically the "hidden dimension").

        kernel: <dtype>[input_dim, num_experts] linear projection weight.
        bias: <dtype>[num_experts] bias parameter, or None when use_bias
            is False.

    Methods:
        _init: Initializes the router weights.
        forward: Applies RouterWeights module to input features.
    """

    def __init__(
        self,
        input_dim: int,
        num_experts: int = 10,
        kernel_init: Callable = default_kernel_init,
        bias_init: Callable = default_bias_init,
        use_bias: bool = True,
        dtype: torch.dtype = torch.float32,
        axis: Union[Iterable[int], int] = -1,
    ) -> None:
        super().__init__()
        self.use_bias = use_bias
        self.dtype = dtype
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self.axis = axis
        self.num_experts = num_experts
        self._init(input_dim, num_experts)

    def _init(self, input_dim: int, num_experts: int) -> None:
        """Creates and initializes the kernel and (optional) bias parameters."""
        # torch.empty suffices: the initializer overwrites the values in place.
        self.kernel = nn.Parameter(
            torch.empty(input_dim, num_experts, dtype=self.dtype)
        )
        self.kernel_init(self.kernel)
        if self.use_bias:
            self.bias = nn.Parameter(
                torch.empty(num_experts, dtype=self.dtype)
            )
            self.bias_init(self.bias)
        else:
            # Register explicitly so `self.bias` always exists (as None),
            # following the nn.Module convention for optional parameters.
            self.register_parameter("bias", None)

    def forward(self, token_inputs: torch.Tensor) -> torch.Tensor:
        """Applies RouterWeights module to input features.

        Args:
            token_inputs: Flattened batch of tokens with shape
                <float>[batch size, length, hidden_dim].

        Returns:
            Router logits with shape <float>[batch size, length, num_experts],
            identical across the length dimension (computed from the first
            token only).
        """
        # Project only the first token: [batch size, 1, num_experts].
        logits = torch.tensordot(
            token_inputs[:, :1, :], self.kernel, dims=([-1], [0])  # type: ignore
        )
        if self.use_bias:
            logits += self.bias
        # Broadcast the single routing decision to every sequence position.
        logits = logits.repeat(1, token_inputs.shape[1], 1)
        return logits


class SoftRouter(nn.Module):
    """Router module converting token inputs to router probabilities.

    Attributes:
        router_weights: Configurable module used to compute router logits from
            token inputs.
        input_dim: The input dimension of the tokens.
        num_experts: Number of experts.
        top_k: Number of top experts to select per token; None keeps all
            experts.
        jitter_noise: Amplitude of multiplicative jitter noise applied to the
            token inputs during training.
        ignore_padding_tokens: Whether to ignore padding tokens during routing
            (not implemented; raises when True).
        dtype: Numeric float type for returned combine array.

    Methods:
        forward: Applies Router module to input features.
        _compute_router_probs: Computes router probabilities.
        _top_k_mask: Computes top-k mask for router probs.
    """

    def __init__(
        self,
        router_weights: Union[RouterWeights, ClsRouterWeights],
        input_dim: int,
        num_experts: int = 10,
        top_k: Optional[int] = None,
        jitter_noise: float = 1e-1,
        ignore_padding_tokens: bool = False,
        dtype: torch.dtype = torch.float32,
    ) -> None:
        super().__init__()
        self.router_weights = router_weights
        self.input_dim = input_dim
        self.num_experts = num_experts
        self.top_k = top_k
        self.jitter_noise = jitter_noise
        self.ignore_padding_tokens = ignore_padding_tokens
        self.dtype = dtype

    def _compute_router_probs(
        self, token_inputs: torch.Tensor, apply_jitter: bool = True
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Computes router probabilities.

        Args:
            token_inputs: [batch, seq_len, hidden_dim] inputs to send to
                experts. Never mutated.
            apply_jitter: If true, apply jitter noise during routing.

        Returns:
            - <float32>[batch, seq_len, num_experts] probabilities for each
              token and expert; renormalized over the top-k experts (with all
              other experts at exactly zero) when ``top_k`` is set.
            - <float>[batch, seq_len, num_experts] raw router logits. Used
              for computing router z-loss.
            - Optional <float32>[batch, seq_len, num_experts] 0/1 mask of the
              selected experts, or None when ``top_k`` is None.
        """
        # Only apply jitter noise during training. The multiply is
        # out-of-place so the caller's tensor is never mutated.
        if apply_jitter and self.jitter_noise > 0 and self.training:
            noise = torch.empty_like(token_inputs).uniform_(
                1.0 - self.jitter_noise, 1.0 + self.jitter_noise
            )
            token_inputs = token_inputs * noise

        router_logits = self.router_weights(token_inputs)
        # Gumbel-softmax yields stochastic, differentiable routing while
        # training; plain softmax yields deterministic routing at eval time.
        if self.training:
            router_probs = F.gumbel_softmax(router_logits, tau=1, dim=-1)
        else:
            router_probs = F.softmax(router_logits, dim=-1)

        expert_mask = None
        if self.top_k is not None:
            expert_mask, _ = self._top_k_mask(router_probs, self.top_k)
            # Renormalize over the selected experts only. Masking with -inf
            # before the softmax makes the probability of non-selected
            # experts exactly zero; multiplying the probs by the 0/1 mask
            # instead (the previous behavior) left masked-out experts with
            # weight exp(0) = 1 and therefore nonzero probability.
            router_probs = F.softmax(
                router_probs.masked_fill(expert_mask == 0, float("-inf")),
                dim=-1,
            )

        return router_probs, router_logits, expert_mask

    def _top_k_mask(
        self, array: torch.Tensor, k: int
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Computes top-k mask for router probs.

        Args:
            array: [batch, seq_len, num_experts] array to apply top-k mask.
            k: Number of experts to select; clamped to the number of experts.

        Returns:
            - <float32>[batch, seq_len, num_experts] 0/1 mask for top-k
              experts.
            - <int64>[batch, seq_len, k] indices of top-k experts.
        """
        # Clamp so torch.topk never raises when k exceeds the expert count.
        k = min(k, array.shape[-1])
        _, indices = torch.topk(array, k, dim=-1)
        mask = torch.zeros_like(array)
        mask.scatter_(-1, indices, 1)
        return mask, indices

    def forward(
        self,
        token_inputs: torch.Tensor,
        apply_jitter: bool = True,
        logits: bool = True,
    ) -> dict:
        """Applies Router module to input features.

        Args:
            token_inputs: [batch, seq_len, hidden_dim] inputs to send to
                experts.
            apply_jitter: If true, apply jitter noise during routing.
            logits: If true, include the raw router logits in the output.

        Returns:
            Dict with keys:
            - "probs": <float32>[batch, seq_len, num_experts] routing
              probabilities.
            - "logits": raw router logits, or None when ``logits`` is False.
            - "expert_mask": top-k expert mask, or None when ``top_k`` is
              None.

        Raises:
            NotImplementedError: If ``ignore_padding_tokens`` is True.
        """
        if self.ignore_padding_tokens:
            raise NotImplementedError(
                "Padding token handling not implemented."
            )
        # No defensive clone needed: _compute_router_probs does not mutate
        # its input.
        router_probs, router_logits, expert_mask = self._compute_router_probs(
            token_inputs, apply_jitter
        )
        return {
            "probs": router_probs,
            "logits": router_logits if logits else None,
            "expert_mask": expert_mask,
        }
