"""Router implementation."""

from typing import (
    Iterable,
    Optional,
    Tuple,
    Union,
    Callable,
)

import torch.nn as nn
import torch
from functools import partial
import torch.nn.functional as F

# Default in-place initializers for the router parameters below.
# NOTE: `functools.partial` with no bound arguments was redundant, so the
# bias initializer is the init function itself.
default_kernel_init = nn.init.kaiming_uniform_
default_bias_init = nn.init.zeros_


class RouterWeights(nn.Module):
    """Create router weights for the soft router.

    Projects token features to per-expert logits with a learned linear map.
    The kernel's gradient is scaled by a backward hook (see
    ``register_hook``), and new expert columns can be appended on the fly
    with ``add_expert``.

    Attributes:
        use_bias: Whether to add a bias term when computing the logits.
            Currently unsupported: ``_init`` raises if True.
        dtype: Numerical float type for the router parameters.
        kernel_init: In-place initialization scheme applied to the kernel.
        bias_init: In-place initialization scheme for the bias (unused while
            bias support is unimplemented).
        num_experts: Current number of experts; grows via ``add_expert``.
        kernel: <dtype>[input_dim, num_experts] linear projection parameter.
        bias: Placeholder empty tensor while bias support is unimplemented.

    Methods:
        _init: Initializes the router weights.
        forward: Applies RouterWeights module to input features.
        add_expert: Appends one expert column to the kernel.
        register_hook: Attaches the gradient-scaling hook to the kernel.
    """

    # Backward-hook multiplier for the kernel gradient so the router weights
    # learn faster than the rest of the network.
    GRAD_SCALE = 5

    def __init__(
        self,
        input_dim: int,
        num_experts: int,
        kernel_init: Callable = default_kernel_init,
        bias_init: Callable = default_bias_init,
        use_bias: bool = False,
        dtype: torch.dtype = torch.float32,
    ) -> None:
        super().__init__()
        self.use_bias = use_bias
        self.dtype = dtype
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self.num_experts = num_experts
        self.kernel, self.bias = self._init(input_dim, num_experts)
        self.register_hook()

    def _init(
        self, input_dim: int, num_experts: int
    ) -> Tuple[nn.Parameter, torch.Tensor]:
        """Initializes the router weights.

        Returns:
            The ``(kernel, bias)`` pair. ``bias`` is an empty placeholder
            tensor because bias support is not implemented.
        """
        kernel = nn.Parameter(torch.ones(input_dim, num_experts, dtype=self.dtype))
        self.kernel_init(kernel)

        # Bias support is not implemented yet; keep an empty placeholder so
        # the attribute always exists.
        bias = torch.Tensor()
        if self.use_bias:
            raise NotImplementedError("Bias not implemented.")

        return kernel, bias

    def forward(self, token_inputs: torch.Tensor) -> torch.Tensor:
        """Applies RouterWeights module to input features.

        Args:
            token_inputs: Flattened batch of tokens with shape
                <float>[batch size, nb_task, hidden_dim].

        Returns:
            Router logits with shape <float>[batch size, nb_task, num_experts].
        """
        # Contract the hidden dimension against the kernel's input dimension.
        logits = torch.tensordot(
            token_inputs, self.kernel, dims=([-1], [0])  # type: ignore
        )
        if self.use_bias:
            logits += self.bias
        return logits

    def add_expert(self) -> None:
        """Adds an expert to the router.

        The new expert column is initialized to the mean of the existing
        columns, so the new expert starts as an "average" expert.
        """
        self.num_experts += 1
        self.kernel = nn.Parameter(
            torch.cat(
                [self.kernel, torch.mean(self.kernel, dim=-1, keepdim=True)],
                dim=-1,
            )
        )
        if self.use_bias:
            self.bias = nn.Parameter(
                torch.cat(
                    [self.bias, torch.mean(self.bias, dim=-1, keepdim=True)],
                    dim=-1,
                )
            )
        # The hook from __init__ was attached to the old kernel Parameter;
        # re-register it so the new kernel keeps its gradient scaling.
        self.register_hook()

    def register_hook(self) -> None:
        """Registers a hook scaling the kernel gradient by ``GRAD_SCALE``."""

        def hook(grad: torch.Tensor) -> torch.Tensor:
            return grad * self.GRAD_SCALE

        self.kernel.register_hook(hook)


class ClsRouterWeights(nn.Module):
    """Create router weights for the soft router (CLS variant).

    Maps token features to per-expert logits through a learned linear
    projection, optionally followed by an additive bias.

    Attributes:
        use_bias: Whether a bias term is added to the logits.
        dtype: Numerical float type for the router parameters.
        kernel_init: In-place initialization scheme for the kernel.
        bias_init: In-place initialization scheme for the bias.
        axis: Axes along which to apply router prob computation.
            Defaults to the final axis (typically the "hidden dimension").
        num_experts: Number of experts.
        kernel: <dtype>[input_dim, num_experts] projection parameter.
        bias: <dtype>[num_experts] bias parameter (only when ``use_bias``).

    Methods:
        _init: Initializes the router weights.
        forward: Applies RouterWeights module to input features.
    """

    def __init__(
        self,
        input_dim: int,
        num_experts: int,
        kernel_init: Callable = default_kernel_init,
        bias_init: Callable = default_bias_init,
        use_bias: bool = True,
        dtype: torch.dtype = torch.float32,
        axis: Union[Iterable[int], int] = -1,
    ) -> None:
        super().__init__()
        self.num_experts = num_experts
        self.axis = axis
        self.dtype = dtype
        self.use_bias = use_bias
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self._init(input_dim, num_experts)

    def _init(self, input_dim: int, num_experts: int) -> None:
        """Creates and initializes the kernel (and optional bias) parameters."""
        weight = torch.ones(input_dim, num_experts, dtype=self.dtype)
        self.kernel = nn.Parameter(weight)
        self.kernel_init(self.kernel)
        if not self.use_bias:
            return
        self.bias = nn.Parameter(torch.empty(num_experts, dtype=self.dtype))
        self.bias_init(self.bias)

    def forward(self, token_inputs: torch.Tensor) -> torch.Tensor:
        """Projects token features to router logits.

        Args:
            token_inputs: Flattened batch of tokens with shape
                <float>[batch size, length, hidden_dim].

        Returns:
            Router logits with shape <float>[batch size, length, num_experts].
        """
        # Contract the hidden dimension with the kernel's input dimension.
        logits = torch.tensordot(
            token_inputs, self.kernel, dims=([-1], [0])  # type: ignore
        )
        return logits + self.bias if self.use_bias else logits


class SoftRouter(nn.Module):
    """Router module converting token inputs to router logits.

    Attributes:
        router_weights: Configurable module used to compute router logits from
            token inputs.
        num_experts: Number of experts, mirrored from ``router_weights``.
        top_k: Number of top experts to select (``None`` keeps all experts).
        jitter_noise: Amplitude of multiplicative jitter noise applied to the
            token inputs during training.
        ignore_padding_tokens: Whether to ignore padding tokens during routing
            (not implemented).
        residual_weight: Mixing coefficient between the learned router
            probabilities and the identity routing:
            ``residual_weight * probs + (1 - residual_weight) * I``.
        dtype: Numeric float type for returned combine array. All actual
            computations are performed in float32 of the input for stability.

    Methods:
        forward: Applies Router module to input features.
        _compute_router_probs: Computes router probabilities.
        _top_k_mask: Computes top-k mask for router probs.
        update_weights: Swaps in a new router-weights module.
    """

    def __init__(
        self,
        router_weights: Union[RouterWeights, ClsRouterWeights],
        top_k: Optional[int] = None,
        jitter_noise: float = 1e-3,
        ignore_padding_tokens: bool = False,
        dtype: torch.dtype = torch.float32,
        residual_weight: float = 0.1,
    ) -> None:
        super().__init__()
        self.router_weights = router_weights
        self.num_experts = router_weights.num_experts

        self.top_k = top_k
        self.jitter_noise = jitter_noise
        self.ignore_padding_tokens = ignore_padding_tokens
        self.dtype = dtype
        self.residual_weight = residual_weight

    def forward(
        self,
        token_inputs: torch.Tensor,
        apply_jitter: bool = True,
        r_logits: bool = True,
    ) -> dict:
        """Applies Router module to input features.

        Args:
            token_inputs: [batch, nb_task, hidden_dim] inputs to send to
                experts.
            apply_jitter: If True, apply jitter noise (training mode only).
            r_logits: If True, include the raw router logits in the output
                (e.g. for computing a router z-loss); otherwise "r_logits"
                is None.

        Returns:
            Dict with:
              - "logits": <float>[batch, nb_task, hidden_dim] combination of
                the inputs weighted by the router probabilities.
                NOTE(review): the einsum "bse,bec->bsc" only type-checks when
                nb_task == num_experts — confirm with callers.
              - "r_probs": <float32>[batch, nb_task, num_experts]
                probabilities for each token and expert.
              - "r_logits": raw router logits, or None when ``r_logits`` is
                False.
              - "expert_mask": top-k mask, or None when ``top_k`` is None.
        """
        # Clone so the in-place jitter in _compute_router_probs cannot
        # mutate the caller's tensor.
        token_inputs = token_inputs.clone()
        if self.ignore_padding_tokens:
            raise NotImplementedError("Padding token handling not implemented.")
        router_probs, router_logits, expert_mask = self._compute_router_probs(
            token_inputs, apply_jitter
        )

        return {
            "logits": torch.einsum("bse,bec->bsc", router_probs, token_inputs),
            "r_probs": router_probs,
            "r_logits": router_logits if r_logits else None,
            "expert_mask": expert_mask,
        }

    def _compute_router_probs(
        self, token_inputs: torch.Tensor, apply_jitter: bool = True
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Computes router probabilities.

        Args:
            token_inputs: [batch, nb_task, hidden_dim] inputs to send to
                experts. NOTE: jittered in place when jitter is applied.
            apply_jitter: If true, apply jitter noise during routing.

        Returns:
            - <float32>[batch, nb_task, num_experts] probabilities for each
              token and expert, mixed with the identity routing.
            - <float>[batch, nb_task, num_experts] raw (possibly top-k
              masked) router logits. Used for computing router z-loss.
            - Top-k expert mask, or None when ``top_k`` is None.
        """
        # Only apply jitter noise during training.
        if apply_jitter and self.jitter_noise > 0 and self.training:
            # Multiplicative jitter: scale each entry by U(1-eps, 1+eps).
            token_inputs *= torch.empty_like(token_inputs).uniform_(
                1.0 - self.jitter_noise, 1.0 + self.jitter_noise
            )

        router_logits = self.router_weights(token_inputs)

        # Keep only the top-k experts per token (others are masked to 0).
        expert_mask = None
        if self.top_k is not None:
            expert_mask, _ = self._top_k_mask(router_logits, self.top_k)
            router_logits = router_logits * expert_mask

        # Normalize over experts.
        if router_logits.size(-1) <= 1:
            raise NotImplementedError("Not implemented")
        if self.training:
            # Gumbel-softmax gives stochastic yet differentiable routing.
            router_probs = F.gumbel_softmax(router_logits, tau=1, dim=-1)
        else:
            router_probs = F.softmax(router_logits, dim=-1)

        # Mix with the identity so each position keeps most of the weight on
        # its own expert. NOTE(review): the addition broadcasts only when
        # nb_task == num_experts — confirm with callers.
        eye = torch.eye(router_probs.size(-1), device=router_probs.device)
        router_probs = (
            self.residual_weight * router_probs
            + (1 - self.residual_weight) * eye.unsqueeze(0)
        )
        return router_probs, router_logits, expert_mask

    def _top_k_mask(
        self, array: torch.Tensor, k: int
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Computes top-k mask for router probs.

        Args:
            array: [batch, nb_task, num_experts] array to apply top-k mask.
            k: Number of experts to select (clamped to num_experts).

        Returns:
            - <float32>[batch, nb_task, num_experts] 0/1 mask for top-k
              experts.
            - <int64>[batch, nb_task, k] indices of top-k experts.
        """
        # Clamp: cannot select more experts than exist.
        k = min(k, array.shape[-1])
        _, indices = torch.topk(array, k, dim=-1)
        mask = torch.zeros_like(array)
        mask.scatter_(-1, indices, 1)
        return mask, indices

    def update_weights(
        self, router_weights: Union[RouterWeights, ClsRouterWeights]
    ) -> None:
        """Replaces the router-weights module and refreshes ``num_experts``."""
        self.router_weights = router_weights
        self.num_experts = router_weights.num_experts
