import torch
import torch.nn as nn


class MoV(nn.Module):
    """Mixture-of-Vectors (MoV) adapter for an MLP layer.

    Keeps one IA3-style scaling vector per expert, blends them per token
    using the router's probabilities, and rescales the input element-wise.

    Attributes:
      router: Callable mapping activations to per-expert routing weights of
        shape <float>[..., num_experts].
      num_experts: Number of expert scaling vectors.
      scaling: Learnable <float>[num_experts, hidden_dim] expert vectors.
      dtype: The dtype of the scaling parameter.
    """

    def __init__(
        self,
        router,
        hidden_dim,
        num_experts=10,
        ia3_init=None,
        dtype=torch.float32,
    ):
        """Initializes the MoV adapter.

        Args:
          router: Router module/callable producing [..., num_experts] weights.
          hidden_dim: Hidden dimension scaled by each expert vector.
          num_experts: Number of experts.
          ia3_init: In-place initializer for the scaling parameter; defaults
            to ``nn.init.ones_`` (identity scaling at initialization).
          dtype: dtype for the scaling parameter.
        """
        super().__init__()
        self.router = router
        self.num_experts = num_experts
        self.mov_init = ia3_init if ia3_init is not None else nn.init.ones_

        # Fix: `dtype` was previously accepted but never applied; the
        # parameter now honors it (the float32 default preserves the old
        # default-dtype behavior).
        self.scaling = torch.nn.Parameter(
            torch.empty((self.num_experts, hidden_dim), dtype=dtype)
        )
        self.mov_init(self.scaling)
        self.dtype = dtype

    def forward(self, x):
        """Scales ``x`` by the router-weighted mixture of expert vectors.

        Args:
            x: Activations with shape <float>[..., hidden_dim] (typically
                [batch, seq_len, hidden_dim]).

        Returns:
            Tensor with the same shape as ``x``.
        """
        *rest, _hidden = x.shape
        # [..., num_experts] routing weights for each token.
        router_probs = self.router(x)
        # Prepend singleton dims so the expert table broadcasts over `rest`,
        # e.g. [1, 1, num_experts, hidden_dim] for a 3-D input.
        scaling = self.scaling.view((1,) * len(rest) + self.scaling.shape)
        # Blend experts into one per-token scale: [..., hidden_dim].
        scaling = torch.einsum("...e,...ed->...d", router_probs, scaling)

        return x * scaling


class MoVAttention(nn.Module):
    """MoV implementation for the Attention class.

    Attributes:
      router: Router class
      heads: The number of attention heads.
      kv_hidden: The hidden dimension of the linear layer.
      num_experts: Number of experts
      ia3_init: How to initialize the scaling variable.
      dtype: The dtype of the activations for this module.
    """

    def __init__(
        self,
        router,
        heads,
        kv_hidden,
        num_experts=10,
        ia3_init=None,
        dtype=torch.float32,
    ):
        super(MoVAttention, self).__init__()
        self.router = router
        self.num_experts = num_experts
        self.mova_init = (
            ia3_init if ia3_init is not None else nn.init.ones_
        )

        self.scaling = torch.nn.Parameter(
            torch.empty((heads, self.num_experts, kv_hidden))
        )
        self.mova_init(self.scaling)
        self.dtype = dtype

    def forward(self, x):
        *rest, heads, kv = x.shape
        # [batch, seq_len, heads, num_experts]
        router_probs = self.router(x)

        # [1, 1, heads, num_experts, kv_hidden]
        scaling = self.scaling.view((1,) * len(rest) + self.scaling.shape)

        # [batch, seq_len, heads, kv_hidden]
        scaling = torch.einsum("...e,...ed->...d", router_probs, scaling)

        return x * scaling
