import torch
import torch.nn as nn


class IA3(nn.Module):
    """IA3 multiplicative scaling to use with a linear layer.

    Note:
      This module is used as the intermediate_conv module in the flaxformer
      MlpBlock. The MlpBlock only applies this intermediate conv to one of the
      parallel activation functions it uses, but because these parallel
      activations are combined with multiplication, IA3 applies a multiplicative
      scaling, and multiplication is associative, we can apply the scaling to
      just that activation and get the same result as if we applied it
      afterwards.

    Attributes:
      ia3_init: Initializer applied in-place to the scaling parameter.
      scaling: Learned per-feature scale of shape ``(hidden_dim,)``.
      dtype: The dtype of the activations for this module.
    """

    def __init__(self, hidden_dim: int, ia3_init=None, dtype=torch.float32):
        """Create the scaling parameter.

        Args:
          hidden_dim: Size of the trailing (feature) dimension to scale.
          ia3_init: In-place initializer for the scaling parameter (e.g.
            ``nn.init.zeros_``). Defaults to ``nn.init.ones_``.
          dtype: The dtype of the activations for this module.
        """
        super().__init__()
        self.ia3_init = ia3_init if ia3_init is not None else nn.init.ones_
        self.scaling = nn.Parameter(torch.ones(hidden_dim))
        self.ia3_init(self.scaling)
        self.dtype = dtype

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Scale the last dimension of ``x`` by the learned vector.

        Args:
          x: Activations of shape ``[..., hidden_dim]``.

        Returns:
          ``x`` with its trailing dimension scaled elementwise; same shape
          as the input.
        """
        # PyTorch broadcasting aligns trailing dimensions, so the
        # (hidden_dim,) parameter already broadcasts over any leading
        # batch/seq dims — no explicit reshape is needed.
        return x * self.scaling


class IA3Attention(nn.Module):
    """A version of IA3 scaling to use with the Attention class.

    Note:
      Because of where we can hook into the flaxformer attention class (the
      `(k|v)_conv` module) the input to this function is already reshaped into
      [..., length, heads, kv] so we shape our scaling to match those last two
      dimensions. This will result in the same value as if we were to reshape
      the variable and do a single d_model scale.
      TODO: Rewrite as a single class that infers the number of dims
      to extract from the input to use to shape the param from the number of
      dims in the axis names.

    Attributes:
      ia3_init: Initializer applied in-place to the scaling parameter.
      scaling: Learned scale of shape ``(heads, head_dim)``.
      dtype: The dtype of the activations for this module.
    """

    def __init__(self, heads: int, head_dim: int, ia3_init=None,
                 dtype=torch.float32):
        """Create the per-(head, head_dim) scaling parameter.

        Args:
          heads: Number of attention heads.
          head_dim: Size of each head's key/value dimension.
          ia3_init: In-place initializer for the scaling parameter (e.g.
            ``nn.init.zeros_``). Defaults to ``nn.init.ones_``.
          dtype: The dtype of the activations for this module.
        """
        super().__init__()
        self.ia3_init = ia3_init if ia3_init is not None else nn.init.ones_
        self.scaling = nn.Parameter(torch.ones(heads, head_dim))
        self.ia3_init(self.scaling)
        self.dtype = dtype

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Scale the last two dimensions of ``x`` by the learned matrix.

        Args:
          x: Activations of shape ``[..., heads, head_dim]``.

        Returns:
          ``x`` with its trailing two dimensions scaled elementwise; same
          shape as the input.
        """
        # PyTorch broadcasting aligns trailing dimensions, so the
        # (heads, head_dim) parameter already broadcasts over any leading
        # batch/length dims — no explicit reshape is needed.
        return x * self.scaling
