import torch
import torch.nn as nn
from src.models.routing.soft_router import SoftRouter


class LoRaMLP(nn.Module):
    """Plain (single-expert) LoRA adapter.

    Computes ``x @ A @ B`` where A is [hidden_dim, rank] and B is
    [rank, output_dim or hidden_dim]. With the default zero init of B,
    the adapter contributes exactly zero at initialization.

    Attributes:
      rank: LoRA rank (bottleneck width).
      lora_init_A: in-place initializer for the down-projection A.
      lora_init_B: in-place initializer for the up-projection B.
      dtype: parameter dtype.
      output_dim: output width; falls back to hidden_dim when falsy.
    """

    def __init__(
        self,
        hidden_dim,
        rank=8,
        output_dim=None,
        lora_init_A=nn.init.kaiming_uniform_,
        lora_init_B=nn.init.zeros_,
        dtype=torch.float32,
    ):
        super().__init__()
        self.rank = rank
        self.lora_init_A = lora_init_A
        self.lora_init_B = lora_init_B
        self.dtype = dtype
        self.output_dim = output_dim

        # Down-projection A: [hidden_dim, rank].
        self.lora_a = nn.Parameter(torch.empty((hidden_dim, rank), dtype=dtype))
        self.lora_init_A(self.lora_a)

        # Up-projection B: [rank, output_dim or hidden_dim].
        self.lora_b = nn.Parameter(
            torch.empty((rank, output_dim or hidden_dim), dtype=dtype)
        )
        self.lora_init_B(self.lora_b)

    def forward(self, x):
        """
        Args:
            x: input tensor [batch, seq_len, hidden_dim]

        Returns:
            dict with:
              "o": LoRA output [batch, seq_len, output_dim or hidden_dim]
              "expert_feats": always None (single-expert adapter)
              "expert_mask": always None (no routing)
        """
        # [batch, seq_len, rank]
        down = x @ self.lora_a
        # [batch, seq_len, output_dim or hidden_dim]
        up = down @ self.lora_b
        return {
            "o": up,
            "expert_feats": None,
            "expert_mask": None,
        }


class MoLoRaMLP(nn.Module):
    """MoLoRa (mixture-of-LoRA) implementation for an MLP layer.

    Pipeline (shapes):
        x: [batch, seq_len, hidden_dim]
        mola(x)  -> ax:  [batch, seq_len, num_experts, rank]
        molb(ax) -> bax: [batch, seq_len, num_experts, output_dim]
        router(x) -> router_probs: [batch, seq_len, num_experts]
        probs-weighted sum over experts -> ebax: [batch, seq_len, output_dim]
        ebax * (alpha / rank) -> scaled output

    Attributes:
      router: Router; called as ``router(x, num_experts)`` and expected to
        return a dict with keys "probs" and "expert_mask".
      rank: LoRA rank.
      alpha: LoRA alpha; combined output is scaled by ``alpha / rank``.
      lora_init_A: LoRA A initializer (applied in place).
      lora_init_B: LoRA B initializer (applied in place).
      num_experts: Number of experts.
      dtype: Parameter dtype.
      output_dim: LoRA output dimension (defaults to hidden_dim when falsy).
    """

    def __init__(
        self,
        # Forward-reference annotation so the class is importable without
        # the routing module; semantics are unchanged.
        router: "SoftRouter",
        hidden_dim,
        rank=8,
        lora_init_A=nn.init.kaiming_uniform_,
        lora_init_B=nn.init.zeros_,
        num_experts=10,
        dtype=torch.float32,
        output_dim=None,
        alpha=16,
    ):
        super().__init__()
        self.router = router
        self.rank = rank
        self.lora_init_A = lora_init_A
        self.lora_init_B = lora_init_B
        self.num_experts = num_experts
        self.dtype = dtype
        self.output_dim = output_dim
        self.alpha = alpha

        # Per-expert down-projection: [num_experts, hidden_dim, rank].
        self.molora_a = nn.Parameter(
            torch.empty((num_experts, hidden_dim, rank), dtype=dtype)
        )
        # Per-expert up-projection: [num_experts, rank, output_dim].
        self.molora_b = nn.Parameter(
            torch.empty(
                (num_experts, rank, output_dim or hidden_dim),
                dtype=dtype,
            )
        )

        self.lora_init_A(self.molora_a)
        self.lora_init_B(self.molora_b)

    def forward(self, x):
        """
        Args:
            x: input tensor [batch, seq_len, hidden_dim]

        Returns:
            dict with:
              "o": combined, alpha-scaled output
                   [batch, seq_len, output_dim or hidden_dim]
              "expert_feats": raw (unscaled) per-expert outputs
                   [batch, seq_len, num_experts, output_dim]
              "expert_mask": routing mask passed through from the router
        """
        # Router is expected to return {"probs": [batch, seq_len, num_experts],
        # "expert_mask": ...} (see usage of router_dict below).
        router_dict = self.router(x, self.num_experts)
        router_probs = router_dict["probs"]

        # [batch, seq_len, num_experts, rank]
        ax = torch.einsum("bsd,edr->bser", x, self.molora_a)
        # [batch, seq_len, num_experts, output_dim]
        bax = torch.einsum("bser,erd->bsed", ax, self.molora_b)

        # Probability-weighted combination over experts:
        # [batch, seq_len, output_dim]
        ebax = torch.einsum("...e,...ed->...d", router_probs, bax)

        # FIX: apply the standard LoRA scaling (alpha / rank). Previously
        # `alpha` was stored but never used, inconsistent with
        # MoLoRaAttention which applies this scaling.
        ebax = ebax * (self.alpha / self.rank)

        return {
            "o": ebax,
            "expert_feats": bax,
            "expert_mask": router_dict["expert_mask"],
        }


class MoLoRaAttention(nn.Module):
    """MoLoRa (mixture-of-LoRA) implementation for an attention projection.

    Pipeline (shapes):
        x: [batch, seq_len, hidden_dim]
        mola(x)  -> ax:  [batch, seq_len, num_experts, rank]
        molb(ax) -> bax: [batch, seq_len, num_experts, output_dim]
        router(x) -> router_probs: [batch, seq_len, num_experts]
        probs-weighted sum -> ebax: [batch, seq_len, hidden_dim]
        ebax.view([batch, seq_len, num_heads, head_dim])

    Attributes:
      router: Router; called as ``router(x, num_experts)``. May return either
        a dict with a "probs" key (SoftRouter convention, see MoLoRaMLP) or
        the probability tensor directly.
      rank: LoRA rank.
      alpha: LoRA alpha; output is scaled by ``alpha / rank``.
      lora_init_A: LoRA A initializer (applied in place).
      lora_init_B: LoRA B initializer (applied in place).
      num_experts: Number of experts.
      num_heads: Number of attention heads; hidden_dim must be divisible
        by num_heads for the final reshape.
      dtype: Parameter dtype.
      output_dim: LoRA output dimension (defaults to hidden_dim when falsy).
    """

    def __init__(
        self,
        router,
        hidden_dim,
        rank=8,
        output_dim=None,
        num_experts=10,
        num_heads=12,
        alpha=16,
        lora_init_A=nn.init.normal_,
        lora_init_B=nn.init.zeros_,
        dtype=torch.float32,
    ):
        super().__init__()
        self.router = router
        self.rank = rank
        self.lora_init_A = lora_init_A
        self.lora_init_B = lora_init_B
        self.num_experts = num_experts
        self.num_heads = num_heads
        self.dtype = dtype
        self.output_dim = output_dim
        self.alpha = alpha

        # Per-expert down-projection: [num_experts, hidden_dim, rank].
        self.molora_a = nn.Parameter(
            torch.empty((num_experts, hidden_dim, rank), dtype=dtype)
        )
        self.lora_init_A(self.molora_a)

        # Per-expert up-projection: [num_experts, rank, output_dim].
        self.molora_b = nn.Parameter(
            torch.empty(
                (num_experts, rank, output_dim or hidden_dim),
                dtype=dtype,
            )
        )
        self.lora_init_B(self.molora_b)

    def forward(self, x):
        """
        Args:
            x: input tensor [batch, seq_len, hidden_dim]

        Returns:
            LoRA output tensor [batch, seq_len, num_heads, head_dim]
        """
        # [batch, seq_len, hidden_dim]
        *rest, hidden = x.shape

        router_out = self.router(x, self.num_experts)
        # FIX: SoftRouter-style routers return a dict (see MoLoRaMLP, which
        # reads router_dict["probs"]); previously the raw return value was
        # fed straight into the einsum below, which fails for a dict.
        # Accept both conventions to stay backward-compatible with any
        # router that returns the probability tensor directly.
        if isinstance(router_out, dict):
            router_probs = router_out["probs"]
        else:
            router_probs = router_out

        # [batch, seq_len, num_experts, rank]
        ax = torch.einsum("bsd,edr->bser", x, self.molora_a)

        # [batch, seq_len, num_experts, output_dim]
        bax = torch.einsum("bser,erd->bsed", ax, self.molora_b)

        # Probability-weighted combination: [batch, seq_len, hidden_dim]
        ebax = torch.einsum("...e,...ed->...d", router_probs, bax)

        # Standard LoRA scaling.
        ebax = ebax * (self.alpha / self.rank)

        # Reshape to [batch, seq_len, num_heads, head_dim]; note integer
        # division truncates, so hidden must be divisible by num_heads.
        ebax = ebax.view(*rest, self.num_heads, hidden // self.num_heads)

        return ebax
