import torch
import torch.nn as nn
import torch.nn.functional as F


class ExpertMLP(nn.Module):
    """Two-layer GELU feed-forward network used as a single MoE expert."""

    def __init__(self, model_dim: int, hidden_dim: int):
        """Build the projection model_dim -> hidden_dim -> model_dim."""
        super().__init__()
        self.fc1 = nn.Linear(model_dim, hidden_dim)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_dim, model_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply fc1 -> GELU -> fc2 along the last dimension of ``x``."""
        hidden = self.fc1(x)
        hidden = self.act(hidden)
        return self.fc2(hidden)


class MoEFeedForward(nn.Module):
    """Mixture-of-Experts feed-forward layer with top-1 or top-2 routing.

    A learned linear gate assigns each token to its top-k experts; expert
    outputs are combined weighted by the gate probabilities (Switch
    Transformer style, so the router is differentiable w.r.t. the task
    loss).  A load-balancing auxiliary loss is stored in ``self.aux_loss``
    after every forward pass so callers can add it to the task loss.
    """

    def __init__(
        self,
        model_dim: int,
        num_experts: int,
        hidden_dim: int | None = None,
        top_k: int = 1,
    ):
        """
        Args:
            model_dim: input/output feature dimension.
            num_experts: number of expert MLPs.
            hidden_dim: expert hidden dimension; defaults to ``4 * model_dim``.
            top_k: number of experts each token is routed to (1 or 2).

        Raises:
            ValueError: if ``top_k`` is not 1 or 2.
        """
        super().__init__()
        if top_k not in (1, 2):
            # ValueError instead of assert: asserts are stripped under `python -O`.
            raise ValueError(f"top_k must be 1 or 2, got {top_k}")
        self.model_dim = model_dim
        self.num_experts = num_experts
        self.top_k = top_k
        hidden_dim = hidden_dim or (4 * model_dim)

        self.experts = nn.ModuleList(
            [ExpertMLP(model_dim, hidden_dim) for _ in range(num_experts)]
        )
        self.gate = nn.Linear(model_dim, num_experts, bias=False)
        # Load-balancing loss from the most recent forward; replaced each call.
        self.aux_loss = torch.tensor(0.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Route ``x`` of shape (batch, seq, model_dim) through the experts."""
        logits = self.gate(x)               # (B, S, num_experts)
        probs = F.softmax(logits, dim=-1)
        if self.top_k == 1:
            return self._forward_top1(x, probs)
        return self._forward_top2(x, probs)

    def _set_aux_loss(
        self, probs_flat: torch.Tensor, counts: torch.Tensor, num_tokens: int
    ) -> None:
        # Switch-Transformer balance loss: num_experts * sum_i(mean_prob_i * frac_i).
        # `counts` are hard assignments (no gradient flows through them).
        # Normalizing by the total number of assignments (top_k * tokens) keeps
        # the loss ~1 under uniform routing for both top-1 and top-2; the
        # original divided by `num_tokens` only, doubling the top-2 scale.
        p_mean = probs_flat.mean(dim=0)
        f = counts / max(float(self.top_k * num_tokens), 1.0)
        self.aux_loss = self.num_experts * torch.sum(p_mean * f)

    def _forward_top1(self, x: torch.Tensor, probs: torch.Tensor) -> torch.Tensor:
        """Top-1 routing: each token is processed by its argmax expert."""
        B, S, H = x.shape
        N = B * S
        x_flat = x.reshape(N, H)
        probs_flat = probs.reshape(N, self.num_experts)
        e_idx = torch.argmax(probs_flat, dim=-1)

        y_flat = torch.zeros_like(x_flat)
        counts = torch.zeros(self.num_experts, device=x.device)
        for e in range(self.num_experts):
            mask = e_idx == e
            if mask.any():
                y_flat[mask] = self.experts[e](x_flat[mask])
                counts[e] = mask.sum()

        # BUG FIX: scale each expert output by its winning gate probability so
        # the router receives gradient from the task loss (Switch Transformer).
        # Previously the gate only learned through the auxiliary loss.
        top_p = probs_flat.gather(1, e_idx.unsqueeze(-1))   # (N, 1)
        y_flat = top_p * y_flat

        self._set_aux_loss(probs_flat, counts, N)
        return y_flat.reshape(B, S, H)

    def _forward_top2(self, x: torch.Tensor, probs: torch.Tensor) -> torch.Tensor:
        """Top-2 routing: blend the two highest-probability experts per token."""
        B, S, H = x.shape
        N = B * S
        x_flat = x.reshape(N, H)
        probs_flat = probs.reshape(N, self.num_experts)
        top_vals, top_idx = torch.topk(probs_flat, k=2, dim=-1)

        y_flat = torch.zeros_like(x_flat)
        counts = torch.zeros(self.num_experts, device=x.device)
        for e in range(self.num_experts):
            # topk indices are distinct per token, so the two slot masks for a
            # given expert never overlap and the accumulation is safe.
            for slot in range(2):
                mask = top_idx[:, slot] == e
                if mask.any():
                    w = top_vals[mask, slot].unsqueeze(-1)
                    y_flat[mask] = y_flat[mask] + w * self.experts[e](x_flat[mask])
                    counts[e] += mask.sum()

        self._set_aux_loss(probs_flat, counts, N)
        return y_flat.reshape(B, S, H)


class TransformerBlockMoE(nn.Module):
    """Pre-LayerNorm transformer block whose feed-forward is a MoE layer."""

    def __init__(
        self,
        model_dim: int,
        num_heads: int,
        num_experts: int,
        top_k: int = 1,
        ff_hidden_dim: int | None = None,
        dropout: float = 0.0,
    ):
        """
        Args:
            model_dim: embedding dimension.
            num_heads: number of attention heads.
            num_experts: number of experts in the MoE feed-forward.
            top_k: experts per token (1 or 2).
            ff_hidden_dim: expert hidden size; None lets MoEFeedForward choose.
            dropout: dropout rate for attention and both residual branches.
        """
        super().__init__()
        self.attn = nn.MultiheadAttention(model_dim, num_heads, dropout=dropout, batch_first=True)
        self.ln1 = nn.LayerNorm(model_dim)
        self.ln2 = nn.LayerNorm(model_dim)
        self.moe = MoEFeedForward(model_dim, num_experts, hidden_dim=ff_hidden_dim, top_k=top_k)
        self.dropout = nn.Dropout(dropout)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor | None = None,
        key_padding_mask: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Apply self-attention and the MoE feed-forward with pre-LN residuals.

        Args:
            x: input of shape (batch, seq, model_dim).
            attn_mask: optional attention mask, as accepted by
                ``nn.MultiheadAttention``.
            key_padding_mask: optional (batch, seq) padding mask
                (new optional parameter; backward compatible).

        Returns:
            Tuple of the transformed tensor and the MoE load-balancing
            auxiliary loss from this pass.
        """
        h = self.ln1(x)
        # need_weights=False: skip computing averaged attention maps we discard.
        a, _ = self.attn(
            h, h, h,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
        )
        x = x + self.dropout(a)
        y = self.moe(self.ln2(x))
        x = x + self.dropout(y)
        return x, self.moe.aux_loss


if __name__ == "__main__":
    # Smoke test: run one block for each supported routing mode.
    torch.manual_seed(0)
    batch, seq, dim = 2, 6, 16
    inputs = torch.randn(batch, seq, dim)

    for k in (1, 2):
        block = TransformerBlockMoE(model_dim=dim, num_heads=4, num_experts=4, top_k=k)
        out, aux = block(inputs)
        print(f"top-{k}", out.shape, float(aux))

