import math
from dataclasses import dataclass
from typing import Tuple, Optional, Literal
import copy

import torch
import torch.nn as nn
import torch.nn.functional as F

from args import ModelArgs


class MoEGate(nn.Module):
    """
    Gating mechanism for routing inputs in a mixture-of-experts (MoE) model.

    Scores every routed expert with a learned linear projection, then picks
    the top-k experts per token and returns their routing weights.

    Attributes:
        dim (int): Dimensionality of input features.
        topk (int): Number of top experts activated for each input.
        n_groups (int): Number of expert groups. Stored from config;
            group-limited routing is NOT implemented in this forward pass.
        topk_groups (int): Number of groups to route inputs to. Stored from
            config; unused for the same reason as `n_groups`.
        score_func (str): Scoring function ('softmax' or 'sigmoid').
        route_scale (float): Scaling factor applied to the routing weights.
        weight (nn.Linear): Learnable projection producing per-expert logits.
    """
    def __init__(self, args: "ModelArgs"):
        super().__init__()
        self.dim = args.dim
        self.topk = args.n_activated_experts
        self.n_groups = args.n_expert_groups
        self.topk_groups = args.n_limited_groups
        self.score_func = args.score_func
        self.route_scale = args.route_scale
        self.weight = nn.Linear(args.dim, args.n_routed_experts)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Score experts for each token and select the top-k.

        Args:
            x: Flattened token features, (batch * seq_len, dim).

        Returns:
            weights: Routing weights for the selected experts,
                (batch * seq_len, topk), scaled by `route_scale`.
            indices: Indices of the selected experts, (batch * seq_len, topk).
        """
        # (batch * seq_len, dim) => (batch * seq_len, n_routed_experts)
        logits = self.weight(x)

        # Fix: honor the configured scoring function; previously the
        # stored `score_func` was ignored and softmax was always used.
        if self.score_func == 'softmax':
            scores = logits.softmax(dim=-1)
        else:
            scores = logits.sigmoid()

        # (batch * seq_len, topk)
        weights, indices = torch.topk(scores, self.topk, dim=-1)

        # Sigmoid scores are not a distribution over experts, so the
        # selected weights are renormalized to sum to 1 per token.
        if self.score_func == 'sigmoid':
            weights = weights / weights.sum(dim=-1, keepdim=True)

        # Fix: apply the configured routing scale (was stored but unused).
        weights = weights * self.route_scale
        return weights, indices
    

class MoEExpert(nn.Module):
    """A single SwiGLU-style feed-forward expert.

    Projects the input into an intermediate space along two parallel
    branches, gates one branch with SiLU, multiplies the branches
    elementwise, and projects the product back to the input dimension.
    """
    def __init__(self, dim: int, inter_dim: int):
        super().__init__()
        # Two parallel up-projections: one gated by SiLU, one linear.
        self.input_to_inter1 = nn.Linear(dim, inter_dim)
        self.input_to_inter2 = nn.Linear(dim, inter_dim)
        # Down-projection back to the model dimension.
        self.inter_to_output = nn.Linear(inter_dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the gated feed-forward transform to `x`."""
        return self.inter_to_output(
            F.silu(self.input_to_inter1(x)) * self.input_to_inter2(x)
        )
    

class MoE(nn.Module):
    """
    Mixture-of-experts layer.

    Routes each token to its top-k experts via `MoEGate`, combines the
    selected expert outputs weighted by the gate's routing weights, and
    adds the output of an always-active shared expert.
    """
    def __init__(self, args: ModelArgs):
        super().__init__()
        self.dim = args.dim
        self.n_routed_experts = args.n_routed_experts
        self.n_activated_experts = args.n_activated_experts
        self.gate = MoEGate(args)
        self.experts = nn.ModuleList([
            MoEExpert(args.dim, args.moe_inter_dim) for _ in range(self.n_routed_experts)
        ])
        # One wider shared expert applied unconditionally to every token.
        self.shared_experts = MoEExpert(args.dim, args.n_shared_experts * args.moe_inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply routed experts plus the shared expert.

        Args:
            x: Input tensor of shape (batch, seq_len, dim).

        Returns:
            Tensor of the same shape as `x`.
        """
        shape = x.size()  # (batch, seq_len, dim)
        # Flatten to 2D so tokens can be gathered per expert.
        # Fix: `reshape` handles non-contiguous inputs where `view` raises.
        x = x.reshape(-1, self.dim)  # (batch * seq_len, dim)

        # For each token, the top-k experts: both (batch * seq_len, topk).
        weights, indices = self.gate(x)
        # How many tokens were routed to each expert, so idle experts
        # can be skipped entirely.
        counts = torch.bincount(
            indices.flatten(),
            minlength=self.n_routed_experts
        ).tolist()

        # For expert i, gather the tokens assigned to it and process them
        # as one batch, accumulating the weighted outputs.
        y = torch.zeros_like(x)
        for i in range(self.n_routed_experts):
            if counts[i] == 0:
                continue
            expert = self.experts[i]
            # Rows (tokens) routed to expert i, and which top-k slot chose it.
            idx, top = torch.where(indices == i)
            # `idx` is unique within a single expert (a token selects each
            # expert at most once), so this indexed += accumulates exactly.
            # weights[idx, top, None] is each token's weight for this expert.
            y[idx] += expert(x[idx]) * weights[idx, top, None]

        # Shared expert sees every token unconditionally.
        z = self.shared_experts(x)

        return (y + z).view(shape)


if __name__ == '__main__':
    # Smoke test: push a random batch through the MoE layer and confirm
    # the output shape matches the input shape.
    config = ModelArgs()
    layer = MoE(config)

    sample = torch.randn((1, 12, config.dim))

    result = layer(sample)

    print(result)
    print(sample.shape)
    print(result.shape)