# models/llama_moe.py
import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
from .layers import RMSNorm, SharedExpert, AttentionBlock,EfficientMoELayer

class LLaMA4MoEBlock(nn.Module):
    """One transformer layer: pre-norm self-attention followed by a
    pre-norm feed-forward stage made of a routed MoE plus an always-active
    shared expert, each wrapped in a residual connection."""

    def __init__(self,
                 d_model: int,
                 n_heads: int,
                 block_size: int,
                 rope_theta: float,
                 num_experts: int,
                 experts_per_tok: int,
                 expert_hidden_size: int,
                 shared_hidden_size: int,
                 ):
        super().__init__()
        self.norm1 = RMSNorm(d_model)
        self.norm2 = RMSNorm(d_model)
        self.attn = AttentionBlock(d_model, n_heads, block_size, rope_theta)
        self.moe = EfficientMoELayer(d_model, num_experts, experts_per_tok, expert_hidden_size)
        self.shared = SharedExpert(d_model, shared_hidden_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map a [B, T, D] hidden state to a [B, T, D] hidden state."""
        # Attention sub-layer: normalize, attend, add back residually.
        x = x + self.attn(self.norm1(x))
        # Feed-forward sub-layer: routed experts and the shared expert both
        # consume the same normalized input, then join the residual stream.
        hidden = self.norm2(x)
        routed_out = self.moe(hidden)
        shared_out = self.shared(hidden)
        return x + routed_out + shared_out

class LLaMA4MoEModel(nn.Module):
    """Complete LLaMA4-style MoE language model.

    Pipeline: token embedding -> n_layers of (attention + MoE) blocks ->
    final RMSNorm -> weight-tied linear head producing vocabulary logits.
    """

    def __init__(self,
                 vocab_size: int,           # vocabulary size
                 d_model: int,              # hidden dimension
                 n_layers: int,             # number of transformer layers
                 n_heads: int,              # attention heads per layer
                 block_size: int,           # maximum sequence length
                 rope_theta: float,         # RoPE base frequency
                 num_experts: int,          # number of MoE experts per layer
                 experts_per_tok: int,      # experts routed per token
                 expert_hidden_size: int,   # hidden size of each routed expert
                 shared_hidden_size: int    # hidden size of the shared expert
                 ):
        super().__init__()
        # Token embedding: unlike a Linear layer, it takes integer token ids
        # and performs a table lookup into a [vocab_size, d_model] matrix.
        self.embed = nn.Embedding(vocab_size, d_model)

        # Stack of Transformer + MoE blocks.
        self.blocks = nn.ModuleList(
            [LLaMA4MoEBlock(d_model,
                            n_heads,
                            block_size,
                            rope_theta,
                            num_experts,
                            experts_per_tok,
                            expert_hidden_size,
                            shared_hidden_size
                            )
                for _ in range(n_layers)]
        )

        # Final normalization before the output projection.
        self.norm_final = RMSNorm(d_model)

        # Output head; its weight has the same [vocab_size, d_model] shape as
        # the embedding, so the two are tied to share parameters.
        self.output = nn.Linear(d_model, vocab_size, bias=False)
        self.output.weight = self.embed.weight

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # x: [B, T] int token ids
        """Return next-token logits of shape [B, T, vocab_size]."""
        x = self.embed(x)  # [B, T] ids -> [B, T, D] via embedding lookup
        # Gradient checkpointing stores only each block's input and recomputes
        # activations during backward, saving memory for deep stacks. It only
        # pays off when a backward pass will actually run, so it is bypassed
        # in eval mode or under torch.no_grad() to avoid pointless overhead.
        use_ckpt = self.training and torch.is_grad_enabled()
        for block in self.blocks:
            if use_ckpt:
                x = checkpoint(block, x, use_reentrant=False)
            else:
                x = block(x)
        x = self.norm_final(x)   # [B, T, D]
        logits = self.output(x)  # [B, T, vocab_size]
        return logits