# models/layers.py
import torch
from torch import nn
import torch.nn.functional as F

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (no mean subtraction, no bias).

    Normalizes the last dimension by its RMS value and applies a learnable
    per-channel scale ``gamma``.
    """

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        # Learnable per-channel scale, initialized to ones. Shape: (dim,)
        self.gamma = nn.Parameter(torch.ones(dim))
        # Small constant keeping the denominator away from zero.
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize ``x`` of shape (B, T, D) over the last dimension."""
        # Mean of squares over features; keepdim so it broadcasts back. (B, T, 1)
        mean_sq = x.pow(2).mean(dim=-1, keepdim=True)
        inv_rms = torch.rsqrt(mean_sq + self.eps)
        # Scale by gamma (D,) broadcast over batch and time. -> (B, T, D)
        return self.gamma * x * inv_rms

class EfficientMoELayer(nn.Module):
    """Memory-friendly Mixture-of-Experts layer.

    Each token is routed to its top ``num_experts_per_tok`` experts; each
    expert is a SwiGLU-style FFN: ``down(SiLU(gate(x)) * up(x))``. Experts are
    stored as independent ``nn.Linear`` modules (not one fused parameter) so
    they can be loaded/offloaded individually.
    """

    def __init__(self,
                 d_model: int,
                 num_experts: int,
                 num_experts_per_tok: int,
                 hidden_size: int
                 ):
        super().__init__()
        self.d_model = d_model
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.hidden_size = hidden_size

        # Router: scores every token against every expert.
        self.router = nn.Linear(d_model, num_experts, bias=False)

        # Per-expert projections as separate Linear layers (nn.ModuleList,
        # not nn.Parameter) so individual experts can be moved on demand.
        self.gate_proj = nn.ModuleList([
            nn.Linear(d_model, hidden_size, bias=False) for _ in range(num_experts)
        ])
        self.up_proj = nn.ModuleList([
            nn.Linear(d_model, hidden_size, bias=False) for _ in range(num_experts)
        ])
        self.down_proj = nn.ModuleList([
            nn.Linear(hidden_size, d_model, bias=False) for _ in range(num_experts)
        ])

        self.act = nn.SiLU()

        # Same normal init for every expert projection (one loop instead of
        # three identical ones).
        for expert in (*self.gate_proj, *self.up_proj, *self.down_proj):
            nn.init.normal_(expert.weight, std=0.02)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Route tokens and combine weighted expert outputs.

        Args:
            x: input of shape (B, T, D).
        Returns:
            Tensor of shape (B, T, D).
        """
        B, T, D = x.shape
        x_flat = x.view(-1, D)            # (B*T, D)
        logits = self.router(x_flat)      # (B*T, num_experts)

        # Top-k routing; weights normalized with softmax over the selected k.
        topk_weights, topk_indices = torch.topk(logits, self.num_experts_per_tok, dim=-1)
        topk_weights = F.softmax(topk_weights, dim=-1)  # (B*T, k)

        out = torch.zeros_like(x_flat)    # (B*T, D)

        # Single pass over experts. The original looped over every (slot,
        # expert) pair, scanning num_experts_per_tok * num_experts masks;
        # since torch.topk returns distinct indices per token, each token has
        # at most one slot (hence one weight) per expert, so one mask per
        # expert is enough.
        for e in range(self.num_experts):
            slot_mask = topk_indices == e            # (B*T, k)
            token_mask = slot_mask.any(dim=-1)       # (B*T,)
            if not token_mask.any():
                continue  # expert e received no tokens this batch

            # Per-token routing weight for expert e (unselected slots are 0).
            weights = (topk_weights * slot_mask.to(topk_weights.dtype)).sum(dim=-1)

            inputs = x_flat[token_mask]              # (M, D), M = tokens routed to e
            # SwiGLU FFN: down(SiLU(gate(x)) * up(x))
            gate = self.gate_proj[e](inputs)         # (M, H)
            up = self.up_proj[e](inputs)             # (M, H)
            expert_out = self.down_proj[e](self.act(gate) * up)  # (M, D)

            # Weighted accumulation back onto the routed tokens.
            out[token_mask] += expert_out * weights[token_mask].unsqueeze(-1)

        return out.view(B, T, D)

class MoELayer(nn.Module):
    """Mixture of Experts Layer"""
    def __init__(self,
                 d_model: int,
                 num_experts: int,
                 num_experts_per_tok: int,
                 hidden_size: int
                 ):
        super().__init__()

        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok

        # 路由router：计算输入token与每个专家的关联
        self.router = nn.Linear(d_model, num_experts, bias=False)

        # 这是所有专家的“上投影矩阵池” (num_experts, D, 2H)
        # 每个专家有自己的 gate_up_proj[i] 参数 (D, 2H)
        # 维度 2*H 是为了后续拆分成 gate 和 up
        self.gate_up_proj = nn.Parameter(torch.empty(num_experts, d_model, 2 * hidden_size))
        nn.init.normal_(self.gate_up_proj, mean=0.0, std=0.02)

        # 每个专家有自己的 down_proj[i] 参数 (H, D)
        self.down_proj = nn.Parameter(torch.empty(num_experts, hidden_size, d_model))
        nn.init.normal_(self.down_proj, mean=0.0, std=0.02)

        # 激活函数SiLU
        self.activation = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor: # (B,T,D)
        B, T, D = x.size()

        # 路由router：计算每个token与每个专家的匹配度
        logits = self.router(x) # (B,T,num_experts)

        # 找匹配度最大的num_experts_per_tok个专家对应的 匹配度和位置
        topk_logits, topk_indices = torch.topk(logits, self.num_experts_per_tok, dim=-1)
        # topk_logits -> (B,T,num_experts_per_tok) 最大的位置的匹配度值 从大到小排
        # topk_indices -> (B,T,num_experts_per_tok) 最大的位置的对应索引

        # 计算每个 token 路由到对应专家的权重
        routing_weights = torch.sigmoid(topk_logits).view(-1)# (B*T*num_experts_per_tok,)

        # 获取被选中的专家编号
        selected_experts = topk_indices.view(-1)  # (B*T*num_experts_per_tok,)
        # 获取对应专家的参数矩阵
        expert_proj = self.gate_up_proj[selected_experts] # (B*T*num_experts_per_tok,D,2H)
        down_proj = self.down_proj[selected_experts] # (B*T*num_experts_per_tok,H,D)

        # 展平输入 token
        x_flat = x.view(B * T, D)  # (B*T,D)

        # 构建一个(B*T*num_experts_per_tok)长度的索引
        token_idx = torch.arange(B * T, device=x.device).repeat_interleave(self.num_experts_per_tok)
        # torch.arange(B * T) → [0, 1, 2, ..., B*T-1]，每个数字代表一个 token 的 flat 索引
        # .repeat_interleave(k) → 每个 token 在内部重复 k 次
        # token_idx = [0,0, 1,1, 2,2, 3,3, 4,4, 5,5] -> (B*T*num_experts_per_tok,)

        # 按照索引取数，每个token会连续取k遍。分别送往它被路由到的 k 个专家
        expert_inputs = x_flat[token_idx] # (B*T*num_experts_per_tok , D)

        # 计算 gate_up 投影（FFN 第一层）
        gate_up = torch.bmm(expert_inputs.unsqueeze(1), expert_proj).squeeze(1)
        # expert_inputs.unsqueeze(1) -> (B*T*num_experts_per_tok, 1, D)
        # bmm( [B*T*k, 1, D], [B*T*k, D, 2*H] ) 矩阵乘法：(1×D) @ (D×2H) → (1×2H) 批量执行 B*T*k 次
        # .squeeze(1) -> (B*T*k, 2*H)

        # 输出维度 2*H 是为了拆分成 gate 和 up
        gate, up = gate_up.chunk(2, dim=-1) # (B*T*k, H)

        # gate 控制“哪些信息应该通过” , up 是“要传递的信息”
        act = self.activation(gate) * up # 逐元素相乘 (B*T*k, H)

        # 计算 down_proj 输出（FFN 第二层）
        out = torch.bmm(act.unsqueeze(1), down_proj).squeeze(1) * routing_weights.unsqueeze(-1)
        # act.unsqueeze(1) -> (B*T*k,1,H)
        # bmm( [B*T*k, 1, H], [B*T*k, H, D] ) → [B*T*k, 1, D]
        # .squeeze(1) → [B*T*k, D]
        # routing_weights.unsqueeze(-1) → [B*T*k, 1]
        # 广播乘法：每个专家输出乘以其路由权重
        # out -> (B*T*k, D)
        # 我们希望将out的第一维按照每k项加权合并，得到(B*T, D)

        # 初始化聚合容器
        combined = torch.zeros(B * T, D,device=x.device,dtype=x.dtype) # (B * T, D)
        combined_weight = torch.zeros(B * T, 1,device=x.device) # (B * T, 1)

        # self.scatter_add_(dim, index, src) 把src中的值，根据 index 指定的位置，加到 self 上
        combined.scatter_add_(0, token_idx.unsqueeze(-1).expand(-1, D), out)
        # dim=0：在第 0 维（token 维度）上操作
        # out -> (B*T*k, D)
        # token_idx.unsqueeze(-1).expand(-1, D) -> (B*T*k, D)

        # 例如：token_idx = [0,0,1,1] （k=2）
        # out = [o00, o01, o10, o11]
        # combined[0] += o00 + o01；combined[1] += o10 + o11

        # 输出combined (B * T, D)

        # 聚合路由权重（用于归一化）
        combined_weight.scatter_add_(0, token_idx.unsqueeze(-1), routing_weights.unsqueeze(-1))
        # token_idx.unsqueeze(-1): [B*T*k, 1]
        # routing_weights.unsqueeze(-1): [B*T*k, 1]
        # combined_weight: [B * T, 1]  # 每个 token 的总路由权重

        combined = combined / (combined_weight + 1e-6) # [B*T, D]

        return combined.view(B, T, D)

class SharedExpert(nn.Module):
    """Shared Expert FFN"""
    def __init__(self, d_model: int, hidden_size: int):
        super().__init__()

        # gate：对输入进行加权
        self.gate = nn.Linear(d_model, hidden_size, bias=False)
        # up：将输入投影到隐藏空间
        self.up = nn.Linear(d_model, hidden_size, bias=False)
        # down：将隐藏空间的输出映射回模型维度
        self.down = nn.Linear(hidden_size, d_model, bias=False)
        # SiLU激活函数
        self.activation = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor: # [B,T,D]

        # gated计算：用 gate 激活决定加权，up 将其映射到隐藏空间

        # torch.sigmoid(self.gate(x)) -> [B,T,HIDDEN]
        # self.activation(self.up(x)) -> [B,T,HIDDEN]
        gated = torch.sigmoid(self.gate(x)) * self.activation(self.up(x))

        # down 投影回原始维度
        return self.down(gated) # [B,T,D]

class AttentionBlock(nn.Module):
    """Causal multi-head self-attention with rotary position embeddings (RoPE)."""

    def __init__(self,
                 d_model: int,
                 n_heads: int,
                 block_size: int,
                 rope_theta: float
                 ):
        super().__init__()
        self.d_model = d_model

        # Model dimension must split evenly across heads.
        assert d_model % n_heads == 0, f"d_model ({d_model}) must be divisible by n_heads ({n_heads})"
        self.n_heads = n_heads

        # BUGFIX: compute d_k before asserting on it. The original assert
        # message referenced self.d_k before it was assigned, so a failing
        # check raised AttributeError instead of the intended AssertionError.
        d_k = d_model // n_heads
        # Per-head dim must be even because RoPE rotates channel pairs.
        assert d_k % 2 == 0, f"d_k (d_model // n_heads = {d_k}) must be even for RoPE"
        self.d_k = d_k

        self.block_size = block_size
        self.rope_theta = rope_theta

        # Fused projection producing Q, K and V in a single matmul.
        self.qkv = nn.Linear(d_model, 3 * d_model, bias=False)

        # Output projection applied after head concatenation.
        self.out_proj = nn.Linear(d_model, d_model, bias=False)

        # --- RoPE tables -------------------------------------------------
        # Positions [0, 1, ..., block_size - 1], shape (T,).
        pos = torch.arange(self.block_size).float()

        # Inverse frequencies theta^(-2i/d_k), shape (d_k // 2,).
        inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, self.d_k, 2).float() / self.d_k))

        # Outer product: rotation angle for each (position, frequency) pair.
        # freqs[i, j] = pos[i] * inv_freq[j], shape (block_size, d_k // 2).
        freqs = torch.einsum('i,j->ij', pos, inv_freq)

        # Unit complex phasors exp(i * angle). Registered as a buffer: saved
        # with the model but not trained. Shaped (1, T, 1, d_k // 2) so it
        # broadcasts against complex Q/K of shape (B, T, H, d_k // 2) — the
        # rotation depends only on position and channel pair, not on batch
        # or head.
        self.register_buffer(
            "freqs_cis",
            torch.polar(torch.ones_like(freqs), freqs).unsqueeze(0).unsqueeze(2)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """x: (B, T, D) -> (B, T, D). T must not exceed block_size."""
        B, T, D = x.size()

        # One projection, then split into Q, K, V: each (B, T, H, Dk).
        qkv = self.qkv(x).view(B, T, self.n_heads, 3, self.d_k)
        q, k, v = qkv.unbind(dim=3)

        def apply_rope(tensor):  # (B, T, H, Dk)
            # Pair adjacent channels -> (B, T, H, Dk//2, 2), view each pair
            # as a complex number (real, imag), and rotate by the
            # position-dependent unit phasors (broadcast over B and H).
            t = tensor.view(B, T, self.n_heads, self.d_k // 2, 2)
            t = torch.view_as_complex(t) * self.freqs_cis[:, :T]
            # Back to real pairs and flatten the trailing dims -> (B, T, H, Dk).
            return torch.view_as_real(t).flatten(3)

        q, k = apply_rope(q), apply_rope(k)  # (B, T, H, Dk)

        # (B, H, T, Dk) layout expected by scaled_dot_product_attention
        # (transpose, since view cannot reorder these dims).
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)

        # PyTorch's fused attention kernel (FlashAttention-style when
        # available); is_causal builds the causal mask internally, no
        # manual masked_fill needed.
        out = F.scaled_dot_product_attention(
            q, k, v,
            dropout_p=0.0,
            attn_mask=None,  # no padding mask
            is_causal=True
        )  # (B, H, T, Dk)

        # Merge heads back: (B, T, D).
        out = out.transpose(1, 2).contiguous().view(B, T, D)

        return self.out_proj(out)  # (B, T, D)