import torch
import torch.nn as nn

# LLaMA2 uses SiLU as its default activation; this table maps activation
# names to module instances (mirrors the ACT2FN registry style used by
# Hugging Face transformers).
# NOTE(review): the single nn.SiLU() instance is shared by every model that
# looks it up — harmless here because SiLU is stateless and parameter-free.
ACT2FN = {
    "silu": nn.SiLU(),
}

class LlaMAMLP(nn.Module):
    """LLaMA-style gated MLP (SwiGLU) feed-forward block.

    Computes ``down_proj(act_fn(gate_proj(x)) * up_proj(x))`` — the gated
    feed-forward used by LLaMA/LLaMA2. All three projections are bias-free,
    matching the reference implementation.

    Args:
        hidden_size: Model (input/output) feature dimension.
        intermediate_size: Inner projection dimension of the MLP.
        hidden_act: Name of the activation, looked up in ``ACT2FN``
            (LLaMA2 uses ``"silu"``).

    Raises:
        KeyError: If ``hidden_act`` is not a key in ``ACT2FN``.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
    ):
        super().__init__()
        # gate/up project hidden -> intermediate; down projects back to hidden.
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        if hidden_act not in ACT2FN:
            # Same exception type as a bare dict lookup, but with an
            # actionable message listing the supported activations.
            raise KeyError(
                f"Unknown activation '{hidden_act}'; expected one of {sorted(ACT2FN)}"
            )
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        """Apply the gated MLP; the trailing hidden dimension is preserved."""
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))


if __name__ == "__main__":
    # Smoke-test / demo for the LlaMAMLP block above.
    print("=== LlaMA MLP 测试 ===")

    # Test configuration (LLaMA2-7B dimensions).
    batch_size = 2
    seq_len = 8
    hidden_size = 4096      # LLaMA hidden dimension
    intermediate_size = 11008  # LLaMA intermediate (MLP) dimension
    hidden_act = "silu"     # LLaMA uses the SiLU activation

    # Random input of shape (batch, seq, hidden).
    x = torch.randn(batch_size, seq_len, hidden_size)
    print(f"输入shape: {x.shape}")

    # Build the LLaMA MLP under test.
    llama_mlp = LlaMAMLP(
        hidden_size=hidden_size,
        intermediate_size=intermediate_size,
        hidden_act=hidden_act
    )

    print(f"\n=== 模型结构 ===")
    print(f"gate_proj: {hidden_size} → {intermediate_size}")
    print(f"up_proj: {hidden_size} → {intermediate_size}")
    print(f"down_proj: {intermediate_size} → {hidden_size}")
    print(f"激活函数: {hidden_act}")

    # Forward pass: output shape must equal input shape.
    output = llama_mlp(x)
    print(f"\n=== 前向传播 ===")
    print(f"输出shape: {output.shape}")
    print(f"输入输出shape一致: {output.shape == x.shape}")

    # Parameter-count breakdown (all three projections are bias-free,
    # so .weight.numel() accounts for each layer's full parameter count).
    total_params = sum(p.numel() for p in llama_mlp.parameters())
    print(f"\n=== 参数分析 ===")
    print(f"总参数量: {total_params:,}")
    print(f"gate_proj参数: {llama_mlp.gate_proj.weight.numel():,}")
    print(f"up_proj参数: {llama_mlp.up_proj.weight.numel():,}")
    print(f"down_proj参数: {llama_mlp.down_proj.weight.numel():,}")

    # Verify the gating mechanism by recomputing forward() step by step.
    print(f"\n=== 门控机制验证 ===")
    with torch.no_grad():
        gate_output = llama_mlp.gate_proj(x)  # (batch, seq, intermediate)
        up_output = llama_mlp.up_proj(x)      # (batch, seq, intermediate)
        activated_gate = llama_mlp.act_fn(gate_output)  # SiLU activation
        gated = activated_gate * up_output     # elementwise gating
        final_output = llama_mlp.down_proj(gated)  # project back to hidden

        # Manual computation must match forward() exactly (same ops, same order).
        manual_match = torch.allclose(output, final_output, atol=1e-6)
        print(f"手动计算与forward一致: {manual_match}")

    # Report the LLaMA2 configuration choices being exercised.
    print(f"\n=== LlaMA2配置验证 ===")
    print(f"激活函数: {hidden_act} (LlaMA2默认)")
    print(f"偏置项: False (LlaMA2不使用偏置)")
    print(f"中间层比例: {intermediate_size/hidden_size:.2f}x (LlaMA2标准比例)")

    # Gradient check: backprop through a scalar loss and confirm every
    # projection received a gradient.
    print(f"\n=== 梯度测试 ===")
    loss = output.sum()
    loss.backward()
    print(f"gate_proj梯度: {llama_mlp.gate_proj.weight.grad is not None}")
    print(f"up_proj梯度: {llama_mlp.up_proj.weight.grad is not None}")
    print(f"down_proj梯度: {llama_mlp.down_proj.weight.grad is not None}")

    # Compare parameter counts against a standard (non-gated) FFN.
    print(f"\n=== 与标准FFN对比 ===")
    # A standard FFN has only two linear layers (up + down) of the same sizes.
    standard_ffn_params = hidden_size * intermediate_size * 2
    llama_mlp_params = total_params

    print(f"标准FFN参数量: {standard_ffn_params:,}")
    print(f"LlaMA MLP参数量: {llama_mlp_params:,}")
    print(f"参数量比例: {llama_mlp_params / standard_ffn_params:.2f}x")

    print(f"\n=== 测试完成 ===")
    print(f"LlaMA MLP特点:")
    print(f"- 使用门控机制 (gate projection)")
    print(f"- SiLU激活函数")
    print(f"- 无偏置项 (bias=False)")
    print(f"- 参数量比标准FFN多50%，但性能更好")