import torch
import torch.nn as nn

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (equivalent to T5LayerNorm).

    Scales the last (feature) dimension by the reciprocal of its root mean
    square, then applies a learned per-feature gain. Unlike LayerNorm there
    is no mean subtraction and no bias term.
    """

    def __init__(self, hidden_size, eps=1e-6):
        """
        Args:
            hidden_size: size of the last (feature) dimension to normalize.
            eps: small constant added to the mean square for numerical stability.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Mean of squares over the feature axis, accumulated in float32 so
        # half-precision inputs do not lose accuracy here.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
        normalized = torch.rsqrt(mean_square + self.variance_epsilon) * hidden_states

        # When the module itself runs in half precision, cast the (promoted)
        # activations back down before applying the gain.
        if self.weight.dtype in (torch.float16, torch.bfloat16):
            normalized = normalized.to(self.weight.dtype)

        return self.weight * normalized


# Demo / sanity-check script: exercises RMSNorm and compares it with
# nn.LayerNorm. All printed output is in Chinese (left unchanged).
if __name__ == "__main__":
    print("=== RMSNorm 测试 ===")

    # Test parameters.
    batch_size = 2
    seq_len = 8
    hidden_size = 512
    eps = 1e-6

    # Build random test input of shape (batch, seq, hidden).
    hidden_states = torch.randn(batch_size, seq_len, hidden_size)
    print(f"输入shape: {hidden_states.shape}")
    print(f"输入统计: 均值={hidden_states.mean().item():.4f}, 标准差={hidden_states.std().item():.4f}")

    # Create the RMSNorm layer under test.
    rmsnorm = RMSNorm(hidden_size, eps=eps)
    print(f"\nRMSNorm配置:")
    print(f"  hidden_size: {hidden_size}")
    print(f"  eps: {eps}")
    print(f"  weight shape: {rmsnorm.weight.shape}")

    # Forward pass: shape must be preserved.
    output = rmsnorm(hidden_states)
    print(f"\n前向传播结果:")
    print(f"  输出shape: {output.shape}")
    print(f"  输入输出shape一致: {output.shape == hidden_states.shape}")
    print(f"  输出统计: 均值={output.mean().item():.4f}, 标准差={output.std().item():.4f}")

    # Verify the normalization effect: with weight == 1 the output RMS
    # along the feature axis should be ~1.
    print(f"\n=== RMS归一化验证 ===")
    # Compute the RMS (root mean square) along the last axis.
    rms_input = torch.sqrt(hidden_states.pow(2).mean(-1, keepdim=True))
    rms_output = torch.sqrt(output.pow(2).mean(-1, keepdim=True))

    print(f"输入RMS范围: [{rms_input.min().item():.4f}, {rms_input.max().item():.4f}]")
    print(f"输出RMS范围: [{rms_output.min().item():.4f}, {rms_output.max().item():.4f}]")
    print(f"输出RMS接近1: {torch.allclose(rms_output, torch.ones_like(rms_output), atol=1e-3)}")

    # Re-derive the forward computation by hand and compare.
    print(f"\n=== 手动计算验证 ===")
    with torch.no_grad():
        # 1. Variance (mean of squares) over the last axis, in float32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        print(f"方差shape: {variance.shape}")

        # 2. Normalize by the reciprocal RMS.
        normalized = hidden_states * torch.rsqrt(variance + eps)

        # 3. Apply the learned per-feature scale.
        manual_output = rmsnorm.weight * normalized

        # Check agreement with the module's forward pass.
        is_close = torch.allclose(output, manual_output, atol=1e-6)
        print(f"手动计算与forward一致: {is_close}")

    # Compare output statistics against standard LayerNorm.
    print(f"\n=== 与LayerNorm对比 ===")
    layernorm = nn.LayerNorm(hidden_size, eps=eps)
    layernorm_output = layernorm(hidden_states)

    print(f"RMSNorm输出均值: {output.mean().item():.6f}")
    print(f"LayerNorm输出均值: {layernorm_output.mean().item():.6f}")
    print(f"RMSNorm输出标准差: {output.std().item():.6f}")
    print(f"LayerNorm输出标准差: {layernorm_output.std().item():.6f}")

    # Parameter-count comparison (RMSNorm has no bias, so half the params).
    rmsnorm_params = sum(p.numel() for p in rmsnorm.parameters())
    layernorm_params = sum(p.numel() for p in layernorm.parameters())
    print(f"RMSNorm参数量: {rmsnorm_params}")
    print(f"LayerNorm参数量: {layernorm_params}")
    print(f"参数量比例: {rmsnorm_params/layernorm_params:.2f}")

    # Dtype tests.
    print(f"\n=== 数据类型测试 ===")

    # float16: casting the weight to half triggers the downcast in forward,
    # so the output comes back as float16.
    rmsnorm_fp16 = RMSNorm(hidden_size, eps=eps)
    rmsnorm_fp16.weight.data = rmsnorm_fp16.weight.data.to(torch.float16)

    input_fp16 = hidden_states.to(torch.float16)
    output_fp16 = rmsnorm_fp16(input_fp16)
    print(f"Float16输入: {input_fp16.dtype}, 输出: {output_fp16.dtype}")

    # Gradient check: backward should populate grads for both the input
    # and the weight.
    print(f"\n=== 梯度测试 ===")
    hidden_states.requires_grad_(True)
    loss = rmsnorm(hidden_states).sum()
    loss.backward()
    print(f"输入梯度存在: {hidden_states.grad is not None}")
    print(f"权重梯度存在: {rmsnorm.weight.grad is not None}")
    print(f"权重梯度shape: {rmsnorm.weight.grad.shape}")

    # Shape preservation across several hidden sizes.
    print(f"\n=== 不同维度测试 ===")
    test_dims = [128, 256, 1024, 4096]
    for dim in test_dims:
        test_rmsnorm = RMSNorm(dim)
        test_input = torch.randn(1, 5, dim)
        test_output = test_rmsnorm(test_input)
        print(f"维度{dim}: {test_input.shape} → {test_output.shape}")

    print(f"\n=== 测试完成 ===")
    print(f"RMSNorm特点:")
    print(f"- 只有缩放参数，没有偏移参数，参数量减半")
    print(f"- 使用RMS (Root Mean Square) 进行归一化")
    print(f"- 计算更简单，不需要计算均值")
    print(f"- 在LLaMA等现代模型中广泛使用")