import torch
import torch.nn as nn

class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine map.

    Each position's feature vector is centered and rescaled to roughly unit
    spread, then transformed by a learned per-feature scale ``a_2`` and
    shift ``b_2`` (initialized to the identity: ones and zeros).

    NOTE: this is the Annotated-Transformer formulation — it uses the
    *unbiased* sample std and divides by ``(std + eps)``, which differs
    slightly from ``nn.LayerNorm``'s ``sqrt(biased_var + eps)``.
    """

    def __init__(self, features, eps=1e-6):
        super().__init__()
        # Per-feature affine parameters; identity transform at init.
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        # Small constant added to the std to avoid division by zero.
        self.eps = eps

    def forward(self, x):
        # Statistics are taken over the last (feature) dimension only,
        # independently for every position in the batch/sequence.
        centered = x - x.mean(-1, keepdim=True)
        spread = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * centered / spread + self.b_2


if __name__ == "__main__":
    # Smoke-test / demo script for the custom LayerNorm above.
    print("=== LayerNorm 层归一化测试 ===")

    # Test parameters: small batch and sequence, transformer-sized feature dim.
    batch_size = 2
    seq_len = 5
    features = 512  # feature dimension (the axis being normalized)

    # Build the custom LayerNorm layer under test.
    layer_norm = LayerNorm(features)

    # Random input of shape (batch, seq, features); unseeded, so values vary per run.
    x = torch.randn(batch_size, seq_len, features)
    print(f"输入张量shape: {x.shape}")  # [2, 5, 512]
    print(f"输入张量统计信息:")
    print(f"  均值: {x.mean().item():.4f}")
    print(f"  标准差: {x.std().item():.4f}")
    print(f"  最小值: {x.min().item():.4f}")
    print(f"  最大值: {x.max().item():.4f}")

    # Forward pass: output must keep the input's shape.
    print(f"\n--- LayerNorm前向传播 ---")
    output = layer_norm(x)
    print(f"输出张量shape: {output.shape}")  # [2, 5, 512]
    print(f"输入输出shape相同: {output.shape == x.shape}")

    # Global statistics of the output (mean ~0, std ~1 across all elements).
    print(f"\n--- 归一化效果验证 ---")
    print(f"输出张量统计信息:")
    print(f"  均值: {output.mean().item():.4f}")
    print(f"  标准差: {output.std().item():.4f}")
    print(f"  最小值: {output.min().item():.4f}")
    print(f"  最大值: {output.max().item():.4f}")

    # Per-position check: normalization is applied along the last dimension,
    # so a single position's feature vector should itself be normalized.
    print(f"\n--- 位置级归一化验证 ---")
    # Inspect the first sample's first position.
    sample_position = output[0, 0, :]  # [512]
    print(f"单个位置向量均值: {sample_position.mean().item():.6f} (应接近0)")
    print(f"单个位置向量标准差: {sample_position.std().item():.6f} (应接近1)")

    # Inspect the learnable affine parameters (identity at init).
    print(f"\n--- 可学习参数 ---")
    print(f"缩放参数 a_2 shape: {layer_norm.a_2.shape}")  # [512]
    print(f"偏移参数 b_2 shape: {layer_norm.b_2.shape}")  # [512]
    print(f"a_2 初始值 (前5个): {layer_norm.a_2[:5]}")  # should all be 1
    print(f"b_2 初始值 (前5个): {layer_norm.b_2[:5]}")  # should all be 0
    print(f"eps 值: {layer_norm.eps}")

    # Reproduce the forward computation by hand and compare with the module.
    print(f"\n--- 手动计算验证 ---")
    with torch.no_grad():
        # Same statistics the module computes in forward().
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)

        print(f"计算得到的均值shape: {mean.shape}")  # [2, 5, 1]
        print(f"计算得到的标准差shape: {std.shape}")  # [2, 5, 1]

        # Manual normalization using the module's own parameters and formula.
        manual_output = layer_norm.a_2 * (x - mean) / (std + layer_norm.eps) + layer_norm.b_2

        # Max absolute difference against forward(); should be ~0.
        diff = torch.abs(output - manual_output).max()
        print(f"手动计算与forward结果最大差异: {diff.item():.8f}")

    # Compare against PyTorch's built-in LayerNorm. NOTE(review): the custom
    # layer uses unbiased std + (std + eps), so outputs differ slightly from
    # nn.LayerNorm's sqrt(biased_var + eps) — only summary stats are compared.
    print(f"\n--- 与PyTorch内置LayerNorm对比 ---")
    pytorch_layernorm = nn.LayerNorm(features)
    pytorch_output = pytorch_layernorm(x)

    print(f"PyTorch LayerNorm输出shape: {pytorch_output.shape}")
    print(f"自定义vs PyTorch 输出统计对比:")
    print(f"  自定义输出均值: {output.mean().item():.6f}")
    print(f"  PyTorch输出均值: {pytorch_output.mean().item():.6f}")
    print(f"  自定义输出标准差: {output.std().item():.6f}")
    print(f"  PyTorch输出标准差: {pytorch_output.std().item():.6f}")

    # Backprop through a scalar loss to confirm gradients reach a_2 and b_2.
    print(f"\n--- 梯度计算测试 ---")
    loss = output.sum()
    loss.backward()
    print(f"a_2 参数梯度存在: {layer_norm.a_2.grad is not None}")
    print(f"b_2 参数梯度存在: {layer_norm.b_2.grad is not None}")

    if layer_norm.a_2.grad is not None:
        print(f"a_2 梯度 shape: {layer_norm.a_2.grad.shape}")
        print(f"b_2 梯度 shape: {layer_norm.b_2.grad.shape}")

    # Shape flexibility: the layer only requires the LAST dim to equal `features`.
    print(f"\n--- 不同维度测试 ---")
    # 3D tensor with different batch/sequence sizes.
    x_3d = torch.randn(4, 8, features)
    output_3d = layer_norm(x_3d)
    print(f"3D输入: {x_3d.shape} → 输出: {output_3d.shape}")

    # 2D tensor (no sequence dimension).
    x_2d = torch.randn(10, features)
    output_2d = layer_norm(x_2d)
    print(f"2D输入: {x_2d.shape} → 输出: {output_2d.shape}")

    print(f"\n=== 测试完成 ===")