import torch
import torch.nn as nn

# Layer norm is applied over the last dimension only, so the shape is preserved:
# input  : (batch_size, seq_len, feature)
# output : (batch_size, seq_len, feature)

class LayerNormLayer(nn.Module):
    """Layer normalization over the last (feature) dimension.

    Each feature vector is normalized to zero mean and unit variance,
    then passed through a learnable elementwise affine transform
    (scale ``a_2``, shift ``b_2``). Shape is preserved:
    (batch_size, seq_len, features) -> (batch_size, seq_len, features).

    Args:
        features: size of the last (normalized) dimension.
        eps: small constant added to the variance for numerical stability.
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNormLayer, self).__init__()
        # Learnable affine parameters, initialized to the identity
        # transform (scale = 1, shift = 0).
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        # Normalize with the *biased* variance and put eps inside the
        # square root, matching nn.LayerNorm. The previous version used
        # `x.std(-1)` (unbiased, n-1 estimator) plus eps outside the root,
        # which (a) deviates numerically from nn.LayerNorm and (b) produces
        # NaN gradients when a feature vector is constant, because the
        # gradient of sqrt at 0 is undefined.
        mean = x.mean(-1, keepdim=True)
        var = x.var(-1, unbiased=False, keepdim=True)
        return self.a_2 * (x - mean) / torch.sqrt(var + self.eps) + self.b_2


if __name__ == '__main__':
    # Pick GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    # Toy batch with shape (batch_size=2, seq_len=3, features=4).
    batch = [
        [[1.0, 2.0, 3.0, 4.0],
         [2.0, 3.0, 4.0, 5.0],
         [3.0, 4.0, 5.0, 6.0]],
        [[4.0, 3.0, 2.0, 1.0],
         [5.0, 4.0, 3.0, 2.0],
         [6.0, 5.0, 4.0, 3.0]],
    ]
    x = torch.tensor(batch).to(device)

    layer_norm = LayerNormLayer(features=4).to(device)
    layer_norm.eval()  # switch to inference mode

    # Overwrite the affine parameters so their effect is visible:
    # a_2 scales the normalized output, b_2 shifts it.
    with torch.no_grad():
        layer_norm.a_2.fill_(2.0)
        layer_norm.b_2.fill_(0.5)

    output = layer_norm(x)

    # Shapes should match: normalization happens along the last dim only.
    print("Input shape:", x.shape)
    print("Output shape:", output.shape)

    print(output)

    # Per-position statistics of the normalized (and affine-shifted) output.
    print("Output mean:", output.mean(-1))
    print("Output std:", output.std(-1))
