"""
@Author  : 吕申凯
@Time    : 2022/9/18 19:48
@File    : Layer_normalization.py
@Function: 
"""

import torch
import torch.nn as nn
from tools.common_tools import set_seed

if __name__ == '__main__':
    set_seed(1)  # fix the random seed for reproducibility

    batch_size = 8
    num_features = 6         # number of feature channels (C)

    features_shape = (3, 4)  # spatial shape (H, W) of each feature map

    # Build one sample: channel c holds the constant value c (1..num_features).
    sample = torch.stack(
        [torch.full(features_shape, float(c)) for c in range(1, num_features + 1)],
        dim=0,
    )  # shape: (C, H, W) = (6, 3, 4)

    # Replicate the sample along a new batch dimension.
    # feature_maps_bs has shape (B, C, H, W) = (8, 6, 3, 4).
    feature_maps_bs = torch.stack([sample] * batch_size, dim=0)

    # Normalize per sample: mean and variance are computed over all channels
    # and spatial positions of each individual sample.
    ln = nn.LayerNorm(feature_maps_bs.size()[1:], elementwise_affine=True)
    # ln = nn.LayerNorm(feature_maps_bs.size()[1:], elementwise_affine=False)
    # normalized_shape must match the trailing dims of the input [8, 6, 3, 4]:
    # [6, 3, 4] and [3, 4] are valid, but e.g. [6, 3] is not.
    # ln = nn.LayerNorm([6, 3, 4])
    # Normalize per feature map: statistics over each (3, 4) slice instead.
    # ln = nn.LayerNorm([3, 4])

    output = ln(feature_maps_bs)

    print("Layer Normalization")
    print(ln.weight.shape)
    print(feature_maps_bs[0, ...])
    print(output[0, ...])
