import torch
from torch import nn
from copy import deepcopy


def my_ln(input: torch.Tensor, weight=None, bia=None, eps: float = 1e-5):
    """Manual layer normalization over all dims except the batch dim.

    Re-implements ``nn.LayerNorm(input.shape[1:])`` for a 4D ``(N, C, H, W)``
    tensor: each sample is normalized by its own mean and (biased) variance,
    then optionally scaled and shifted by elementwise affine parameters.

    Args:
        input: 4D tensor of shape ``(N, C, H, W)``.
        weight: optional elementwise scale (gamma), broadcastable to one
            sample ``input[b]`` (e.g. shape ``(C, H, W)``). ``None`` = no scale.
        bia: optional elementwise shift (beta), same shape rules as ``weight``.
            ``None`` = no shift. (Name kept for backward compatibility.)
        eps: small constant added to the variance for numerical stability,
            matching ``nn.LayerNorm``'s default.

    Returns:
        A new tensor of the same shape as ``input``; ``input`` is not modified.
    """
    batches = input.size(0)
    # Number of elements normalized over per sample (C * H * W).
    num = input[0].numel()
    output = input.clone()
    for b in range(batches):
        x = input[b]
        mean = x.sum() / num
        # Biased variance, as used by nn.LayerNorm.
        var = torch.pow(x - mean, 2).sum() / num
        # Match nn.LayerNorm exactly: (x - mean) / sqrt(var + eps).
        output[b] = (x - mean) / torch.sqrt(var + eps)
        # `is not None` — truth-testing a multi-element tensor raises.
        if weight is not None:
            output[b] = output[b] * weight
        if bia is not None:
            output[b] = output[b] + bia
    return output


if __name__ == '__main__':
    # Compare the built-in LayerNorm against the manual implementation
    # on a random (N=2, C=3, H=5, W=5) input.
    ln = nn.LayerNorm((3, 5, 5))
    input = torch.randn(size=(2, 3, 5, 5))
    output = ln(input)
    output_myln = my_ln(input)

    print('input:\n{}'.format(input))
    # Labels say "ln": this is LayerNorm, not BatchNorm.
    print("ln weight:\n{}\nln bias:\n{}".format(ln.weight, ln.bias))
    print("ln output:\n{}".format(output))
    print("ln output_myln:\n{}".format(output_myln))
