import torch
import torch.nn as nn
import numpy as np

# Define the layer normalization module
class LayerNorm(nn.Module):
    """Layer normalization over the last axis with learnable affine parameters.

    Computes (x - mean) / sqrt(var + eps) per sample along the final
    dimension, then applies a per-feature scale (gamma) and shift (beta).
    """

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        # Learnable per-feature scale and shift, initialized to identity.
        self.gamma = nn.Parameter(torch.ones(num_features))
        self.beta = nn.Parameter(torch.zeros(num_features))
        # Numerical-stability constant added to the variance.
        self.eps = eps

    def forward(self, x):
        """Normalize x along its last dimension and apply gamma/beta."""
        mu = x.mean(dim=-1, keepdim=True)
        # Biased (population) variance, matching standard layer norm.
        sigma2 = x.var(dim=-1, keepdim=True, unbiased=False)
        normalized = (x - mu) / torch.sqrt(sigma2 + self.eps)
        return self.gamma * normalized + self.beta

def test_torch_versioni():
    """Smoke-test the torch LayerNorm module on a small random batch.

    Builds a (2, 4) random tensor, runs it through LayerNorm, and prints
    the normalized output.
    """
    n_batch, n_feat = 2, 4
    sample = torch.randn(n_batch, n_feat)

    module = LayerNorm(n_feat)
    print(module(sample))

def layerNormCPU(bufferH, eps=1e-12):
    """NumPy reference layer normalization over the last (feature) axis.

    Args:
        bufferH: list/tuple [x, b, a] where
            x -- input of shape (batch, seq, feature),
            b -- learned per-feature scale, shape (feature,),
            a -- learned per-feature shift, shape (feature,).
        eps: numerical-stability constant added to the variance.
            Defaults to 1e-12, the value previously hard-coded here,
            so existing callers see identical results.

    Returns:
        np.ndarray, same shape as x: b * (x - mean) / sqrt(var + eps) + a.
    """
    x, scale, shift = bufferH
    n_embed = x.shape[2]

    mean = np.mean(x, axis=2, keepdims=True)
    # Biased variance (mean of squared deviations) — NOT the stddev;
    # the original comment mislabeled this.
    var = np.mean((x - mean) ** 2, axis=2, keepdims=True)
    inv_std = 1.0 / np.sqrt(var + np.float32(eps))

    # Broadcast the learned parameters across batch and sequence dims.
    scale = scale.reshape(1, 1, n_embed)
    shift = shift.reshape(1, 1, n_embed)
    return scale * (x - mean) * inv_std + shift


if __name__ == "__main__":
    # Problem size for the CPU layer-norm smoke run.
    n_bs = 1024
    n_sl = 256
    n_embed = 256
    nTime = 100       # NOTE(review): unused below
    epsilon = 1e-6    # NOTE(review): unused — layerNormCPU hard-codes its epsilon

    # Inputs: uniform data in [-1, 1), unit scale, zero shift.
    # (bufferH[1] is the learned scale and bufferH[2] the learned shift,
    # not "mean"/"stdv" as the old comment claimed.)
    data = np.random.rand(n_bs, n_sl, n_embed).astype(np.float32) * 2 - 1
    scale = np.ones(n_embed, dtype=np.float32)
    shift = np.zeros(n_embed, dtype=np.float32)
    bufferH = [data, scale, shift]

    res = layerNormCPU(bufferH)
    print(res.shape)