import torch
import torch.nn as nn

class RMSNorm(nn.Module):
    """Root Mean Square Layer Normalization (Zhang & Sennrich, 2019).

    Normalizes the input by its root-mean-square statistic over the last
    dimension, then applies a learned per-element gain (and an optional
    learned offset).

    Args:
        hidden_size (int): size of the last (feature) dimension of the input.
        partial (float): partial RMSNorm ratio. When in (0, 1], the RMS
            statistic is computed from only the first
            ``int(hidden_size * partial)`` features (but still applied to the
            whole vector). Values outside (0, 1] (default -1.0) disable it.
        eps (float): small constant added to the RMS denominator for
            numerical stability. Default 1e-8.
        bias (bool): whether to add a learned offset term. Disabled by
            default because RMSNorm doesn't enforce re-centering invariance.
    """

    def __init__(self, hidden_size, partial=-1.0, eps=1e-8, bias=False):
        super(RMSNorm, self).__init__()
        self.hidden_size = hidden_size
        self.partial = partial
        self.eps = eps
        # Plain bool flag; forward() tests `if self.bias:` on it.
        self.bias = bias

        # Assigning an nn.Parameter registers it under the name 'g'.
        self.g = nn.Parameter(torch.ones(hidden_size))
        # Also expose the same gain tensor under the name 'scale' (kept for
        # backward compatibility with state_dicts saved by earlier versions;
        # it is the identical Parameter object, not a copy).
        self.register_parameter('scale', self.g)

        if self.bias:
            # Registered under 'offset' only. It must NOT be re-registered
            # as 'bias': that name is already taken by the bool flag above,
            # and nn.Module.register_parameter raises KeyError when the
            # attribute already exists (the original code crashed here
            # whenever bias=True was requested).
            self.offset = nn.Parameter(torch.zeros(hidden_size))

    def forward(self, x):
        """Apply RMS normalization over the last dimension of ``x``.

        Args:
            x: tensor whose last dimension has size ``hidden_size``.

        Returns:
            Tensor of the same shape as ``x``.
        """
        if 0. < self.partial <= 1.:
            # Compute the RMS statistic from only the leading slice of the
            # feature dimension. Guard against a zero-size slice (tiny
            # `partial` values), which would otherwise raise
            # ZeroDivisionError in `d_x ** (-0.5)`.
            partial_size = max(int(self.hidden_size * self.partial), 1)
            # Split the last dim into [partial_size, rest]; only the first
            # piece feeds the norm.
            partial_x, _ = torch.split(
                x, [partial_size, self.hidden_size - partial_size], dim=-1)
            norm_x = partial_x.norm(2, dim=-1, keepdim=True)
            d_x = partial_size
        else:
            # Full RMSNorm: L2 norm over the entire last dimension.
            norm_x = x.norm(2, dim=-1, keepdim=True)
            d_x = self.hidden_size

        # RMS = ||x|| / sqrt(d); eps keeps the division finite for all-zero
        # inputs.
        rms_x = norm_x * d_x ** (-0.5)
        x_norm = self.g * (x / (rms_x + self.eps))

        if self.bias:
            # Out-of-place add keeps the autograd graph clean.
            x_norm = x_norm + self.offset

        return x_norm


if __name__ == '__main__':
    # Build a demo activation tensor with values drawn from [1, 100).
    demo_state = 1 + 99 * torch.rand((1, 3, 10))
    print("hidden_state:\n", demo_state)

    feature_dim = demo_state.shape[-1]

    # Standard RMSNorm over the full feature dimension.
    full_norm = RMSNorm(hidden_size=feature_dim)
    print("Output rms norm without partial:\n", full_norm(demo_state))

    # Partial RMSNorm: statistic computed from the first 65% of features.
    partial_norm = RMSNorm(hidden_size=feature_dim, partial=0.65)
    print("Output rms norm with partial=0.65:\n", partial_norm(demo_state))
