
import numpy as np

import torch
import torch.nn as nn


class ValueNorm(nn.Module):
    """Normalize values by running statistics taken over the first `norm_axes` dims.

    Maintains exponential moving averages (EMA) of the mean and mean-square of
    the inputs, plus a debiasing term (Adam-style bias correction) so that the
    statistics are unbiased even after only a few updates.
    """

    def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
        """
        Initialize the ValueNorm module.

        Args:
            input_shape: shape of a single (un-batched) input element; the
                running statistics have this shape.
            norm_axes: number of leading dimensions to average over.
            beta: decay rate of the exponential moving average.
            per_element_update: if True, scale the EMA decay by the number of
                elements in the batch (weight = beta ** batch_size).
            epsilon: small constant guarding against division by zero.
            device: device on which the running statistics are stored.
        """
        super(ValueNorm, self).__init__()

        self.input_shape = input_shape  # shape of the per-element statistics
        self.norm_axes = norm_axes  # number of leading dims to reduce over
        self.epsilon = epsilon  # guards against division by zero
        self.beta = beta  # EMA decay rate
        self.per_element_update = per_element_update  # per-element EMA weighting
        self.tpdv = dict(dtype=torch.float32, device=device)  # target dtype/device kwargs

        # NOTE: the statistics are created directly with the target dtype/device.
        # The previous pattern `nn.Parameter(...).to(**tpdv)` returns a plain
        # Tensor whenever a conversion actually happens (e.g. CUDA device), so
        # the buffers silently stopped being registered with the module and
        # disappeared from state_dict() / parameters().
        self.running_mean = nn.Parameter(torch.zeros(input_shape, **self.tpdv), requires_grad=False)
        self.running_mean_sq = nn.Parameter(torch.zeros(input_shape, **self.tpdv), requires_grad=False)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0, **self.tpdv), requires_grad=False)

        self.reset_parameters()

    def reset_parameters(self):
        """Reset all running statistics to zero."""
        self.running_mean.zero_()
        self.running_mean_sq.zero_()
        self.debiasing_term.zero_()

    def _as_float_tensor(self, input_vector):
        """Convert numpy input to a float32 tensor on the configured device."""
        if isinstance(input_vector, np.ndarray):
            input_vector = torch.from_numpy(input_vector)
        return input_vector.to(**self.tpdv)

    def running_mean_var(self):
        """
        Compute the debiased mean and variance.

        Returns:
            debiased_mean: bias-corrected running mean.
            debiased_var: bias-corrected running variance, clamped to >= 1e-2
                to keep the normalization numerically stable.
        """
        # Divide by the debiasing term (EMA of 1.0) to correct the zero-init bias.
        debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        # Var = E[x^2] - E[x]^2, floored to avoid a degenerate (near-zero) scale.
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var

    @torch.no_grad()
    def update(self, input_vector):
        """
        Fold a new batch of values into the running statistics.

        Args:
            input_vector: tensor or numpy array whose first `norm_axes` dims
                are batch dims and whose trailing dims match `input_shape`.
        """
        input_vector = self._as_float_tensor(input_vector)

        reduce_dims = tuple(range(self.norm_axes))
        batch_mean = input_vector.mean(dim=reduce_dims)
        batch_sq_mean = (input_vector ** 2).mean(dim=reduce_dims)

        # Optionally weight the EMA decay by how many elements this batch holds.
        if self.per_element_update:
            batch_size = np.prod(input_vector.size()[:self.norm_axes])
            weight = self.beta ** batch_size
        else:
            weight = self.beta

        self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
        self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
        # EMA of the constant 1.0 — used by running_mean_var() for debiasing.
        self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))

    def normalize(self, input_vector):
        """
        Normalize the input with the current (debiased) running statistics.

        Args:
            input_vector: tensor or numpy array to normalize.

        Returns:
            torch.Tensor of the same shape, normalized to ~zero mean, unit std.
        """
        input_vector = self._as_float_tensor(input_vector)

        mean, var = self.running_mean_var()
        # Broadcast the stats over the leading `norm_axes` batch dims.
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]

        return out

    def denormalize(self, input_vector):
        """
        Map normalized data back to the original distribution.

        Args:
            input_vector: normalized tensor or numpy array.

        Returns:
            numpy.ndarray in the original scale (note: unlike normalize(),
            this intentionally returns a numpy array on the CPU).
        """
        input_vector = self._as_float_tensor(input_vector)

        mean, var = self.running_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]

        out = out.cpu().numpy()

        return out
