import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class PopArt(torch.nn.Module):
    """PopArt layer: "Preserving Outputs Precisely, while Adaptively Rescaling Targets".

    A linear output head combined with running estimates of the target mean and
    standard deviation. Whenever the statistics are updated, the layer's weight
    and bias are rescaled so that the denormalized output
    ``stddev * forward(x) + mean`` is unchanged (van Hasselt et al., 2016).
    Typically used to adaptively normalize value-function targets.
    """

    def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
        """
        Args:
            input_shape: size of the input feature dimension.
            output_shape: size of the output dimension.
            norm_axes: number of leading axes averaged over when updating stats.
            beta: EMA coefficient for the running statistics.
            epsilon: small constant for numerical stability in debiasing.
            device: device on which parameters and statistics live.
        """
        super(PopArt, self).__init__()

        self.beta = beta  # EMA coefficient
        self.epsilon = epsilon  # numerical-stability constant
        self.norm_axes = norm_axes  # number of leading axes to reduce over
        self.tpdv = dict(dtype=torch.float32, device=device)  # tensor kwargs

        self.input_shape = input_shape
        self.output_shape = output_shape

        # Build tensors directly with the target dtype/device and wrap them in
        # nn.Parameter. The previous `nn.Parameter(...).to(...)` pattern either
        # returned a plain Tensor (silently unregistering the parameter on a
        # non-default device) or kept a registered Parameter that later plain
        # reassignment in update() would break with a TypeError.
        self.weight = nn.Parameter(torch.empty(output_shape, input_shape, **self.tpdv))
        self.bias = nn.Parameter(torch.empty(output_shape, **self.tpdv))

        # Running statistics; excluded from gradient-based optimization.
        self.stddev = nn.Parameter(torch.ones(output_shape, **self.tpdv), requires_grad=False)
        self.mean = nn.Parameter(torch.zeros(output_shape, **self.tpdv), requires_grad=False)
        self.mean_sq = nn.Parameter(torch.zeros(output_shape, **self.tpdv), requires_grad=False)
        self.debiasing_term = nn.Parameter(torch.tensor(0.0, **self.tpdv), requires_grad=False)

        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the linear weights and reset the running statistics."""
        # Kaiming-uniform init, matching nn.Linear's default scheme.
        torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            torch.nn.init.uniform_(self.bias, -bound, bound)
        # Reset statistics to their freshly-constructed values.
        self.mean.zero_()
        self.mean_sq.zero_()
        self.debiasing_term.zero_()
        self.stddev.fill_(1.0)

    def _to_tensor(self, input_vector):
        """Convert numpy input to a float32 tensor on the configured device."""
        if isinstance(input_vector, np.ndarray):
            input_vector = torch.from_numpy(input_vector)
        return input_vector.to(**self.tpdv)

    def forward(self, input_vector):
        """Apply the linear transform ``W x + b`` (normalized-space output).

        Args:
            input_vector: tensor or numpy array of features.

        Returns:
            The linear transformation result as a tensor.
        """
        input_vector = self._to_tensor(input_vector)
        return F.linear(input_vector, self.weight, self.bias)

    @torch.no_grad()
    def update(self, input_vector):
        """Update the running statistics from a batch of (unnormalized) targets.

        The weight and bias are rescaled so that
        ``stddev * forward(x) + mean`` is identical before and after the update.

        Args:
            input_vector: batch of target values used to update the statistics.
        """
        input_vector = self._to_tensor(input_vector)

        # Clone: the statistics are updated *in place* below, so bare references
        # would alias the new values and break the output-preserving rescale.
        old_mean = self.mean.clone()
        old_stddev = self.stddev.clone()

        # Batch mean and second moment over the leading `norm_axes` axes.
        dims = tuple(range(self.norm_axes))
        batch_mean = input_vector.mean(dim=dims)
        batch_sq_mean = (input_vector ** 2).mean(dim=dims)

        # Exponential moving averages (plus Adam-style debiasing accumulator).
        self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
        self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
        self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))

        # copy_ keeps self.stddev registered as a Parameter; plain assignment
        # of a Tensor to a registered parameter attribute raises TypeError.
        self.stddev.copy_((self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4))

        # PopArt rescaling so the denormalized output is preserved.
        # unsqueeze(-1) aligns the per-output scale with the (output, input)
        # weight matrix (correct even when output_shape > 1).
        self.weight.mul_((old_stddev / self.stddev).unsqueeze(-1))
        self.bias.copy_((old_stddev * self.bias + old_mean - self.mean) / self.stddev)

    def debiased_mean_var(self):
        """Return bias-corrected running mean and variance.

        The EMAs start at zero, so early estimates are divided by the
        debiasing term (as in Adam) to correct the initialization bias.

        Returns:
            Tuple of (debiased mean, debiased variance).
        """
        debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
        debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
        return debiased_mean, debiased_var

    def normalize(self, input_vector):
        """Normalize input with the debiased running statistics.

        Args:
            input_vector: tensor or numpy array of unnormalized values.

        Returns:
            Normalized values as a tensor.
        """
        input_vector = self._to_tensor(input_vector)
        mean, var = self.debiased_mean_var()
        # Insert `norm_axes` leading broadcast dims so the (output,) statistics
        # line up with a (batch, ..., output) input.
        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
        return out

    def denormalize(self, input_vector):
        """Map normalized values back to the original target scale.

        Args:
            input_vector: tensor or numpy array of normalized values.

        Returns:
            Denormalized values as a numpy array (unlike ``normalize``, which
            returns a tensor; the asymmetry is preserved for existing callers).
        """
        input_vector = self._to_tensor(input_vector)
        mean, var = self.debiased_mean_var()
        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
        return out.cpu().numpy()
