import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.distributions.normal import Normal

"""DQN"""

# Base class for all Q-networks; adds state/value normalization buffers on top of nn.Module.
class QNetBase(nn.Module):  # nn.Module is a standard PyTorch Network
    def __init__(self, state_dim: int, action_dim: int):
        """Store dimensions and register frozen normalization statistics.

        state_dim: dimension of the state vector
        action_dim: number of discrete actions
        """
        super().__init__()
        self.explore_rate = 0.125  # probability threshold trading off exploration vs. exploitation
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # concrete subclasses assign the MLP here, e.g. build_mlp(...)

        # Normalization statistics registered as non-trainable parameters so they
        # move with the module (device/dtype) but are never updated by the optimizer.
        self.state_avg = nn.Parameter(torch.zeros(state_dim), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones(state_dim), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros(1), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones(1), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored mean and std."""
        return (state - self.state_avg) / self.state_std

    def value_re_norm(self, value: Tensor) -> Tensor:
        """Map a normalized value back onto the original value scale."""
        return value * self.value_std + self.value_avg

# Plain DQN Q-network: maps a state to one Q value per discrete action.
class QNet(QNetBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        # small orthogonal init on the output layer keeps initial Q values near zero
        layer_init_with_orthogonal(self.net[-1], std=0.1)

    def forward(self, state):
        """Return de-normalized Q values for every action."""
        q_raw = self.net(self.state_norm(state))
        return self.value_re_norm(q_raw)  # Q values for multiple actions

    def get_action(self, state):
        """Epsilon-greedy selection: greedy argmax with prob 1 - explore_rate,
        otherwise a uniformly random action per batch row."""
        state = self.state_norm(state)
        if self.explore_rate < torch.rand(1):
            # exploit: argmax is unaffected by value_re_norm (positive affine map)
            return self.net(state).argmax(dim=1, keepdim=True)
        # explore: uniform random action indices
        return torch.randint(self.action_dim, size=(state.shape[0], 1))

# Dueling DQN: a shared state encoder feeding two separate output heads.
class QNetDuel(QNetBase):  # Dueling DQN
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the shared state encoder."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])  # state encoder
        self.net_adv = build_mlp(dims=[dims[-1], 1])  # advantage value
        self.net_val = build_mlp(dims=[dims[-1], action_dim])  # Q value

        layer_init_with_orthogonal(self.net_adv[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val[-1], std=0.1)

    def forward(self, state):
        """Combine the two heads into dueling Q values and de-normalize them."""
        s_enc = self.net_state(self.state_norm(state))  # encoded state
        q_val = self.net_val(s_enc)  # q value
        q_adv = self.net_adv(s_enc)  # advantage value
        # dueling combination: subtracting the per-state mean keeps the heads identifiable
        value = q_val - q_val.mean(dim=1, keepdim=True) + q_adv  # dueling Q value
        return self.value_re_norm(value)

    def get_action(self, state):
        """Epsilon-greedy selection. The greedy branch uses only the Q head:
        the mean shift and the scalar head are constant per row and do not
        change the argmax."""
        state = self.state_norm(state)
        if self.explore_rate < torch.rand(1):
            s_enc = self.net_state(state)  # encoded state
            return self.net_val(s_enc).argmax(dim=1, keepdim=True)
        return torch.randint(self.action_dim, size=(state.shape[0], 1))

# Double DQN: a shared encoder with two Q heads to curb overestimation bias.
class QNetTwin(QNetBase):  # Double DQN
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the shared state encoder."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])  # state encoder
        self.net_val1 = build_mlp(dims=[dims[-1], action_dim])  # Q value 1
        self.net_val2 = build_mlp(dims=[dims[-1], action_dim])  # Q value 2
        self.soft_max = nn.Softmax(dim=1)  # converts Q values into sampling probabilities

        layer_init_with_orthogonal(self.net_val1[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val2[-1], std=0.1)

    def forward(self, state):
        """Return the first head's Q values on the original (de-normalized) scale.

        Fix: `value_re_norm` was missing here, so forward() returned normalized
        values while QNet.forward and this class's get_q1_q2 both return
        de-normalized values. (No difference before the statistics are updated,
        since value_avg=0 and value_std=1 at init.)
        """
        state = self.state_norm(state)
        s_enc = self.net_state(state)  # encoded state
        q_val = self.net_val1(s_enc)  # q value
        return self.value_re_norm(q_val)  # one group of Q values

    def get_q1_q2(self, state):
        """Return both heads' de-normalized Q values (used for the twin-Q minimum)."""
        state = self.state_norm(state)
        s_enc = self.net_state(state)  # encoded state
        q_val1 = self.value_re_norm(self.net_val1(s_enc))  # q value 1
        q_val2 = self.value_re_norm(self.net_val2(s_enc))  # q value 2
        return q_val1, q_val2  # two groups of Q values

    def get_action(self, state):
        """Greedy argmax of head 1 with prob 1 - explore_rate; otherwise sample
        an action from the softmax of the Q values."""
        state = self.state_norm(state)
        s_enc = self.net_state(state)  # encoded state
        q_val = self.net_val1(s_enc)  # q value
        if self.explore_rate < torch.rand(1):
            action = q_val.argmax(dim=1, keepdim=True)
        else:
            a_prob = self.soft_max(q_val)
            action = torch.multinomial(a_prob, num_samples=1)
        return action

# D3QN (Dueling Double DQN): two dueling head pairs over one shared state encoder.
class QNetTwinDuel(QNetBase):  # D3QN: Dueling Double DQN
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the shared state encoder."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])  # shared state encoder
        self.net_adv1 = build_mlp(dims=[dims[-1], 1])  # advantage value 1
        self.net_val1 = build_mlp(dims=[dims[-1], action_dim])  # Q value 1
        self.net_adv2 = build_mlp(dims=[dims[-1], 1])  # advantage value 2
        self.net_val2 = build_mlp(dims=[dims[-1], action_dim])  # Q value 2
        self.soft_max = nn.Softmax(dim=1)  # converts Q values into sampling probabilities

        # small orthogonal init on every output head
        for head in (self.net_adv1, self.net_val1, self.net_adv2, self.net_val2):
            layer_init_with_orthogonal(head[-1], std=0.1)

    def _dueling(self, s_enc, net_val, net_adv):
        """Combine one val/adv head pair into de-normalized dueling Q values."""
        q_val = net_val(s_enc)
        q_adv = net_adv(s_enc)
        # subtracting the per-state mean keeps the two heads identifiable
        q_duel = q_val - q_val.mean(dim=1, keepdim=True) + q_adv
        return self.value_re_norm(q_duel)

    def forward(self, state):
        """Dueling Q values from the first head pair."""
        s_enc = self.net_state(self.state_norm(state))  # encoded state
        return self._dueling(s_enc, self.net_val1, self.net_adv1)

    def get_q1_q2(self, state):
        """Dueling Q values from both head pairs (used for the twin-Q minimum)."""
        s_enc = self.net_state(self.state_norm(state))  # encoded state
        q_duel1 = self._dueling(s_enc, self.net_val1, self.net_adv1)
        q_duel2 = self._dueling(s_enc, self.net_val2, self.net_adv2)
        return q_duel1, q_duel2  # two dueling Q values

    def get_action(self, state):
        """Greedy argmax of head 1 with prob 1 - explore_rate; otherwise sample
        an action from the softmax of the Q values."""
        s_enc = self.net_state(self.state_norm(state))
        q_val = self.net_val1(s_enc)
        if self.explore_rate < torch.rand(1):
            return q_val.argmax(dim=1, keepdim=True)
        a_prob = self.soft_max(q_val)
        return torch.multinomial(a_prob, num_samples=1)

"""Actor (policy network)"""

# Base class for policy networks: dimensions, action distribution, state normalization.
class ActorBase(nn.Module):
    def __init__(self, state_dim: int, action_dim: int):
        """Store dimensions and register frozen state-normalization statistics.

        state_dim: dimension of the state vector
        action_dim: dimension of the action vector
        """
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # concrete subclasses assign the MLP here, e.g. build_mlp(...)
        self.explore_noise_std = None  # standard deviation of exploration action noise
        self.ActionDist = torch.distributions.normal.Normal  # default continuous-action distribution

        # Frozen normalization statistics: travel with the module but are never trained.
        self.state_avg = nn.Parameter(torch.zeros(state_dim), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones(state_dim), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored mean and std."""
        return (state - self.state_avg) / self.state_std

# Deterministic policy network (DDPG/TD3 style) with clipped Gaussian exploration noise.
class Actor(ActorBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)

        self.explore_noise_std = 0.1  # standard deviation of exploration action noise

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action, tanh-squashed into [-1, 1]."""
        return self.net(self.state_norm(state)).tanh()  # action.tanh()

    def _noisy_action(self, state: Tensor, noise_std: float) -> Tensor:
        """Deterministic action plus Gaussian noise clipped to [-0.5, 0.5],
        with the sum clamped back into [-1, 1]."""
        action = self.net(self.state_norm(state)).tanh()
        noise = (torch.randn_like(action) * noise_std).clamp(-0.5, 0.5)
        return (action + noise).clamp(-1.0, 1.0)

    def get_action(self, state: Tensor) -> Tensor:  # for exploration
        """Exploration action using the default noise std."""
        return self._noisy_action(state, self.explore_noise_std)

    def get_action_noise(self, state: Tensor, action_std: float) -> Tensor:
        """Exploration action using a caller-supplied noise std."""
        return self._noisy_action(state, action_std)

# SAC stochastic policy: a state-conditioned Gaussian over actions, squashed through tanh.
class ActorSAC(ActorBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the state encoder."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        # state encoder; keeps its final activation (if_raw_out=False)
        self.net_s = build_mlp(dims=[state_dim, *dims], if_raw_out=False)  # network of encoded state
        # single head emitting [mean, log_std] of the action Gaussian
        self.net_a = build_mlp(dims=[dims[-1], action_dim * 2])  # the average and log_std of action

        layer_init_with_orthogonal(self.net_a[-1], std=0.1)

    def forward(self, state):
        """Deterministic (mean) action, tanh-squashed into [-1, 1]."""
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg = self.net_a(s_enc)[:, :self.action_dim]  # first half of the head is the mean
        return a_avg.tanh()  # action

    def get_action(self, state):
        """Stochastic action via the re-parameterization trick (no logprob)."""
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()  # clamp log-std for numerical stability

        dist = Normal(a_avg, a_std)
        return dist.rsample().tanh()  # action (re-parameterize)

    def get_action_logprob(self, state):
        """Sample an action and return (tanh(action), log-prob of the sample).

        Bug fix: the Gaussian log-probability must be evaluated at the *sampled*
        action (`dist.log_prob(action)`), not at the distribution mean `a_avg`;
        evaluating at the mean yields a value independent of the sample and
        breaks the SAC entropy term.
        """
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()  # clamp log-std for numerical stability

        dist = Normal(a_avg, a_std)
        action = dist.rsample()  # re-parameterized sample

        action_tanh = action.tanh()
        logprob = dist.log_prob(action)  # was dist.log_prob(a_avg): wrong evaluation point
        # change-of-variables correction for the tanh squashing (+1e-6 avoids log(0))
        logprob -= (-action_tanh.pow(2) + 1.000001).log()  # fix logprob using the derivative of action.tanh()
        return action_tanh, logprob.sum(1)

# SAC policy variant using the numerically stabler Softplus form of the tanh
# log-prob correction.
class ActorFixSAC(ActorSAC):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """Same architecture as ActorSAC; adds a Softplus for the logprob correction."""
        super().__init__(dims=dims, state_dim=state_dim, action_dim=action_dim)
        self.soft_plus = torch.nn.Softplus()  # used in the stable logprob correction

    def get_action_logprob(self, state):
        """Sample an action and return (tanh(action), log-prob of the sample).

        Uses the identity log(1 - tanh(x)^2) = 2*(log 2 - x - softplus(-2x))
        for the tanh change-of-variables correction, which is numerically
        stabler than differentiating tanh directly.

        Bug fix: the Gaussian log-probability must be evaluated at the *sampled*
        action (`dist.log_prob(action)`), not at the mean `a_avg`; evaluating at
        the mean yields a value independent of the sample.
        """
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()  # clamp log-std for numerical stability

        dist = Normal(a_avg, a_std)
        action = dist.rsample()  # re-parameterized sample

        logprob = dist.log_prob(action)  # was dist.log_prob(a_avg): wrong evaluation point
        logprob -= 2 * (math.log(2) - action - self.soft_plus(action * -2))  # fix logprob using SoftPlus
        return action.tanh(), logprob.sum(1)

# PPO policy: state-dependent Gaussian mean with a state-independent learnable log-std.
class ActorPPO(ActorBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)

        # log of the action std, shared across states and learned with the policy
        self.action_std_log = nn.Parameter(torch.zeros((1, action_dim)), requires_grad=True)  # trainable parameter

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic (mean) action, tanh-squashed into [-1, 1]."""
        return self.net(self.state_norm(state)).tanh()  # action.tanh()

    def _distribution(self, state: Tensor):
        """Build the Gaussian action distribution for a raw state batch."""
        action_avg = self.net(self.state_norm(state))
        action_std = self.action_std_log.exp()
        return self.ActionDist(action_avg, action_std)

    def get_action(self, state: Tensor) -> (Tensor, Tensor):  # for exploration
        """Sample an un-squashed action and return it with its summed log-prob."""
        dist = self._distribution(state)
        action = dist.sample()
        logprob = dist.log_prob(action).sum(1)
        return action, logprob

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Log-prob of a stored action batch and entropy of the current distribution."""
        dist = self._distribution(state)
        logprob = dist.log_prob(action).sum(1)
        entropy = dist.entropy().sum(1)
        return logprob, entropy

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Squash a raw Gaussian sample into the env's [-1, 1] action range."""
        return action.tanh()

# PPO policy for discrete action spaces: a categorical distribution over action logits.
class ActorDiscretePPO(ActorBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)

        self.ActionDist = torch.distributions.Categorical  # replaces the Normal set in ActorBase
        self.soft_max = nn.Softmax(dim=-1)

    def forward(self, state: Tensor) -> Tensor:
        """Greedy action index; argmax over raw logits (softmax is monotone, so unneeded)."""
        logits = self.net(self.state_norm(state))  # action_prob without softmax
        return logits.argmax(dim=1)  # get the indices of discrete action

    def get_action(self, state: Tensor) -> (Tensor, Tensor):
        """Sample an action index and return it with its log-probability."""
        a_prob = self.soft_max(self.net(self.state_norm(state)))
        a_dist = self.ActionDist(a_prob)
        action = a_dist.sample()
        return action, a_dist.log_prob(action)

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Log-prob of stored actions and entropy of the current distribution."""
        a_prob = self.soft_max(self.net(self.state_norm(state)))  # action.shape == (batch_size, 1), action.dtype = torch.int
        dist = self.ActionDist(a_prob)
        return dist.log_prob(action.squeeze(1)), dist.entropy()

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Cast sampled indices to int64 for the environment."""
        return action.long()

"""Critic (value network)"""

# Base class for value networks: dimensions plus state/value normalization buffers.
class CriticBase(nn.Module):  # todo state_norm, value_norm
    def __init__(self, state_dim: int, action_dim: int):
        """Store dimensions and register frozen normalization statistics.

        state_dim: dimension of the state vector
        action_dim: dimension of the action vector
        """
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # concrete subclasses assign the MLP here, e.g. build_mlp(...)

        # Frozen normalization statistics: travel with the module but are never trained.
        self.state_avg = nn.Parameter(torch.zeros(state_dim), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones(state_dim), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros(1), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones(1), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored mean and std."""
        return (state - self.state_avg) / self.state_std  # todo state_norm

    def value_re_norm(self, value: Tensor) -> Tensor:
        """Map a normalized value back onto the original value scale."""
        return value * self.value_std + self.value_avg  # todo value_norm

# Q-value critic: Q(s, a) computed from the concatenated state-action vector.
class Critic(CriticBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 1])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        """De-normalized Q value for each (state, action) pair, shape (batch,)."""
        sa = torch.cat((self.state_norm(state), action), dim=1)
        values = self.value_re_norm(self.net(sa))
        return values.squeeze(dim=1)  # q value

# Twin Q critic with shared parameters: a single network whose 2-dim output
# holds both Q estimates.
class CriticTwin(CriticBase):  # shared parameter
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 2])  # two Q values from one net
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def _twin_values(self, state, action):
        """De-normalized (batch, 2) tensor holding both Q estimates."""
        sa = torch.cat((self.state_norm(state), action), dim=1)
        return self.value_re_norm(self.net(sa))

    def forward(self, state, action):
        """Mean of the two Q estimates, shape (batch,)."""
        return self._twin_values(state, action).mean(dim=1)  # mean Q value

    def get_q_min(self, state, action):
        """Element-wise minimum of the two Q estimates (clipped double-Q)."""
        return torch.min(self._twin_values(state, action), dim=1)[0]  # min Q value

    def get_q1_q2(self, state, action):
        """Both Q estimates as two (batch,) tensors."""
        values = self._twin_values(state, action)
        return values[:, 0], values[:, 1]  # two Q values

# State-value critic V(s) for PPO (no action input).
class CriticPPO(CriticBase):
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        """dims: hidden-layer sizes of the MLP. action_dim is unused by the net
        but kept for a signature consistent with the other critics."""
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, 1])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state: Tensor) -> Tensor:
        """De-normalized state value, shape (batch,)."""
        value = self.value_re_norm(self.net(self.state_norm(state)))
        return value.squeeze(1)  # q value

"""utils"""

# Build a multilayer perceptron from a list of layer sizes.
def build_mlp(dims: [int], activation=None, if_raw_out: bool = True) -> nn.Sequential:
    """Build an MLP (MultiLayer Perceptron).

    dims: layer sizes; `dims[0]` is the input dim, `dims[-1]` the output dim.
    activation: activation-layer *class* inserted after each Linear (default nn.ReLU).
    if_raw_out: if True, drop the activation after the final layer so the
        network emits raw (unbounded) values.

    Fixes: the docstring previously documented a non-existent parameter
    `if_remove_out_layer` (the real name is `if_raw_out`), and `activation`
    was annotated as `nn` (a module, not a type).
    """
    if activation is None:
        activation = nn.ReLU
    net_list = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        net_list.extend([nn.Linear(in_dim, out_dim), activation()])
    if if_raw_out:
        del net_list[-1]  # remove the activation of the output layer
    return nn.Sequential(*net_list)

# Initialize a layer with orthogonal weights (gain=std) and a tiny constant bias.
def layer_init_with_orthogonal(layer, std=1.0, bias_const=1e-6):
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)

# Reshape module: views a (batch, ...) tensor as (batch, *target_shape).
class NnReshape(nn.Module):
    def __init__(self, *args):
        """args: target shape of everything after the batch dimension."""
        super().__init__()
        self.args = args

    def forward(self, x):
        return x.view((x.size(0), *self.args))

# DenseNet-style block: each stage concatenates its input with its own output,
# growing the feature dimension from lay_dim to lay_dim * 4.
class DenseNet(nn.Module):  # plan to hyper-param: layer_number
    def __init__(self, lay_dim):
        """lay_dim: feature dimension of the block's input."""
        super().__init__()
        self.dense1 = nn.Sequential(nn.Linear(lay_dim * 1, lay_dim * 1), nn.Hardswish())
        self.dense2 = nn.Sequential(nn.Linear(lay_dim * 2, lay_dim * 2), nn.Hardswish())
        self.inp_dim = lay_dim  # input feature dim
        self.out_dim = lay_dim * 4  # output feature dim after two concat stages

    def forward(self, x1):  # x1.shape==(-1, lay_dim*1)
        # stage 1: concat input with its transform -> (-1, lay_dim*2)
        x2 = torch.cat((x1, self.dense1(x1)), dim=1)
        # stage 2: concat again -> (-1, lay_dim*4)
        x3 = torch.cat((x2, self.dense2(x2)), dim=1)
        return x3

# Pixel-level state encoder: a small CNN tower mapping a batch of HWC uint8
# images to a flat feature vector of size out_dim.
class ConvNet(nn.Module):  # pixel-level state encoder
    def __init__(self, inp_dim, out_dim, image_size=224):
        """inp_dim: input channels; out_dim: output feature size.
        Only image_size 224 or 112 is supported (asserted below).
        The `size=` comments track the spatial resolution after each conv."""
        super().__init__()
        if image_size == 224:
            self.net = nn.Sequential(  # size==(batch_size, inp_dim, 224, 224)
                nn.Conv2d(inp_dim, 32, (5, 5), stride=(2, 2), bias=False),
                nn.ReLU(inplace=True),  # size=110
                nn.Conv2d(32, 48, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=54
                nn.Conv2d(48, 64, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=26
                nn.Conv2d(64, 96, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=12
                nn.Conv2d(96, 128, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=5
                nn.Conv2d(128, 192, (5, 5), stride=(1, 1)),
                nn.ReLU(inplace=True),  # size=1
                NnReshape(-1),  # flatten (batch_size, 192, 1, 1) ==> (batch_size, 192)
                nn.Linear(192, out_dim),  # size==(batch_size, out_dim)
            )
        elif image_size == 112:
            self.net = nn.Sequential(  # size==(batch_size, inp_dim, 112, 112)
                nn.Conv2d(inp_dim, 32, (5, 5), stride=(2, 2), bias=False),
                nn.ReLU(inplace=True),  # size=54
                nn.Conv2d(32, 48, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=26
                nn.Conv2d(48, 64, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=12
                nn.Conv2d(64, 96, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=5
                nn.Conv2d(96, 128, (5, 5), stride=(1, 1)),
                nn.ReLU(inplace=True),  # size=1
                NnReshape(-1),  # flatten (batch_size, 128, 1, 1) ==> (batch_size, 128)
                nn.Linear(128, out_dim),  # size==(batch_size, out_dim)
            )
        else:
            # unsupported resolution: fail loudly
            assert image_size in {224, 112}

    def forward(self, x):
        # assert x.shape == (batch_size, image_size, image_size, inp_dim)
        # HWC -> CHW for Conv2d
        x = x.permute(0, 3, 1, 2)
        # scale uint8 pixel values [0, 255] roughly into [-1, 1]
        x = x / 128.0 - 1.0
        return self.net(x)

    # smoke test: run a dummy uint8 image batch through the encoder
    @staticmethod
    def check():
        inp_dim = 3
        out_dim = 32
        batch_size = 2
        image_size = [224, 112][1]
        # from elegantrl.net import Conv2dNet
        net = ConvNet(inp_dim, out_dim, image_size)

        image = torch.ones((batch_size, image_size, image_size, inp_dim), dtype=torch.uint8) * 255
        print(image.shape)
        output = net(image)
        print(output.shape)
