import torch
from typing import Tuple
from torch import Tensor

from elegantrl.train.config import Config
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import ActorPPO, CriticPPO
from elegantrl.agents.net import ActorDiscretePPO


class AgentPPO(AgentBase):
    """
    PPO algorithm. "Proximal Policy Optimization Algorithms". John Schulman et al. 2017.

    net_dims: the middle layer dimensions of the MultiLayer Perceptron (MLP)
    state_dim: the dimension of the state (the number of state variables)
    action_dim: the dimension of the action (or the number of discrete actions)
    gpu_id: the GPU id of the training device. Falls back to CPU when CUDA is not available.
    args: the arguments for agent training. `args = Config()`
    """

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # Actor (policy) network class; a subclass may pre-set `act_class` before calling this, else ActorPPO.
        self.act_class = getattr(self, "act_class", ActorPPO)
        # Critic (value) network class; a subclass may pre-set `cri_class` before calling this, else CriticPPO.
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        # The base class builds the networks, optimizers, device, etc.
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        # PPO is an on-policy algorithm.
        self.if_off_policy = False

        # Clip range of the policy probability ratio, used to limit the policy update step.
        self.ratio_clip = getattr(args, "ratio_clip", 0.25)  # `ratio.clamp(1 - clip, 1 + clip)`
        # Lambda of GAE (Generalized Advantage Estimation), helpful for sparse rewards.
        self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.95)  # could be 0.50~0.99 # GAE for sparse reward
        # Entropy regularization coefficient; larger values encourage exploration.
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.01)  # could be 0.00~0.20
        # Store the entropy coefficient as a tensor on the training device.
        self.lambda_entropy = torch.tensor(self.lambda_entropy, dtype=torch.float32, device=self.device)

        # Choose how advantages are estimated; both estimators run in reverse time order.
        if getattr(args, 'if_use_v_trace', False):
            # V-trace style advantage estimation.
            self.get_advantages = self.get_advantages_vtrace  # get advantage value in reverse time series (V-trace)
        else:
            # Plain GAE using the critic network for the tail bootstrap.
            self.get_advantages = self.get_advantages_origin  # get advantage value using critic network
        # Running average of the value target, used for normalization.
        self.value_avg = torch.zeros(1, dtype=torch.float32, device=self.device)
        # Running standard deviation of the value target, used for normalization.
        self.value_std = torch.ones(1, dtype=torch.float32, device=self.device)

    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories by interacting with a single environment instance.

        env: the RL training environment. It must support `env.reset()` and `env.step()`. A single env.
        horizon_len: the number of exploration steps collected before each network update.
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
            env_num == 1
            states.shape == (horizon_len, env_num, state_dim)
            actions.shape == (horizon_len, env_num, action_dim)
            logprobs.shape == (horizon_len, env_num)
            rewards.shape == (horizon_len, env_num)
            undones.shape == (horizon_len, env_num)
        """
        # Pre-allocate the trajectory buffers on the training device.
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        # Resume exploration from the last stored state.
        state = self.last_state  # shape == (1, state_dim) for a single env.

        # Hoist frequently used bound methods out of the loop.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            # Sample an action and its log-probability from the current policy.
            action, logprob = get_action(state)
            states[t] = state

            # Convert the (single-env) action tensor into a numpy array the env accepts.
            ary_action = convert(action[0]).detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)  # next_state
            # Reset the env when the episode ends, otherwise keep the next state.
            ary_state = env.reset() if done else ary_state  # ary_state.shape == (state_dim, )
            # Move the next state onto the device with a leading env dimension.
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device).unsqueeze(0)
            # Record the transition.
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done

        # Remember the final state so the next exploration continues from here.
        self.last_state = state  # state.shape == (1, state_dim) for a single env.

        # Scale rewards, and turn `done` flags into `undone` masks (1.0 while the episode continues).
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones

    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories by interacting with a vectorized environment instance.

        env: the RL training environment. It must support `env.reset()` and `env.step()`. A vectorized env.
        horizon_len: the number of exploration steps collected before each network update.
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
            states.shape == (horizon_len, env_num, state_dim)
            actions.shape == (horizon_len, env_num, action_dim)
            logprobs.shape == (horizon_len, env_num)
            rewards.shape == (horizon_len, env_num)
            undones.shape == (horizon_len, env_num)
        """
        # Pre-allocate the trajectory buffers on the training device.
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        # Resume exploration from the last stored states.
        state = self.last_state  # shape == (env_num, state_dim) for a vectorized env.

        # Hoist frequently used bound methods out of the loop.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            # Sample actions and their log-probabilities from the current policy.
            action, logprob = get_action(state)
            states[t] = state

            # The vectorized env resets finished sub-envs internally and returns tensors.
            state, reward, done, _ = env.step(convert(action))  # next_state
            # Record the transition.
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done

        # Remember the final states so the next exploration continues from here.
        self.last_state = state

        # Scale rewards, and turn `done` flags into `undone` masks (1.0 while the episode continues).
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones

    def update_net(self, buffer) -> Tuple[float, ...]:
        """
        Update the actor and critic networks with the collected on-policy buffer.

        buffer: `(states, actions, logprobs, rewards, undones)` as returned by the explore methods.
        return: `(avg_critic_objective, avg_actor_objective, action_std_log)` for logging.
        """
        with torch.no_grad():
            # Unpack the on-policy rollout.
            states, actions, logprobs, rewards, undones = buffer
            # Number of collected steps per environment.
            buffer_size = states.shape[0]
            # Number of parallel environments.
            buffer_num = states.shape[1]

            '''获取优势值和奖励总和'''
            # Evaluate the critic in chunks to avoid running out of GPU memory.
            bs = 2 ** 10  # set a smaller 'batch_size' to avoiding out of GPU memory.
            values = torch.empty_like(rewards)  # values.shape == (buffer_size, buffer_num)
            for i in range(0, buffer_size, bs):
                for j in range(buffer_num):
                    values[i:i + bs, j] = self.cri(states[i:i + bs, j])

            # Advantages via GAE or V-trace; reward_sums become the critic's regression targets.
            advantages = self.get_advantages(rewards, undones, values)  # shape == (buffer_size, buffer_num)
            reward_sums = advantages + values  # shape == (buffer_size, buffer_num)
            # Free intermediate tensors before the update loop.
            del rewards, undones, values

            # Normalize the advantages.
            # NOTE(review): the mean is taken over ALL elements but the std over dim=0 (per env) —
            # looks inconsistent; confirm this mix is intended.
            advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-4)

            # Update the running statistics used to normalize states and returns.
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=reward_sums.reshape((-1,))
            )
        # assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size, buffer_num)

        '''更新网络'''
        # Accumulators for the average objectives reported back to the trainer.
        obj_critics = 0.0
        obj_actors = 0.0
        # Sample only from the first (buffer_size - 1) steps.
        sample_len = buffer_size - 1

        # Each transition is reused `repeat_times` times on average.
        update_times = int(buffer_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for _ in range(update_times):
            # Draw a random mini-batch of flat indices over (step, env) pairs.
            ids = torch.randint(sample_len * buffer_num, size=(self.batch_size,), requires_grad=False)
            # Index along the time (step) dimension.
            ids0 = torch.fmod(ids, sample_len)  # ids % sample_len
            # Index along the environment dimension.
            ids1 = torch.div(ids, sample_len, rounding_mode='floor')  # ids // sample_len

            # Gather the mini-batch.
            state = states[ids0, ids1]
            action = actions[ids0, ids1]
            logprob = logprobs[ids0, ids1]
            advantage = advantages[ids0, ids1]
            reward_sum = reward_sums[ids0, ids1]

            # Critic step: regress the predicted value towards the reward sum.
            value = self.cri(state)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, reward_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)

            # Actor step: PPO clipped-surrogate objective plus an entropy bonus.
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            # Importance ratio between the new policy and the old (behavior) policy.
            ratio = (new_logprob - logprob.detach()).exp()
            # Unclipped surrogate.
            surrogate1 = advantage * ratio
            # Clipped surrogate, limiting how far the policy can move.
            surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            # Pessimistic bound: element-wise minimum of the two surrogates.
            obj_surrogate = torch.min(surrogate1, surrogate2).mean()

            # Maximize surrogate + entropy; the optimizer minimizes, hence the minus sign below.
            obj_actor = obj_surrogate + obj_entropy.mean() * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, -obj_actor)

            # Accumulate scalar objectives for logging.
            obj_critics += obj_critic.item()
            obj_actors += obj_actor.item()
        # Mean log of the action std (for logging); 0 when the actor has no `action_std_log`.
        a_std_log = self.act.action_std_log.mean() if hasattr(self.act, 'action_std_log') else torch.zeros(1)
        return obj_critics / update_times, obj_actors / update_times, a_std_log.item()

    def get_advantages_origin(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """
        Compute GAE (Generalized Advantage Estimation) advantages in reverse time order.

        rewards: shape == (horizon_len, env_num)
        undones: shape == (horizon_len, env_num); 1.0 while the episode continues, 0.0 at terminal steps
        values: critic value estimates, shape == (horizon_len, env_num)
        return: advantages, shape == (horizon_len, env_num)
        """
        advantages = torch.empty_like(values)  # advantage value

        # Discount masked by termination: gamma while undone, 0 at terminal steps.
        masks = undones * self.gamma
        horizon_len = rewards.shape[0]

        # Bootstrap from the critic's value of the state following the last collected step.
        next_value = self.cri(self.last_state).detach()

        advantage = torch.zeros_like(next_value)  # last advantage value by GAE (Generalized Advantage Estimate)
        for t in range(horizon_len - 1, -1, -1):
            # TD target: r_t + gamma * V(s_{t+1}); `next_value` holds V(s_{t+1}) here.
            next_value = rewards[t] + masks[t] * next_value
            # GAE recursion: A_t = delta_t + gamma * lambda * A_{t+1}, with delta_t = TD target - V(s_t).
            advantages[t] = advantage = next_value - values[t] + masks[t] * self.lambda_gae_adv * advantage
            # Prepare V(s_t) as the next iteration's V(s_{t+1}).
            next_value = values[t]
        return advantages

    def get_advantages_vtrace(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """
        Compute advantages in reverse time order using a V-trace style recursion.

        rewards: shape == (horizon_len, env_num)
        undones: shape == (horizon_len, env_num); 1.0 while the episode continues, 0.0 at terminal steps
        values: critic value estimates, shape == (horizon_len, env_num)
        return: advantages, shape == (horizon_len, env_num)
        """
        advantages = torch.empty_like(values)  # advantage value

        # Discount masked by termination: gamma while undone, 0 at terminal steps.
        masks = undones * self.gamma
        horizon_len = rewards.shape[0]

        # NOTE(review): unlike `get_advantages_origin`, the tail here is bootstrapped with zeros
        # instead of the critic's value of `self.last_state` — confirm this is intended.
        advantage = torch.zeros_like(values[0])  # last advantage value by GAE (Generalized Advantage Estimate)
        for t in range(horizon_len - 1, -1, -1):
            # A_t = r_t - V(s_t) + gamma * (V(s_{t+1}) + lambda * A_{t+1})
            advantages[t] = rewards[t] - values[t] + masks[t] * advantage
            # Carry V(s_t) + lambda * A_t backward as the bootstrap term for step t-1.
            advantage = values[t] + self.lambda_gae_adv * advantages[t]
        return advantages


class AgentDiscretePPO(AgentPPO):
    """PPO agent for discrete action spaces; identical to AgentPPO except for the actor class
    and the exploration methods, which record integer actions with a trailing dim of 1."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # Actor network class for discrete actions; a subclass may pre-set `act_class`, else ActorDiscretePPO.
        self.act_class = getattr(self, "act_class", ActorDiscretePPO)
        # The parent sets `cri_class` and performs the rest of the initialization.
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)

    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories by interacting with a single environment instance.

        env: the RL training environment. It must support `env.reset()` and `env.step()`. A single env.
        horizon_len: the number of exploration steps collected before each network update.
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
            env_num == 1
            states.shape == (horizon_len, env_num, state_dim)
            actions.shape == (horizon_len, env_num, 1)
            logprobs.shape == (horizon_len, env_num)
            rewards.shape == (horizon_len, env_num)
            undones.shape == (horizon_len, env_num)
        """
        # Pre-allocate the trajectory buffers on the training device.
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        # Discrete actions are stored as integers with a trailing action dimension of 1.
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.int32).to(self.device)  # only different
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        # Resume exploration from the last stored state.
        state = self.last_state  # shape == (1, state_dim) for a single env.

        # Hoist frequently used bound methods out of the loop.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            # Sample an action and its log-probability from the current policy.
            action, logprob = get_action(state)
            states[t] = state

            # A single discrete action is passed to the env as a Python int.
            int_action = convert(action).item()
            ary_state, reward, done, _ = env.step(int_action)  # next_state
            # Reset the env when the episode ends, otherwise keep the next state.
            state = torch.as_tensor(env.reset() if done else ary_state,
                                    dtype=torch.float32, device=self.device).unsqueeze(0)
            # Record the transition.
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done

        # Remember the final state so the next exploration continues from here.
        self.last_state = state

        # Scale rewards, and turn `done` flags into `undone` masks (1.0 while the episode continues).
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones

    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories by interacting with a vectorized environment instance.

        env: the RL training environment. It must support `env.reset()` and `env.step()`. A vectorized env.
        horizon_len: the number of exploration steps collected before each network update.
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
            states.shape == (horizon_len, env_num, state_dim)
            actions.shape == (horizon_len, env_num, 1)
            logprobs.shape == (horizon_len, env_num)
            rewards.shape == (horizon_len, env_num)
            undones.shape == (horizon_len, env_num)
        """
        # Pre-allocate the trajectory buffers on the training device.
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        # Discrete actions with a trailing action dimension of 1.
        # NOTE(review): `explore_one_env` uses int32 here; this float32 looks inconsistent — confirm.
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        # Resume exploration from the last stored states.
        state = self.last_state  # shape == (env_num, state_dim) for a vectorized env.

        # Hoist frequently used bound methods out of the loop.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            # Sample actions and their log-probabilities from the current policy.
            action, logprob = get_action(state)
            states[t] = state

            # The vectorized env resets finished sub-envs internally and returns tensors.
            state, reward, done, _ = env.step(convert(action))  # next_state
            # Record the transition.
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done

        # Remember the final states so the next exploration continues from here.
        self.last_state = state

        # BUGFIX: removed `actions = actions.unsqueeze(2)`. `actions` is already allocated with a
        # trailing action dimension of 1 above, so the unsqueeze produced a 4-D tensor of shape
        # (horizon_len, num_envs, 1, 1) — contradicting the documented shape, `explore_one_env`,
        # and the `actions[ids0, ids1]` mini-batch indexing in the inherited `update_net`.
        # Scale rewards, and turn `done` flags into `undone` masks (1.0 while the episode continues).
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones
