import torch
import numpy as np


def _flatten(T, N, x):
    """
    将数据展平为 (T*N, *x.shape[2:]) 的形状
    :param T: 时间步数
    :param N: 智能体数量
    :param x: 输入数据
    :return: 展平后的数据
    """
    return x.reshape(T * N, *x.shape[2:])


def _cast(x):
    """
    重新排列数据维度并展平
    将 (T, N, L, D) 转换为 (T*N*L, D) 的形状
    :param x: 输入数据
    :return: 重新排列后的数据
    """
    return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])


def _shuffle_agent_grid(x, y):
    """
    创建智能体网格的行列索引，用于数据重排
    :param x: 行数
    :param y: 列数
    :return: 行索引和列索引
    """
    rows = np.indices((x, y))[0]  # 创建行索引
    cols = np.stack([np.arange(y) for _ in range(x)])  # 创建列索引
    return rows, cols


class SharedReplayBuffer(object):
    """
    Shared replay buffer storing training data for multi-agent RL.

    Collects observations, actions, action log-probabilities, rewards,
    value predictions, returns, advantages, and termination/activity masks
    from parallel rollout environments over one episode, and serves
    shuffled mini-batches for training.

    :param args: (argparse.Namespace) arguments containing relevant model,
        policy and environment information (uses episode_length,
        n_rollout_threads, gamma, gae_lambda).
    :param num_agents: (int) number of agents in the environment.
    :param obs_shape: (tuple) shape of a single agent's observation.
    :param act_shape: (int) size of the action dimension (number of
        discrete actions).
    """

    def __init__(self, args, num_agents, obs_shape, act_shape):
        """Initialize the shared replay buffer and all storage arrays."""
        # Basic rollout parameters.
        self.episode_length = args.episode_length  # steps per episode
        self.n_rollout_threads = args.n_rollout_threads  # parallel envs
        self.gamma = args.gamma  # discount factor
        self.gae_lambda = args.gae_lambda  # GAE lambda parameter
        self.num_agents = num_agents  # number of agents

        # Shapes taken directly from the caller.
        self.obs_shape = obs_shape  # per-agent observation shape
        self.act_shape = act_shape  # action dimension

        # Local observations per agent. Index 0 holds the initial obs,
        # hence episode_length + 1 entries along the time axis.
        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)

        # Value predictions, computed returns and advantages.
        self.value_preds = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)  # V(s) predictions
        self.returns = np.zeros_like(self.value_preds)  # computed returns
        self.advantages = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)  # GAE advantages

        # Availability mask for discrete actions (1 = action allowed).
        self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_shape),
                                         dtype=np.float32)

        # Actions taken, their log-probabilities, and per-step rewards.
        self.actions = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.action_log_probs = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.rewards = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)

        # Masks: `masks` marks environment termination, `bad_masks` marks
        # truncation (e.g. a time limit rather than a real terminal state),
        # `active_masks` marks whether an agent is alive/active.
        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1),
                             dtype=np.float32)
        self.bad_masks = np.ones_like(self.masks)
        self.active_masks = np.ones_like(self.masks)

        # Current write position within the episode (circular).
        self.step = 0

    def insert(self, obs, actions, action_log_probs,
               value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
        """
        Insert one time step of data into the buffer.

        :param obs: (np.ndarray) local agent observations.
        :param actions: (np.ndarray) actions taken by agents.
        :param action_log_probs: (np.ndarray) log-probabilities of the actions.
        :param value_preds: (np.ndarray) value-function predictions at this step.
        :param rewards: (np.ndarray) rewards collected at this step.
        :param masks: (np.ndarray) whether the environment has terminated.
        :param bad_masks: (np.ndarray) whether termination was due to a time limit.
        :param active_masks: (np.ndarray) whether agents are active or dead.
        :param available_actions: (np.ndarray) actions available to each agent;
            if None, all actions are considered available.
        """
        # Observations and masks are written to step + 1 because index 0
        # holds the pre-episode initial state.
        self.obs[self.step + 1] = obs.copy()
        self.actions[self.step] = actions.copy()
        self.action_log_probs[self.step] = action_log_probs.copy()
        self.value_preds[self.step] = value_preds.copy()
        self.rewards[self.step] = rewards.copy()
        self.masks[self.step + 1] = masks.copy()

        # Optional data.
        if bad_masks is not None:
            self.bad_masks[self.step + 1] = bad_masks.copy()
        if active_masks is not None:
            self.active_masks[self.step + 1] = active_masks.copy()
        if available_actions is not None:
            self.available_actions[self.step + 1] = available_actions.copy()

        # Advance the write position; modulo makes the buffer circular.
        self.step = (self.step + 1) % self.episode_length

    def after_update(self):
        """
        Copy the last time step's data to the first index.

        Called after a model update to seed the next episode with the
        final state of the previous one.
        """
        self.obs[0] = self.obs[-1].copy()
        self.masks[0] = self.masks[-1].copy()
        self.bad_masks[0] = self.bad_masks[-1].copy()
        self.active_masks[0] = self.active_masks[-1].copy()
        if self.available_actions is not None:
            self.available_actions[0] = self.available_actions[-1].copy()

    def compute_returns(self, next_value, value_normalizer=None):
        """
        Compute returns and advantages with GAE (Generalized Advantage
        Estimation), optionally denormalizing value predictions.

        :param next_value: (np.ndarray) value prediction for the step after
            the last episode step.
        :param value_normalizer: (PopArt) if not None, a value normalizer
            whose ``denormalize`` is applied to stored value predictions;
            if None, raw value predictions are used directly.
        """
        # Bootstrap value for the final step.
        self.value_preds[-1] = next_value
        gae = 0  # running GAE accumulator

        # Walk backwards through the episode accumulating the GAE recursion.
        for step in reversed(range(self.rewards.shape[0])):
            # BUGFIX: the original unconditionally called
            # value_normalizer.denormalize even though the parameter
            # defaults to None, which raised AttributeError. Fall back to
            # the raw value predictions when no normalizer is given.
            if value_normalizer is None:
                v_next = self.value_preds[step + 1]
                v_curr = self.value_preds[step]
            else:
                v_next = value_normalizer.denormalize(self.value_preds[step + 1])
                v_curr = value_normalizer.denormalize(self.value_preds[step])

            # TD residual; masks zero out bootstrapping across terminals.
            delta = self.rewards[step] + self.gamma * v_next * self.masks[step + 1] - v_curr
            gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae

            self.advantages[step] = gae
            self.returns[step] = gae + v_curr

    def feed_forward_generator_transformer(self, advantages, num_mini_batch=None, mini_batch_size=None):
        """
        Yield training mini-batches organized for a Transformer-style model.

        :param advantages: (np.ndarray) advantage estimates.
        :param num_mini_batch: (int) number of mini-batches to split the batch into.
        :param mini_batch_size: (int) number of samples per mini-batch; if None,
            derived from ``num_mini_batch``.
        :yield: tuples of training batch data.
        """
        # Data dimensions.
        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
        batch_size = n_rollout_threads * episode_length

        # Derive the mini-batch size when only a count was given.
        if mini_batch_size is None:
            assert batch_size >= num_mini_batch, (
                "PPO requires the number of processes ({}) "
                "* number of steps ({}) = {} "
                "to be greater than or equal to the number of PPO mini batches ({})."
                "".format(n_rollout_threads, episode_length,
                          n_rollout_threads * episode_length,
                          num_mini_batch))
            mini_batch_size = batch_size // num_mini_batch

        # Random permutation of the batch, split into mini-batch index sets.
        rand = torch.randperm(batch_size).numpy()
        sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]
        # Identity index grid over (batch, agent); keeps agent axis intact.
        rows, cols = _shuffle_agent_grid(batch_size, num_agents)

        # Flatten (T, threads) into one batch axis; drop the extra final
        # step stored in (episode_length + 1)-long arrays.
        obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:])
        obs = obs[rows, cols]

        actions = self.actions.reshape(-1, *self.actions.shape[2:])
        actions = actions[rows, cols]

        available_actions = None
        if self.available_actions is not None:
            available_actions = self.available_actions[:-1].reshape(-1, *self.available_actions.shape[2:])
            available_actions = available_actions[rows, cols]

        value_preds = self.value_preds[:-1].reshape(-1, *self.value_preds.shape[2:])
        value_preds = value_preds[rows, cols]
        returns = self.returns[:-1].reshape(-1, *self.returns.shape[2:])
        returns = returns[rows, cols]

        masks = self.masks[:-1].reshape(-1, *self.masks.shape[2:])
        masks = masks[rows, cols]
        active_masks = self.active_masks[:-1].reshape(-1, *self.active_masks.shape[2:])
        active_masks = active_masks[rows, cols]

        action_log_probs = self.action_log_probs.reshape(-1, *self.action_log_probs.shape[2:])
        action_log_probs = action_log_probs[rows, cols]

        advantages = advantages.reshape(-1, *advantages.shape[2:])
        advantages = advantages[rows, cols]

        # Emit mini-batches. Shapes: [T*L, N, Dim] --> [index, N, Dim]
        # --> [index*N, Dim].
        for indices in sampler:
            obs_batch = obs[indices].reshape(-1, *obs.shape[2:])
            actions_batch = actions[indices].reshape(-1, *actions.shape[2:])

            if available_actions is not None:
                available_actions_batch = available_actions[indices].reshape(-1, *available_actions.shape[2:])
            else:
                available_actions_batch = None

            value_preds_batch = value_preds[indices].reshape(-1, *value_preds.shape[2:])
            return_batch = returns[indices].reshape(-1, *returns.shape[2:])

            masks_batch = masks[indices].reshape(-1, *masks.shape[2:])
            active_masks_batch = active_masks[indices].reshape(-1, *active_masks.shape[2:])

            old_action_log_probs_batch = action_log_probs[indices].reshape(-1, *action_log_probs.shape[2:])

            # `advantages` was reshaped above, so it cannot be None here;
            # the original's None-guard was unreachable and has been removed.
            adv_targ = advantages[indices].reshape(-1, *advantages.shape[2:])

            yield obs_batch, actions_batch, \
                value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \
                adv_targ, available_actions_batch
