import time
import numpy as np
from functools import reduce
import torch
from runner.shared.base_runner import Runner


# 将PyTorch张量转换为NumPy数组
def _t2n(x):
    return x.detach().cpu().numpy()


# 空战环境专用的训练运行器
class AIRCOMBATRunner(Runner):
    """Training runner specialised for the air-combat environment.

    Implements the standard on-policy loop on top of the shared ``Runner``
    base: warmup -> collect rollouts -> compute returns -> train ->
    log / save / eval.
    """

    def __init__(self, config):
        """
        Initialise the air-combat training runner.
        :param config: dict with the training configuration (forwarded to Runner).
        """
        super(AIRCOMBATRunner, self).__init__(config)

    def run2(self):
        """
        Simplified entry point used for evaluation only.
        Runs a single evaluation pass and performs no training.
        """
        for episode in range(1):
            self.eval(episode)

    def run(self):
        """
        Main training loop.
        Full pipeline: warmup, data collection, model updates, evaluation.
        """
        self.warmup()  # reset envs and seed the rollout buffer before training

        start = time.time()
        # Number of training episodes:
        # total env steps / steps per episode / parallel envs.
        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads

        # Per-thread battle statistics from the previous logging interval;
        # used below to derive the incremental win rate.
        last_battles_game = np.zeros(self.n_rollout_threads, dtype=np.float32)  # cumulative games at last log
        last_battles_won = np.zeros(self.n_rollout_threads, dtype=np.float32)  # cumulative wins at last log

        # Main loop over training episodes.
        for episode in range(episodes):
            # Steps within one episode.
            for step in range(self.episode_length):
                # Query the policy: value estimates, actions, action log-probs.
                values, actions, action_log_probs = self.collect(step)

                # Step the (vectorised) environments with the chosen actions.
                obs, rewards, dones, infos, available_actions = self.envs.step(actions)

                # Bundle everything collected this step.
                data = obs, rewards, dones, infos, available_actions, \
                    values, actions, action_log_probs

                # Push the transition into the rollout buffer.
                self.insert(data)

            # Compute returns/advantages, then update the networks.
            self.compute()  # value targets and advantage estimates
            train_infos = self.train()  # policy update; returns training stats

            # Env steps consumed so far.
            # FIX: multiply by n_rollout_threads so the count matches
            # num_env_steps (each loop step advances every parallel env once,
            # mirroring the `episodes` computation above). Previously the
            # factor was missing, skewing the FPS figure and the logging
            # x-axis by n_rollout_threads.
            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads

            # Periodically save the model.
            if episode % self.save_interval == 0 or episode == episodes - 1:
                self.save(episode)

            # Periodically log training progress.
            if episode % self.log_interval == 0:
                end = time.time()
                # Print training progress.
                print("\n updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
                      .format(episode,  # current episode
                              episodes,  # total episodes
                              total_num_steps,  # env steps so far
                              self.num_env_steps,  # target env steps
                              int(total_num_steps / (end - start))))  # frames per second

                # Win-rate bookkeeping.
                battles_won = []  # cumulative wins per thread
                battles_game = []  # cumulative games per thread
                incre_battles_won = []  # wins since the last log
                incre_battles_game = []  # games since the last log

                # Pull battle statistics out of the env infos.
                for i, info in enumerate(infos):
                    if 'battles_won' in info[0].keys():
                        battles_won.append(info[0]['battles_won'])
                        incre_battles_won.append(info[0]['battles_won'] - last_battles_won[i])
                    if 'battles_game' in info[0].keys():
                        battles_game.append(info[0]['battles_game'])
                        incre_battles_game.append(info[0]['battles_game'] - last_battles_game[i])

                # Win rate over the games played since the last log.
                incre_win_rate = np.sum(incre_battles_won) / np.sum(incre_battles_game) if np.sum(
                    incre_battles_game) > 0 else 0.0
                print("incre win rate is {}.".format(incre_win_rate))

                # Log the incremental win rate.
                self.writter.add_scalars("incre_win_rate", {"incre_win_rate": incre_win_rate}, total_num_steps)

                # Remember the cumulative counts for the next interval.
                # NOTE(review): this rebinds the np.zeros arrays to plain
                # lists; indexing still works as long as every env reports
                # both keys every interval — confirm against the env.
                last_battles_game = battles_game
                last_battles_won = battles_won

                # Fraction of inactive (dead) agent steps in the buffer —
                # used to monitor training health.
                train_infos['dead_ratio'] = 1 - self.buffer.active_masks.sum() / reduce(lambda x, y: x * y, list(
                    self.buffer.active_masks.shape))

                # Log the training stats.
                self.log_train(train_infos, total_num_steps)

            # Periodically evaluate the policy.
            if episode % self.eval_interval == 0 and self.use_eval:
                self.eval(total_num_steps)

    def warmup(self):
        """
        Pre-training warmup.
        Resets the environments and writes the initial observations and
        available actions into slot 0 of the rollout buffer.
        """
        # Reset the environments; obtain the initial observations (obs) and
        # each agent's available actions.
        obs, available_actions = self.envs.reset()

        # Seed the buffer with the initial data.
        self.buffer.obs[0] = obs.copy()  # local observations
        self.buffer.available_actions[0] = available_actions.copy()  # available actions

    @torch.no_grad()  # inference only — no gradients needed
    def collect(self, step):
        """
        Collect one step of rollout data.
        Queries the policy for actions and value estimates at buffer `step`.
        :return: (values, actions, action_log_probs), each split per rollout thread.
        """
        self.trainer.prep_rollout()  # put the policy into inference mode

        # Fetch observations and available actions for this step, keeping the
        # multi-agent dimension structure intact.
        # NOTE(review): trailing dims 36 / 7 below are env-specific per the
        # original author's comments — confirm against the env config.
        obs_batch = self.buffer.obs[step]  # shape: (n_rollout_threads, num_agents, 36)
        available_actions_batch = self.buffer.available_actions[step]  # shape: (n_rollout_threads, num_agents, 7)
        
        # Convert to PyTorch tensors on the training device.
        obs_tensor = torch.FloatTensor(obs_batch).to(self.device)
        available_actions_tensor = torch.FloatTensor(available_actions_batch).to(self.device)
        
        # The MAT policy returns exactly three values: value, action, action_log_prob.
        value, action, action_log_prob = self.trainer.policy.get_actions(
            obs_tensor,  # joint multi-agent observations
            available_actions_tensor  # action availability mask
        )

        # Split the batched outputs back into per-environment arrays,
        # i.e. [n_rollout_threads, agents, dim] layout.
        values = np.array(np.split(_t2n(value), self.n_rollout_threads))  # value estimates
        actions = np.array(np.split(_t2n(action), self.n_rollout_threads))  # chosen actions
        action_log_probs = np.array(np.split(_t2n(action_log_prob), self.n_rollout_threads))  # log-probs of actions

        # Return the collected data.
        return values, actions, action_log_probs

    def insert(self, data):
        """
        Insert one step of collected data into the rollout buffer.
        :param data: tuple of (obs, rewards, dones, infos, available_actions,
                     values, actions, action_log_probs).
        """
        # Unpack the data tuple.
        obs, rewards, dones, infos, available_actions, \
            values, actions, action_log_probs = data

        # Normalise shapes.
        dones = np.array(dones).astype(bool)  # ensure boolean array
        dones = dones.squeeze()  # drop singleton dims
        # NOTE(review): the per-agent indexing dones[i] below assumes dones
        # squeezes to shape (num_agents,), i.e. n_rollout_threads == 1 —
        # confirm before running with multiple rollout threads.
        
        rewards = np.array(rewards)  # ensure numpy array
        rewards = rewards.squeeze()  # drop singleton dims
        rewards = rewards.reshape(self.n_rollout_threads, self.num_agents, 1)  # reshape to (n_rollout_threads, n_agents, 1)
        
        # Masks: zeroed for every thread when the whole episode is done.
        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        if np.all(dones):  # all agents finished
            masks[:] = 0.0

        # Active masks: zero out agents that are individually done (dead).
        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        for i in range(self.num_agents):
            if dones[i]:  # agent i is dead
                active_masks[:, i] = 0.0
        if np.all(dones):  # episode boundary: reset all agents to active
            active_masks[:] = 1.0

        # Bad-transition masks: in this environment every transition is
        # considered valid, so these are always 1.
        bad_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)

        # Write everything into the buffer.
        self.buffer.insert(obs, actions, action_log_probs, values,
                           rewards, masks, bad_masks, active_masks,
                           available_actions)

    def log_train(self, train_infos, total_num_steps):
        """
        Log training statistics.
        :param train_infos: dict of training statistics.
        :param total_num_steps: total env steps trained so far (x-axis value).
        """
        # Add the mean per-step reward to the training stats.
        train_infos["average_step_rewards"] = np.mean(self.buffer.rewards)

        # Write every stat to the logging backend.
        for k, v in train_infos.items():
            self.writter.add_scalars(k, {k: v}, total_num_steps)

    @torch.no_grad()  # inference only — no gradients needed
    def eval(self, total_num_steps):
        """
        Evaluate the current policy.
        Runs deterministic episodes in the eval envs and logs the win rate
        and average episode reward.
        :param total_num_steps: total env steps trained so far (x-axis value).
        """
        eval_battles_won = 0  # number of battles won during evaluation
        eval_episode = 0  # number of finished eval episodes

        eval_episode_rewards = []  # per-episode reward totals
        # NOTE(review): one_episode_rewards is shared across eval threads and
        # reset whenever ANY thread finishes; with n_eval_rollout_threads > 1
        # rewards from unfinished threads leak into the finished episode's
        # total — confirm evaluation runs single-threaded.
        one_episode_rewards = []  # step rewards of the episode in progress

        # Reset the evaluation environments.
        eval_obs, eval_available_actions = self.eval_envs.reset()

        # Evaluation loop.
        while True:
            self.trainer.prep_rollout()  # put the policy into inference mode

            # Choose actions deterministically (greedy evaluation).
            eval_actions = \
                self.trainer.policy.act(np.concatenate(eval_obs),
                                        np.concatenate(eval_available_actions),
                                        deterministic=True)  # deterministic policy

            # Split the batched actions into per-environment arrays.
            eval_actions = np.array(np.split(_t2n(eval_actions), self.n_eval_rollout_threads))

            # Step the eval environments.
            eval_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = self.eval_envs.step(
                eval_actions)
            one_episode_rewards.append(eval_rewards)

            # An env is finished when all of its agents are done.
            eval_dones_env = np.all(eval_dones, axis=1)

            # Zero the masks of finished environments.
            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1),
                                                          dtype=np.float32)

            # Tally finished episodes per environment.
            for eval_i in range(self.n_eval_rollout_threads):
                if eval_dones_env[eval_i]:  # this env finished an episode
                    eval_episode += 1
                    eval_episode_rewards.append(np.sum(one_episode_rewards, axis=0))  # total episode reward
                    one_episode_rewards = []  # reset for the next episode
                    if eval_infos[eval_i][0]['won']:  # episode was a win
                        eval_battles_won += 1

            # Stop once enough eval episodes have been collected.
            if eval_episode >= self.all_args.eval_episodes:
                # Aggregate and log the results.
                eval_episode_rewards = np.array(eval_episode_rewards)
                eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards}
                self.log_env(eval_env_infos, total_num_steps)  # log env stats

                # Evaluation win rate.
                eval_win_rate = eval_battles_won / eval_episode
                print("eval win rate is {}.".format(eval_win_rate))

                # Log the win rate.
                self.writter.add_scalars("eval_win_rate", {"eval_win_rate": eval_win_rate}, total_num_steps)
                break
