#!/usr/bin/env python3
'''
基本完成适配，待调试

参考链接：
1. https://github.com/lutery/muzero-general.git

训练记录：
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import yaml
import pathlib
import sys
import copy
import os
import pickle

from tensorboardX import SummaryWriter

from lib import model, common

import ale_py

gym.register_envs(ale_py)


class Trainer:
    """MuZero trainer.

    Owns the environment, the MuZero network, the optimizer/scheduler, the
    replay buffer and the main loop (collect experience -> train ->
    periodically checkpoint and evaluate).

    This class was adapted from a Ray-based implementation to run in a
    single process, so all leftover ``.remote(...)`` call sites have been
    replaced with direct method calls.
    """

    def __init__(self, params, device):
        """
        params: dict of hyper-parameters loaded from the YAML config
            (must contain at least lr_init, optimizer, run_name, ...).
        device: torch device the network lives on.
        """
        self.params = params
        self.device = device
        self.lr_init = params['lr_init']
        self.optim = params['optimizer']
        # Counters shared across methods. Initialized here so that a fresh
        # run (nothing restored by load_model) does not crash with
        # AttributeError in train_model() / update_weights().
        self.frame_idx = 0
        self.training_step = 0
        # Everything worth persisting between runs; mirrored into the shared
        # storage and dumped to disk by save_model().
        self.checkpoint = {
            "weights": None,  # network weights, filled in by build_model()
            "optimizer_state": None,
            "total_reward": 0,
            "muzero_reward": 0,
            "opponent_reward": 0,
            "episode_length": 0,
            "mean_value": 0,
            "training_step": 0,
            "lr": 0,
            "total_loss": 0,
            "value_loss": 0,
            "reward_loss": 0,
            "policy_loss": 0,
            "num_played_games": 0,
            "num_played_steps": 0,
            "num_reanalysed_games": 0,
            "terminate": False,
        }
        # Player id; stays 0 for single-player games, kept for the
        # two-player (p1/p2) API of MCTS/GameHistory.
        self.game_id = 0

        self.save_path = os.path.join("saves", "muzero-koolaid")
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + params['run_name'] + "-muzero")

        self.build_env()
        self.build_model()
        self.build_buffer()

    def build_buffer(self):
        """Create shared storage, agent, experience source and replay buffer."""
        self.share_storage = common.SharedStorage(self.checkpoint, self.params)
        self.agent = common.MuZeroAgent(self.params, self.model, self.env, self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.agent, steps_count=1)
        self.replay_buffer = common.ReplayBuffer(self.exp_source, self.share_storage, self.params)
        if self.params['use_last_model_value']:
            # Reanalyse worker: re-estimates stored values with the latest
            # model (MuZero Reanalyze).
            self.reanalyse = common.Reanalyse(self.checkpoint, self.params)

    def build_env(self):
        """Create the training and evaluation environments.

        Uses self.params (the original read the module-level ``params``
        global, which only exists when run as a script).
        """
        self.env = common.wrap_dqn(self.params['env_name'])
        self.test_env = common.wrap_dqn(self.params['env_name'])
        self.obs_shape = self.env.observation_space.shape
        self.params['action_space'] = list(range(self.env.action_space.n))

    def build_model(self):
        """Create the network, optimizer and LR scheduler.

        Raises NotImplementedError for unknown optimizer names.
        """
        self.train_count = 0  # number of completed training steps (see train_model)
        self.model = model.MuZeroNetwork(self.params).to(device=self.device)
        self.checkpoint['weights'] = self.model.get_weights()

        if self.optim == "SGD":
            self.optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=self.params['lr_init'],
                momentum=self.params['momentum'],
                weight_decay=self.params['weight_decay'],
            )
        elif self.optim == "Adam":
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=self.params['lr_init'],
                weight_decay=self.params['weight_decay'],
            )
        else:
            raise NotImplementedError(
                f"{self.params['optimizer']} is not implemented. You can change the optimizer manually in trainer.py."
            )

        # Exponential LR decay: lr_init * lr_decay_rate^(step / lr_decay_steps).
        def lr_lambda(step):
            return self.params['lr_decay_rate'] ** (step / self.params['lr_decay_steps'])

        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lr_lambda
        )

    def load_model(self):
        """Restore the newest checkpoint and the replay buffer from disk, if present."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # NOTE(review): assumes checkpoint filenames contain "epoch" and
            # end in "_<number>.<ext>" — confirm against common.save_checkpoints.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            # Guard: the directory may contain only the replay-buffer pickle.
            if checkpoints:
                # Use self.device, not the module-level global.
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]),
                                        map_location=self.device, weights_only=False)
                self.share_storage.import_checkpoint(checkpoint=checkpoint)
                self.model.load_state_dict(checkpoint['weights'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state'])
                self.scheduler.load_state_dict(checkpoint['scheduler'])
                self.training_step = self.share_storage.get_info('training_step')

                print("加载模型成功")
                # Log the restored learning rate.
                for param_group in self.optimizer.param_groups:
                    print("学习率：", param_group['lr'])

        # Restore the replay buffer from disk when a previous run persisted it.
        replay_buffer_path = os.path.join(self.save_path, "replay_buffer.pkl")
        if os.path.exists(replay_buffer_path):
            with open(replay_buffer_path, "rb") as f:
                replay_buffer_infos = pickle.load(f)
            self.replay_buffer = replay_buffer_infos["buffer"]

    def save_model(self):
        """Snapshot weights/optimizer/scheduler into shared storage and persist to disk."""
        self.share_storage.set_info(
            {
                "weights": copy.deepcopy(self.model.get_weights()),
                "optimizer_state": copy.deepcopy(
                    model.dict_to_cpu(self.optimizer.state_dict())
                ),
                "scheduler": self.scheduler.state_dict(),
            }
        )

        common.save_checkpoints(self.frame_idx, self.share_storage.dump_checkpoint(), self.save_path, "muzero", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")

        # Persist replay buffer to disk (context manager: the original
        # leaked the file handle).
        path = os.path.join(self.save_path, "replay_buffer.pkl")
        print(f"\n\nPersisting replay buffer games to disk at {path}")
        with open(path, "wb") as f:
            pickle.dump({"buffer": self.replay_buffer}, f)

    def __train(self):
        """Run a single optimization step on one sampled batch."""
        # Sample one training batch. (The original also fetched a second,
        # unused batch here — a leftover from an async prefetch pipeline —
        # which silently doubled the sampling cost.)
        index_batch, batch = self.replay_buffer.get_batch()

        (
            priorities,
            total_loss,
            value_loss,
            reward_loss,
            policy_loss,
        ) = self.update_weights(batch)

        # Advance the LR schedule AFTER the optimizer step performed inside
        # update_weights(), per the ordering required since torch 1.1.
        self.scheduler.step()

        if self.params['PER']:
            # Save new priorities in the replay buffer (See https://arxiv.org/abs/1803.00933)
            # Direct call — the original kept a Ray-style `.remote(...)`.
            self.replay_buffer.update_priorities(priorities, index_batch)

        # Publish step count, learning rate and losses to the shared storage
        # (direct call; `set_info` is the plain API used in save_model too).
        self.share_storage.set_info(
            {
                "training_step": self.training_step,
                "lr": self.optimizer.param_groups[0]["lr"],
                "total_loss": total_loss,
                "value_loss": value_loss,
                "reward_loss": reward_loss,
                "policy_loss": policy_loss,
            }
        )

    def update_weights(self, batch):
        """
        Perform one training step over an unrolled trajectory batch.

        Returns (priorities, total_loss, value_loss, reward_loss,
        policy_loss) where priorities feed the prioritized replay and the
        scalars are for logging.
        """
        (
            observation_batch,
            action_batch,
            target_value,
            target_reward,
            target_policy,
            weight_batch,
            gradient_scale_batch,
        ) = batch

        # Keep scalar values around for computing the new PER priorities.
        target_value_scalar = np.array(target_value, dtype="float32")
        priorities = np.zeros_like(target_value_scalar)

        device = next(self.model.parameters()).device
        if self.params['PER']:
            weight_batch = torch.tensor(weight_batch.copy()).float().to(device)
        observation_batch = (
            torch.tensor(np.array(observation_batch)).float().to(device)
        )
        action_batch = torch.tensor(action_batch).long().to(device).unsqueeze(-1)
        target_value = torch.tensor(target_value).float().to(device)
        target_reward = torch.tensor(target_reward).float().to(device)
        target_policy = torch.tensor(target_policy).float().to(device)
        gradient_scale_batch = torch.tensor(gradient_scale_batch).float().to(device)
        # observation_batch: batch, channels, height, width
        # action_batch: batch, num_unroll_steps+1, 1 (unsqueeze)
        # target_value: batch, num_unroll_steps+1
        # target_reward: batch, num_unroll_steps+1
        # target_policy: batch, num_unroll_steps+1, len(action_space)
        # gradient_scale_batch: batch, num_unroll_steps+1

        # Convert scalar value/reward targets into categorical support vectors.
        target_value = model.scalar_to_support(target_value, self.params['support_size'])
        target_reward = model.scalar_to_support(
            target_reward, self.params['support_size']
        )
        # target_value: batch, num_unroll_steps+1, 2*support_size+1
        # target_reward: batch, num_unroll_steps+1, 2*support_size+1

        ## Generate predictions: representation + prediction at step 0 ...
        value, reward, policy_logits, hidden_state = self.model.initial_inference(
            observation_batch
        )
        predictions = [(value, reward, policy_logits)]
        # ... then unroll the dynamics model for each subsequent action.
        for i in range(1, action_batch.shape[1]):
            value, reward, policy_logits, hidden_state = self.model.recurrent_inference(
                hidden_state, action_batch[:, i]
            )
            # Scale the gradient at the start of the dynamics function by 0.5
            # (see paper appendix Training) to keep the unrolled gradients stable.
            hidden_state.register_hook(lambda grad: grad * 0.5)
            predictions.append((value, reward, policy_logits))
        # predictions: num_unroll_steps+1, 3, batch, 2*support_size+1 | 2*support_size+1 | 9 (according to the 2nd dim)

        ## Compute losses
        value_loss, reward_loss, policy_loss = (0, 0, 0)
        value, reward, policy_logits = predictions[0]
        # Ignore reward loss for the first batch step: there is no predicted
        # reward before the first action.
        current_value_loss, _, current_policy_loss = self.loss_function(
            value.squeeze(-1),
            reward.squeeze(-1),
            policy_logits,
            target_value[:, 0],
            target_reward[:, 0],
            target_policy[:, 0],
        )
        value_loss += current_value_loss
        policy_loss += current_policy_loss
        # Compute priorities for the prioritized replay (See paper appendix Training):
        # the value prediction error is the priority — larger error, more training.
        pred_value_scalar = (
            model.support_to_scalar(value, self.params['support_size'])
            .detach()
            .cpu()
            .numpy()
            .squeeze()
        )
        priorities[:, 0] = (
            np.abs(pred_value_scalar - target_value_scalar[:, 0])
            ** self.params['PER_alpha']
        )

        # Accumulate losses for every unrolled step.
        for i in range(1, len(predictions)):
            value, reward, policy_logits = predictions[i]
            (
                current_value_loss,
                current_reward_loss,
                current_policy_loss,
            ) = self.loss_function(
                value.squeeze(-1),
                reward.squeeze(-1),
                policy_logits,
                target_value[:, i],
                target_reward[:, i],
                target_policy[:, i],
            )

            # Scale gradient by the number of unroll steps (See paper appendix
            # Training). `i=i` binds the loop variable as a default argument:
            # a plain closure would late-bind, and by the time backward() runs
            # every hook would use the final value of i.
            current_value_loss.register_hook(
                lambda grad, i=i: grad / gradient_scale_batch[:, i]
            )
            current_reward_loss.register_hook(
                lambda grad, i=i: grad / gradient_scale_batch[:, i]
            )
            current_policy_loss.register_hook(
                lambda grad, i=i: grad / gradient_scale_batch[:, i]
            )

            value_loss += current_value_loss
            reward_loss += current_reward_loss
            policy_loss += current_policy_loss

            # Compute priorities for the prioritized replay (See paper appendix Training)
            pred_value_scalar = (
                model.support_to_scalar(value, self.params['support_size'])
                .detach()
                .cpu()
                .numpy()
                .squeeze()
            )
            priorities[:, i] = (
                np.abs(pred_value_scalar - target_value_scalar[:, i])
                ** self.params['PER_alpha']
            )

        # Scale the value loss, paper recommends by 0.25 (See paper appendix Reanalyze)
        loss = value_loss * self.params['value_loss_weight'] + reward_loss + policy_loss
        if self.params['PER']:
            # Correct PER bias with importance-sampling (IS) weights.
            loss *= weight_batch
        # Mean over batch dimension (pseudocode do a sum)
        loss = loss.mean()

        # Optimize
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.training_step += 1

        return (
            priorities,  # new sample priorities
            # For log purpose
            loss.item(),
            value_loss.mean().item(),
            reward_loss.mean().item(),
            policy_loss.mean().item(),
        )

    @staticmethod
    def loss_function(
        value,
        reward,
        policy_logits,
        target_value,
        target_reward,
        target_policy,
    ):
        """Categorical cross-entropy losses for value, reward and policy.

        The original class called self.loss_function but never defined it;
        this is the standard MuZero formulation. All inputs are
        (batch, num_classes) tensors; the targets are probability
        distributions (support vectors / normalized visit counts).

        Returns per-sample (unreduced) losses so the caller can register
        gradient-scaling hooks and apply PER weights before reducing.
        """
        value_loss = (-target_value * torch.nn.LogSoftmax(dim=1)(value)).sum(1)
        reward_loss = (-target_reward * torch.nn.LogSoftmax(dim=1)(reward)).sum(1)
        policy_loss = (-target_policy * torch.nn.LogSoftmax(dim=1)(policy_logits)).sum(1)
        return value_loss, reward_loss, policy_loss

    def train_model(self):
        """Main loop: collect experience, train, and periodically save/evaluate.

        Runs until the RewardTracker reports that stop_reward was reached.
        """
        with common.RewardTracker(self.writer, self.params['stop_reward']) as reward_tracker:
            while True:
                self.frame_idx += 1
                # Collect one step of experience into the replay buffer.
                self.replay_buffer.populate(1)
                self.agent.set_frame_id(self.frame_idx)

                new_rewards = self.exp_source.pop_total_rewards()
                if new_rewards:
                    if reward_tracker.reward(new_rewards[0], self.frame_idx):
                        break  # stop reward reached

                # Wait until at least one complete game has been played.
                if self.share_storage.get_info("num_played_games") < 1:
                    continue

                self.__train()
                # Count completed training steps; without this increment the
                # modulo checks below fired on every single iteration.
                self.train_count += 1

                if self.params['use_last_model_value']:
                    # Re-analyse stored games with the latest model so the
                    # value targets stay accurate (MuZero Reanalyze).
                    self.reanalyse.reanalyse(
                        self.replay_buffer, self.share_storage
                    )

                # Periodically persist a checkpoint.
                if self.train_count % self.params['checkpoint_interval'] == 0:
                    self.save_model()

                # Periodically evaluate in eval mode.
                if self.train_count % self.params['evaluation_interval'] == 0:
                    self.model.eval()
                    self.eval_model()
                    self.model.train()

    def play_game(
        self, temperature, temperature_threshold, render, opponent, muzero_player
    ):
        """
        Play one game, choosing an action with Monte Carlo tree search at each move.

        temperature: action-selection softmax temperature (0 = greedy).
        temperature_threshold: move index after which temperature drops to 0
            (falsy = never).
        render: whether to print MCTS statistics for each move.
        opponent / muzero_player: kept for API compatibility with two-player
            games (p1/p2 alternation, e.g. board games); unused here.

        Returns a GameHistory holding the whole trajectory.
        """
        game_history = common.GameHistory()
        # NOTE(review): assumes the wrapped test_env returns the bare
        # observation from reset() (not a gymnasium (obs, info) tuple) —
        # confirm against common.wrap_dqn.
        observation = self.test_env.reset()
        game_history.action_history.append(0)
        game_history.observation_history.append(observation)
        game_history.reward_history.append(0)
        game_history.to_play_history.append(self.game_id)

        done = False

        with torch.no_grad():
            # Loop until the episode ends or max_moves is reached.
            while (
                not done and len(game_history.action_history) <= self.params['max_moves']
            ):
                # Observations must be image-like (3-D) and match the
                # configured observation_shape.
                assert (
                    len(np.array(observation).shape) == 3
                ), f"Observation should be 3 dimensionnal instead of {len(np.array(observation).shape)} dimensionnal. Got observation of shape: {np.array(observation).shape}"
                assert (
                    np.array(observation).shape == self.params['observation_shape']
                ), f"Observation should match the observation_shape defined in MuZeroConfig. Expected {self.params['observation_shape']} but got {np.array(observation).shape}."
                # Stack the recent frames and actions into the model input.
                stacked_observations = game_history.get_stacked_observations(
                    -1, self.params['stacked_observations'], len(self.params['action_space'])
                )

                # Run MCTS: simulate ahead with the learned model and return
                # the search root plus tree statistics.
                root, mcts_info = common.MCTS(self.params).run(
                    self.model,
                    stacked_observations,
                    self.test_env.legal_actions(),  # all legal actions now
                    self.game_id,                   # current player id
                    True,                           # add exploration noise at the root
                )

                # Pick the next action from the root's visit counts.
                action = common.MuZeroAgent.select_action(
                    root,
                    temperature
                    if not temperature_threshold
                    or len(game_history.action_history) < temperature_threshold
                    else 0,
                )

                if render:
                    print(f'Tree depth: {mcts_info["max_tree_depth"]}')
                    print(
                        f"Root value for player {self.game_id}: {root.value():.2f}"
                    )

                # Apply the action in the environment.
                observation, reward, done = self.test_env.step(action)

                game_history.store_search_statistics(root, self.params['action_space'])

                # Record the transition.
                game_history.action_history.append(action)
                game_history.observation_history.append(observation)
                game_history.reward_history.append(reward)
                # Fix: the original referenced self.game.to_play(), but no
                # self.game attribute exists anywhere in this class; use the
                # same player id appended at reset time.
                game_history.to_play_history.append(self.game_id)

        return game_history

    def eval_model(self):
        """Play 10 greedy evaluation games, log and persist the best model."""
        with torch.no_grad():
            results = []
            for _ in range(10):
                game_history = self.play_game(
                    temperature=0.0,
                    temperature_threshold=0,
                    render=False,
                    opponent=None,
                    muzero_player=None,
                )
                results.append(game_history)
        mean_reward = np.mean([sum(history.reward_history) for history in results])
        common.save_best_model(mean_reward, self.model.state_dict(), self.save_path, "muzero-best", keep_best=10)
        # Tag the scalar with the current frame index so TensorBoard plots a
        # proper curve (the original omitted the global step).
        self.writer.add_scalar("test_reward", mean_reward, self.frame_idx)
        print(f"save best model, current test score: {mean_reward}")



if __name__ == "__main__":
    # First pass: parse only the bootstrap flags; everything else stays in
    # `remaining` and becomes valid once the config keys are registered.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='pendulum', help="Name of the run")
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load config/config.yaml (next to this script) and merge the sections
    # named on --configs, later sections overriding earlier ones.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/config.yaml').read_text())
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Expose every config key as a command-line override.
    # NOTE(review): type(value) for bool config values means bool("False") is
    # True — any non-empty string enables the flag; confirm config usage.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    # Second pass: parse only the leftover args INTO the first-pass namespace.
    # (The original called parse_args(remaining) with a fresh namespace, which
    # reset --cuda/--name/--configs back to their defaults because those flags
    # were already consumed by parse_known_args and absent from `remaining`.)
    args = parser.parse_args(remaining, namespace=args)
    params = vars(args)

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()

