import collections
import copy
import logging
import os.path
import sys
import time
from collections import namedtuple
from logging.handlers import RotatingFileHandler
from typing import Any

import cv2
import gymnasium as gym
import kornia.augmentation as aug
import numpy
import numpy as np
import ptan
import torch
import torch.distributions as td
import torch.nn as nn
import torch.nn.functional as F
from gymnasium import spaces
from PIL import Image

import lib.model as model



def save_best_model(score, state, save_dir, save_name, keep_best=5):
    """Save `state` as '<save_dir>/<save_name>_<score>.pth' and keep only the
    `keep_best` highest-scoring files of this model family.

    :param score: numeric score embedded in the filename and used for ranking
    :param state: object to persist (passed to torch.save)
    :param save_dir: target directory (created if missing)
    :param save_name: filename prefix identifying the model family
    :param keep_best: how many of the best-scoring files to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    torch.save(state, os.path.join(save_dir, f'{save_name}_{score}.pth'))

    prefix = f'{save_name}_'

    def _score_of(fname):
        # '<save_name>_<score>.pth' -> numeric score; float() also handles
        # fractional scores like '12.5' (the old int() key truncated them).
        return float(fname[len(prefix):].rsplit('.pth', 1)[0])

    # The original filtered on files containing "best", which silently skipped
    # pruning whenever save_name had no "best" in it; match the prefix instead.
    candidates = sorted(
        (f for f in os.listdir(save_dir)
         if f.startswith(prefix) and f.endswith('.pth')),
        key=_score_of,
    )
    if len(candidates) > keep_best:
        for old_model in candidates[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Write `state` to '<save_name>_epoch_<iter>.pth' and prune old epochs.

    Only the `keep_last` files with the highest epoch numbers are retained.
    """
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    epoch_files = [name for name in os.listdir(checkpoint_dir) if "epoch" in name]
    epoch_files.sort(key=lambda name: int(name.split('_')[-1].split('.')[0]))
    if len(epoch_files) > keep_last:
        for stale in epoch_files[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard writer保存
        param stop_reward: 停止训练的激励值\目标值
        '''

        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # total_rewards 训练期间的每一步的激励值，用来记录
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        '''
        param reward: 样本
        param fream: 当前进行了第frame次的训练
        param epsilon：当前的epsilon值

        return True: 表示已经达到了目标激励值 False： 表示还没有达到目标的激励值
        '''
        # 激励经验存储在总缓存区
        self.total_rewards.append(reward)
        # 计算当前的平均帧率
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        # 将当前帧总数和所花费的时间存储在缓存中
        self.ts_frame = frame
        self.ts = time.time()
        # 计算平均激励值
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        # 如果当前获取的激励已经达到了目标的激励大小，则返回true
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


class MuZeroAgent(ptan.agent.BaseAgent):
    """ptan agent that chooses actions by running an MCTS search driven by a
    MuZero network."""

    def __init__(self, params, net, env, device='cpu'):
        """
        :param params: hyper-parameter dict (uses 'temperature_threshold',
            'stacked_observations', 'action_space')
        :param net: MuZero network used by the MCTS search
        :param env: environment; queried for legal_actions()
        :param device: torch device used for inference
        """
        super().__init__()

        self.game_history = None
        self.params = params
        self.net = net
        self.device = device
        self.env = env
        self.game_id = 0
        self.frame_id = 0
        self.temperature_threshold = self.params['temperature_threshold']

    def set_frame_id(self, frame_id):
        """
        Set the current frame id
        :param frame_id: Current frame id
        """
        self.frame_id = frame_id

    def initial_state(self):
        """
        Should create initial empty state for the agent. It will be called for the start of the episode
        :return: Anything agent want to remember
        """
        # Fix: the original appended to self.game_history, which is still None
        # at this point and raised AttributeError; populate the freshly
        # created history instead.
        game_history = GameHistory()
        game_history.action_history.append(0)
        game_history.reward_history.append(0)
        game_history.to_play_history.append(self.game_id)

        return (game_history, self.game_id)

    def __call__(self, states, agent_states):
        """
        Convert observations and states into actions to take
        :param states: list of environment states to process
        :param agent_states: list of states with the same length as observations
        :return: tuple of actions, states
        """
        stacked_observations = []
        for state, agent_state in zip(states, agent_states):
            if len(agent_state[0].root_values) == 0:
                agent_state[0].observation_history.append(state)

            stacked_observations.append(
                agent_state[0].get_stacked_observations(
                    -1, self.params['stacked_observations'], len(self.params['action_space'])
                ))

        stacked_observations = torch.tensor(np.array(stacked_observations)).to(device=self.device)
        # Run MCTS: simulate ahead with the learned model and pick the action
        # with the best expected return. Returns the search root and tree info.
        root, mcts_info = MCTS(self.params).run(
            self.net,  # model
            stacked_observations,  # stacked observation history
            self.env.legal_actions(),  # all currently legal actions
            self.game_id,  # id of the player to move
            True,  # add exploration noise to the actions
        )

        # Derive the next action for each agent state from the search tree.
        actions = []
        temperature = self.visit_softmax_temperature_fn(
            trained_steps=self.frame_id
        )
        for agent_state in agent_states:
            action = self.select_action(
                root,
                temperature
                if not self.temperature_threshold
                or len(agent_state[0].action_history) < self.temperature_threshold
                else 0,
            )
            agent_state[0].action_history.append(action)
            # NOTE(review): assumes `action` is a torch tensor; select_action
            # returns an element of node.children.keys() — confirm the keys
            # really are tensors, otherwise .cpu() will fail.
            actions.append(action.cpu().numpy())

        return actions, agent_states

    def visit_softmax_temperature_fn(self, trained_steps):
        """
        Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
        The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.

        Returns:
            Positive float.
        """
        if trained_steps < 500e3:
            return 1.0
        elif trained_steps < 750e3:
            return 0.5
        else:
            return 0.25

    @staticmethod
    def select_action(node, temperature):
        """
        Select action according to the visit count distribution and the temperature.
        The temperature is changed dynamically with the visit_softmax_temperature function
        in the config.

        :param node: root node of the search tree
        :param temperature: controls randomness of the selection
        """
        # Visit counts of every child of the root.
        visit_counts = numpy.array(
            [child.visit_count for child in node.children.values()], dtype="int32"
        )
        # Actions corresponding to those children.
        actions = [action for action in node.children.keys()]
        if temperature == 0:
            # Temperature 0: greedy, take the most-visited action.
            action = actions[numpy.argmax(visit_counts)]
        elif temperature == float("inf"):
            # Infinite temperature: uniformly random action.
            action = numpy.random.choice(actions)
        else:
            # See paper appendix Data Generation: sample from the
            # temperature-annealed visit-count distribution.
            visit_count_distribution = visit_counts ** (1 / temperature)
            visit_count_distribution = visit_count_distribution / sum(
                visit_count_distribution
            )
            action = numpy.random.choice(actions, p=visit_count_distribution)

        return action


def select_device(args):
    """Pick the compute device.

    CUDA when requested and available; otherwise Apple MPS (also gated on
    args.cuda); otherwise CPU.
    """
    want_accel = args.cuda
    if want_accel and torch.cuda.is_available():
        return torch.device("cuda")
    if want_accel and torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


import collections

class FrameStackZero(gym.Wrapper):
    """Stacks the last `k` observations along the channel axis.

    On reset the frame buffer is pre-filled with zero frames, so the first
    stacked observation consists of k-1 zero frames plus the real one.
    """

    def __init__(self, env, k):
        super(FrameStackZero, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        base_shape = env.observation_space.shape
        self.channels = base_shape[0]
        stacked_shape = (base_shape[0] * k,) + tuple(base_shape[1:])
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=stacked_shape, dtype=np.float32
        )

    def reset_frames(self):
        # Fill the buffer with zero frames shaped like a single observation.
        zero_shape = (self.channels,) + self.observation_space.shape[1:3]
        for _ in range(self.k):
            self.frames.append(np.zeros(zero_shape, dtype=np.float32))

    def reset(self, **kwargs):
        self.reset_frames()
        observation, info = self.env.reset(**kwargs)
        self.frames.append(observation)
        return self._get_obs(), info

    def step(self, action):
        observation, reward, done, truncated, info = self.env.step(action)
        self.frames.append(observation)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Channel-wise concatenation of the buffered frames (plain NumPy,
        # no LazyFrames).
        return np.concatenate(list(self.frames), axis=0)



class ProcessFrame(gym.ObservationWrapper):
    """
    Convert the game frame (observation) to a `size` x `size` image,
    optionally grayscale.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # New observation space: uint8 image, 1 channel when gray else 3.
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """
        Transform the raw observation into the processed frame.
        """
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Crop, optionally grayscale, and resize one frame to (size, size).

        Removed a dead `global count_frame` declaration — the name was never
        defined or used anywhere.
        """
        x_t = img
        if gray:
            # Luminance conversion with standard ITU-R 601 RGB weights.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 25..184 — assumes an Atari-style frame layout;
        # TODO confirm for other source resolutions.
        x_t = x_t[25:185, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8.
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        img = Image.fromarray(state)
        # Convert image to mode 'L' (grayscale) if it's not compatible.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)

class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Games with a FIRE action expose it as action 1 and offer >= 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset and press the first actions (1 then 2) to actually start play.

        If a probing step terminates the episode, the env is reset again.
        Fix: the returned observation/info now always come from the live
        episode — the original could return a stale observation captured
        before a mid-sequence reset.
        """
        obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Scales positive rewards down by a factor of 800 (100 * 8).

    The frame / life-loss penalty parameters are stored but currently unused
    (the life-loss handling was disabled in the original code).
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        observation, info = self.env.reset(**kwargs)
        # Remember the starting life count for potential life-loss penalties.
        self.previous_lives = info.get('lives', 0)
        return observation, info

    def step(self, action):
        observation, reward, done, truncated, info = self.env.step(action)

        # NOTE(review): `reward /= 100.0 * 8` divides by 800; confirm the
        # intent was not (reward / 100) * 8.
        if reward > 0:
            reward = reward / (100.0 * 8)

        return observation, reward, done, truncated, info
    


def wrap_dqn(env, episodic_life=False, stack_frames = 4, reward_clipping=True, gray = False):
    """Create and wrap the Atari-style environment for training.

    :param env: gym environment id (string) passed to gym.make
    :param episodic_life: treat the loss of one life as end-of-episode
    :param stack_frames: unused here — TODO confirm whether frame stacking
        was meant to be applied in this function
    :param reward_clipping: unused here — TODO confirm
    :param gray: convert frames to grayscale in ProcessFrame
    :return: the fully wrapped environment
    """
    env = gym.make(env, frameskip=4, repeat_action_probability=0.25)
    if episodic_life:
        # Simulate a single-life game for multi-life titles.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomized no-op start for initial-state diversity.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)
    # TODO: compare different resolutions / color settings — all should work
    # in principle.
    env = ProcessFrame(env, gray=gray, size=96)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = RewardPenaltyWrapper(env)
    return env


def setup_logger(save_path):
    """Return the module logger writing rotating logs to '<save_path>/train.log'.

    Fix: repeated calls previously attached a new handler each time, which
    duplicated every log line; the handler is now attached only once.

    :param save_path: directory for the log file (created if missing)
    :return: configured logging.Logger
    """
    os.makedirs(save_path, exist_ok=True)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    log_file = os.path.abspath(os.path.join(save_path, 'train.log'))
    already_attached = any(
        isinstance(h, RotatingFileHandler)
        and getattr(h, 'baseFilename', None) == log_file
        for h in logger.handlers
    )
    if not already_attached:
        handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024, backupCount=2)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger


def save_model(model_name, loss, best_loss, model):
    """Persist the model; additionally snapshot it when `loss` improves on
    `best_loss`.

    Fix: the best-model snapshot is now named after the NEW best loss — the
    original embedded the previous `best_loss`, mislabeling the file.

    :param model_name: identifier embedded in the filenames
    :param loss: current loss value
    :param best_loss: best loss observed so far
    :param model: object to persist (passed to torch.save)
    :return: the updated best loss (min of loss and best_loss)
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        torch.save(model, f'saves/best_model_{model_name}_{loss}.dat')
        best_loss = loss

    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss


class GameHistory:
    """
    Store only usefull information of a self-play game.
    Records one episode's trajectory as contiguous, step-aligned histories.
    """

    def __init__(self):
        self.observation_history = []  # environment observations
        self.action_history = []  # actions that were executed
        self.reward_history = []  # rewards received
        self.to_play_history = []  # id of the player to move at each step
        self.child_visits = []  # per step: visit-count share of each root child
        self.root_values = []  # per step: value of the search-tree root
        self.reanalysed_predicted_root_values = None  # values re-estimated with the latest model (Reanalyse) for more accurate targets
        # For PER: priorities of the training samples
        self.priorities = None  # per-position priorities (TODO)
        self.game_priority = None  # game-level priority (TODO)

    def store_search_statistics(self, root, action_space):
        # Turn visit count from root into a policy
        # root: root node of the search tree for this step
        # action_space: the full discrete action space
        if root is not None:
            # Total visit count over all children.
            sum_visits = sum(child.visit_count for child in root.children.values())
            # Share of visits per action; 0 for actions with no child node.
            self.child_visits.append(
                [
                    root.children[a].visit_count / sum_visits
                    if a in root.children
                    else 0
                    for a in action_space
                ]
            )

            # Root value for this step.
            self.root_values.append(root.value())
        else:
            self.root_values.append(None)

    def get_stacked_observations(
        self, index, num_stacked_observations, action_space_size
    ):
        """
        Generate a new observation with the observation at the index position
        and num_stacked_observations past observations and actions stacked.

        index: position to stack from (-1 selects the latest observation)
        num_stacked_observations: how many past frames to stack
        action_space_size: size of the action space (used to normalize actions)
        """
        # Convert to positive index 
        # (-1 maps to the last entry of the history buffer).
        index = index % len(self.observation_history)
        
        # Start from the observation at `index`.
        stacked_observations = self.observation_history[index].copy()
        for past_observation_index in reversed(
            range(index - num_stacked_observations, index)
        ):
            if 0 <= past_observation_index:
                # observation_history[past_observation_index]: past observation
                # action_history[past_observation_index + 1]: action taken there
                # The action is broadcast into an observation-shaped plane and
                # divided by action_space_size to normalize it into [0, 1].
                previous_observation = numpy.concatenate(
                    (
                        self.observation_history[past_observation_index],
                        [
                            numpy.ones_like(stacked_observations[0])
                            * self.action_history[past_observation_index + 1]
                            / action_space_size
                        ],
                    )
                )
            else:
                # Not enough history: pad with zero frames (and a zero
                # action plane).
                previous_observation = numpy.concatenate(
                    (
                        numpy.zeros_like(self.observation_history[index]),
                        [numpy.zeros_like(stacked_observations[0])],
                    )
                )

            # Append the past frame (plus action plane) to the stack.
            stacked_observations = numpy.concatenate(
                (stacked_observations, previous_observation)
            )

        # Current frame followed by the stacked history frames.
        # NOTE(review): the final shape depends on the per-frame shape —
        # confirm downstream expectations.
        return stacked_observations


class MinMaxStats:
    """Running minimum/maximum tracker used to normalize tree values."""

    def __init__(self):
        self.maximum = -float("inf")
        self.minimum = float("inf")

    def update(self, value):
        if value > self.maximum:
            self.maximum = value
        if value < self.minimum:
            self.minimum = value

    def normalize(self, value):
        spread = self.maximum - self.minimum
        # Only normalize once real bounds have been observed.
        if spread > 0:
            return (value - self.minimum) / spread
        return value


class ReplayBuffer:
    """
    Class which run in a dedicated thread to store played games and generate batch.
    """

    def __init__(self, experience_source, share_storage, params):
        self.params = params
        self.buffer = {}
        self.share_storage = share_storage
        self.experience_source_iter = None if experience_source is None else iter(experience_source)
        self.num_played_games = 0 # 记录游戏的数量
        self.num_played_steps = 0 # 记录所有游戏记录的步数总和
        # total_samples 有多少个样本已经在 buffer 中，初始化时为0
        self.total_samples = sum(
            [len(game_history.root_values) for game_history in self.buffer.values()]
        )
        if self.total_samples != 0:
            print(
                f"Replay buffer initialized with {self.total_samples} samples ({self.num_played_games} games).\n"
            )

         # 游戏样本存储缓冲区
        self.game_history = None

    
    def _add(self, sample):
        if self.game_history is None:
            self.game_history = GameHistory()
            observation = sample[0]
            self.game_history.action_history.append(0)
            self.game_history.observation_history.append(observation)
            self.game_history.reward_history.append(0)
        else:
            self.game_history.observation_history.append(sample[0])
        
        self.game_history.action_history.append(sample[1])
        self.game_history.reward_history.append(sample[1])
        self.game_history.to_play_history.append(sample[5][1])


        if sample[3]:
            self.save_game(self.game_history, self.share_storage)
            self.game_history = None

        
    def populate(self, samples):
        """
        Populates samples into the buffer 提取样本到重放缓存区中
        :param samples: how many samples to populate  从样本池中提取多少个样本到缓冲区
        
        算法的原理及利用迭代器根据数量，从经验池中获取数据
        """
        for _ in range(samples):
            entry = next(self.experience_source_iter)
            self._add(entry)


    def save_game(self, game_history, shared_storage=None):
        '''
        game_histroy: 一轮游戏的所有数据
        '''
        if self.params['PER']:
            if game_history.priorities is not None:
                # Avoid read only array when loading replay buffer from disk
                # todo 避免加载为只读数组
                # 已有优先级，则直接复制
                game_history.priorities = numpy.copy(game_history.priorities)
            else:
                # Initial priorities for the prioritized replay (See paper appendix Training)
                # 没有优先级，则计算优先级
                # todo 后续再看
                priorities = []
                for i, root_value in enumerate(game_history.root_values):
                    priority = (
                        numpy.abs(
                            root_value - self.compute_target_value(game_history, i)
                        )
                        ** self.params['PER_alpha']
                    )
                    priorities.append(priority)

                game_history.priorities = numpy.array(priorities, dtype="float32")
                game_history.game_priority = numpy.max(game_history.priorities)

        self.buffer[self.num_played_games] = game_history
        self.num_played_games += 1 # 记录游戏周期的数量
        self.num_played_steps += len(game_history.root_values) # 记录游戏的步数
        self.total_samples += len(game_history.root_values) # 记录所有游戏的样本数总和

        # 限制重放缓冲区的长度，限制为 replay_buffer_size
        if self.params['replay_buffer_size'] < len(self.buffer):
            del_id = self.num_played_games - len(self.buffer)
            self.total_samples -= len(self.buffer[del_id].root_values)
            del self.buffer[del_id]

        if shared_storage:
            # 更新已有的游戏周期个数以及总样本的个数
            shared_storage.set_info.remote("num_played_games", self.num_played_games)
            shared_storage.set_info.remote("num_played_steps", self.num_played_steps)

    def get_buffer(self):
        return self.buffer

    def get_batch(self):
        '''
        获取游戏训练的batch
        '''
        (
            index_batch, # 每个采样的游戏id和采样位置
            observation_batch, # 每个采样位置开始的观察帧堆叠
            action_batch, # 采样位置开始时到结束位置的执行动作list
            reward_batch, # 采样位置开始时到结束位置的获取奖励list
            value_batch, # 采样位置开始时到结束位置的动作价值list
            policy_batch, # 采样位置开始时到结束位置的每个位置的动作概率分布（依据访问次数确定）
            gradient_scale_batch, # # 计算梯度缩放因子
        ) = ([], [], [], [], [], [], [])
        # 这里的 weight_batch 是用来做优先级经验回放的
        weight_batch = [] if self.params['PER'] else None

        # game_id: 游戏的 id
        # game_history: 每个游戏周期的所有样本数据
        # game_prob: 游戏样本的优先级，对于没有优先级的游戏，值为 None
        for game_id, game_history, game_prob in self.sample_n_games(
            self.params['batch_size']
        ):
            # 对应游戏生命周期采样的位置，以及对应位置的优先级
            game_pos, pos_prob = self.sample_position(game_history)
            
            # 返回以game_pos为起点的连续样本数据
            values, rewards, policies, actions = self.make_target(
                game_history, game_pos
            )
            
            # 记录该样本的游戏id、样本位置
            index_batch.append([game_id, game_pos])
            # 获取对应位置的且进行了历史帧堆叠的观察，这边就不是一个列表了，而是一个向量包含历史帧信息
            observation_batch.append(
                game_history.get_stacked_observations(
                    game_pos,
                    self.params['stacked_observations'],
                    len(self.params['action_space']),
                )
            )
            action_batch.append(actions)
            value_batch.append(values)
            reward_batch.append(rewards)
            policy_batch.append(policies)
            # 取展开步数和剩余动作数的较小值
            # 对每个动作重复相同的缩放因子
            # todo 看看后续是什么使用的吧
            gradient_scale_batch.append(
                [
                    min(
                        self.params['num_unroll_steps'],
                        len(game_history.action_history) - game_pos,
                    )
                ]
                * len(actions)
            )
            if self.params['PER']:
                # self.total_samples 总共的样本数量
                # game_prob：当前游戏的优先级
                # pos_prob：样本的优先级
                weight_batch.append(1 / (self.total_samples * game_prob * pos_prob))

        if self.params['PER']:
            # 归一化
            weight_batch = numpy.array(weight_batch, dtype="float32") / max(
                weight_batch
            )

        # observation_batch: batch, channels, height, width
        # action_batch: batch, num_unroll_steps+1
        # value_batch: batch, num_unroll_steps+1
        # reward_batch: batch, num_unroll_steps+1
        # policy_batch: batch, num_unroll_steps+1, len(action_space)
        # weight_batch: batch
        # gradient_scale_batch: batch, num_unroll_steps+1
        return (
            index_batch,
            (
                observation_batch,
                action_batch,
                value_batch,
                reward_batch,
                policy_batch,
                weight_batch,
                gradient_scale_batch,
            ),
        )

    def sample_game(self, force_uniform=False):
        """
        Sample game from buffer either uniformly or according to some priority.
        See paper appendix Training.


        """
        game_prob = None
        if self.params['PER'] and not force_uniform:
            game_probs = numpy.array(
                [game_history.game_priority for game_history in self.buffer.values()],
                dtype="float32",
            )
            game_probs /= numpy.sum(game_probs)
            game_index = numpy.random.choice(len(self.buffer), p=game_probs) 
            game_prob = game_probs[game_index]
        else:
            game_index = numpy.random.choice(len(self.buffer))
        '''
        Buffer 结构

        self.num_played_games: 总共玩过的游戏数量
        len(self.buffer): 当前缓冲区中的游戏数量
        game_index: 在当前缓冲区中随机选择的索引
        ID 计算目的

        维持游戏 ID 的连续性
        考虑了缓冲区大小限制导致的旧游戏被删除的情况

        # 假设:
        num_played_games = 1000  # 总共玩了1000局
        buffer_size = 100       # 缓冲区限制为100局
        game_index = 50        # 随机选中第50局
        
        # 计算:
        game_id = 1000 - 100 + 50 = 950

        最早的900局已被删除
        缓冲区中保存了901-1000这100局
        选中的是缓冲区中第50个游戏，实际对应第950局
        '''
        game_id = self.num_played_games - len(self.buffer) + game_index

        # 游戏的id，指定游戏周期的所有数据，游戏的优先级
        return game_id, self.buffer[game_id], game_prob

    def sample_n_games(self, n_games, force_uniform=False):
        '''
        n_games: 采样的游戏数量，传入的是btach_size,意思就是采集 batch_size 个游戏数据？
        '''
        if self.params['PER'] and not force_uniform:
            game_id_list = [] # 记录游戏的 id，一个游戏周期应该算一个id
            game_probs = [] # 记录每轮游戏的优先级
            for game_id, game_history in self.buffer.items():
                game_id_list.append(game_id)
                game_probs.append(game_history.game_priority)
            # 归一化每轮游戏的优先级
            game_probs = numpy.array(game_probs, dtype="float32")
            game_probs /= numpy.sum(game_probs)
            # 游戏的 id 和优先级组成一个字典
            game_prob_dict = dict(
                [(game_id, prob) for game_id, prob in zip(game_id_list, game_probs)]
            )
            # 随机选择 n_games 个游戏周期样本数据
            selected_games = numpy.random.choice(game_id_list, n_games, p=game_probs)
        else:
            # 没有优先级则随机选择 n_games 个游戏周期样本数据
            selected_games = numpy.random.choice(list(self.buffer.keys()), n_games)
            game_prob_dict = {}
        # 返回选择的游戏训练周期数据的列表
        # 看起来应该就是一个游戏周期的连续训练数据
        ret = [
            (game_id, self.buffer[game_id], game_prob_dict.get(game_id))
            for game_id in selected_games
        ]
        return ret

    def sample_position(self, game_history, force_uniform=False):
        """
        Sample position from game either uniformly or according to some priority.
        See paper appendix Training.
        这个方法的作用 todo
        game_history: 游戏周期的所有数据
        force_uniform: 是否强制使用均匀采样

        return: 返回采集的样本位置和优先级
        """
        position_prob = None
        if self.params['PER'] and not force_uniform:
            # 计算每个样本的优先级比例
            position_probs = game_history.priorities / sum(game_history.priorities)
            # 根据优先级比例随机选择一个样本位置
            position_index = numpy.random.choice(len(position_probs), p=position_probs)
            # 获取对应样本位置的优先级
            position_prob = position_probs[position_index]
        else:
            # 随机选择一个样本位置
            position_index = numpy.random.choice(len(game_history.root_values))

        return position_index, position_prob

    def update_game_history(self, game_id, game_history):
        # The element could have been removed since its selection and update
        # 更新置顶id的游戏周期的所有数据
        if next(iter(self.buffer)) <= game_id:
            if self.params['PER']:
                # Avoid read only array when loading replay buffer from disk
                game_history.priorities = numpy.copy(game_history.priorities)
            self.buffer[game_id] = game_history

    def update_priorities(self, priorities, index_info):
        """
        Update game and position priorities with priorities calculated during the training.
        See Distributed Prioritized Experience Replay https://arxiv.org/abs/1803.00933
        """
        for i in range(len(index_info)):
            game_id, game_pos = index_info[i]

            # The element could have been removed since its selection and training
            if next(iter(self.buffer)) <= game_id:
                # Update position priorities
                priority = priorities[i, :]
                start_index = game_pos
                end_index = min(
                    game_pos + len(priority), len(self.buffer[game_id].priorities)
                )
                self.buffer[game_id].priorities[start_index:end_index] = priority[
                    : end_index - start_index
                ]

                # Update game priorities
                self.buffer[game_id].game_priority = numpy.max(
                    self.buffer[game_id].priorities
                )

    def compute_target_value(self, game_history, index):
        '''
        game_history: 游戏周期的所有数据
        index: 采样的样本位置

        返回指定周期内的价值
        '''
        # The value target is the discounted root value of the search tree td_steps into the
        # future, plus the discounted sum of all rewards until then.
        # 计算计算value展开的步数
        bootstrap_index = index + self.params['td_steps']
        # 确保采样的样本位置在游戏周期的范围内
        if bootstrap_index < len(game_history.root_values):
            # 获取根节点的值 todo 这里的reanalysed_predicted_root_values时什么？
            root_values = (
                game_history.root_values
                if game_history.reanalysed_predicted_root_values is None
                else game_history.reanalysed_predicted_root_values
            )
            # 计算最后一步的价值，如果是双人游戏则取反
            last_step_value = (
                root_values[bootstrap_index]
                if game_history.to_play_history[bootstrap_index]
                == game_history.to_play_history[index]
                else -root_values[bootstrap_index]
            )
            # 利用n步dqn的计算方式，得到最后一步的折扣价值
            value = last_step_value * self.params['discount']**self.params['td_steps']
        else:
            # 如果超出范围，则取最后一步的价值取0
            value = 0

        # 遍历指定周期内的奖励
        for i, reward in enumerate(
            game_history.reward_history[index + 1 : bootstrap_index + 1]
        ):
            # The value is oriented from the perspective of the current player
            # 通过bellman方程计算价值整个指定周期内的价值
            value += (
                reward
                if game_history.to_play_history[index]
                == game_history.to_play_history[index + i]
                else -reward
            ) * self.params['discount']**i

        return value

    def make_target(self, game_history, state_index):
        """
        Generate targets for every unroll steps.
        game_history: 游戏周期的所有数据
        state_index: 采样的样本位置

        return: 返回以state_index为起点的 num_unroll_steps + 1 个样本的价值、奖励、访问次数比率和动作
        """
        # target_values：存储每个采样位置的价值
        # target_rewards：存储每个采样位置的奖励
        # target_policies：获取采样位置的每个动作的访问次数比率
        # action：存储每个采样位置的动作
        target_values, target_rewards, target_policies, actions = [], [], [], []
        # 从采样的位置开始，向后采样 num_unroll_steps + 1 个样本
        for current_index in range(
            state_index, state_index + self.params['num_unroll_steps'] + 1
        ):
            # 计算指定采样位置的价值
            value = self.compute_target_value(game_history, current_index)

            # 没有超过游戏周期的范围
            if current_index < len(game_history.root_values):
                target_values.append(value)
                target_rewards.append(game_history.reward_history[current_index])
                target_policies.append(game_history.child_visits[current_index])
                actions.append(game_history.action_history[current_index])
            # 达到了游戏周期的末尾
            elif current_index == len(game_history.root_values):
                # 达到了末尾则value为0
                target_values.append(0)
                # 这里的奖励按照实际的奖励来
                target_rewards.append(game_history.reward_history[current_index])
                # Uniform policy
                # 如果达到了游戏周期的末尾，则访问次数均匀分布，即每个动作的访问次数相同，概率也相同概率值 = 1/动作空间大小
                target_policies.append(
                    [
                        1 / len(game_history.child_visits[0])
                        for _ in range(len(game_history.child_visits[0]))
                    ]
                )
                # 实际在末尾执行的动作
                actions.append(game_history.action_history[current_index])
            # 超过了游戏周期的末尾
            else:
                # States past the end of games are treated as absorbing states
                # 超过末尾则价值奖励均为0
                target_values.append(0)
                target_rewards.append(0)
                # Uniform policy # 如果达到了游戏周期的末尾，则访问次数均匀分布，即每个动作的访问次数相同，概率也相同概率值 = 1/动作空间大小
                target_policies.append(
                    [
                        1 / len(game_history.child_visits[0])
                        for _ in range(len(game_history.child_visits[0]))
                    ]
                )
                # 这里的动作随机选择，很显然超过的部分没有任何意义
                actions.append(numpy.random.choice(self.params['action_space']))

        return target_values, target_rewards, target_policies, actions


class Reanalyse:
    """
    Continuously refreshes the value targets stored in the replay buffer using
    the latest network weights (see the MuZero paper, appendix "Reanalyze").
    Runs in a dedicated thread.
    """

    def __init__(self, initial_checkpoint, params):
        """
        initial_checkpoint: checkpoint dict holding at least "weights" and
            "num_reanalysed_games".
        params: hyper-parameter dict (reanalyse_on_gpu, training_steps,
            use_last_model_value, stacked_observations, action_space,
            support_size, ...).
        """
        self.params = params

        # NOTE(review): the original code carried a "Fix random generator
        # seed" comment but never seeded anything; seeding is left to the
        # caller — confirm that is intentional.

        # Private copy of the network, kept in eval mode: it is only used for
        # inference to produce fresh value estimates, never trained directly.
        self.model = model.MuZeroNetwork(self.params)
        self.model.set_weights(initial_checkpoint["weights"])
        self.model.to(torch.device("cuda" if self.params['reanalyse_on_gpu'] else "cpu"))
        self.model.eval()

        # Running count of how many games have been reanalysed so far.
        self.num_reanalysed_games = initial_checkpoint["num_reanalysed_games"]

    def reanalyse(self, replay_buffer, shared_storage):
        '''
        Loop until training finishes (or terminate is flagged): sample one
        stored game, recompute its root values with the current network, and
        write the refreshed history back into the replay buffer.
        '''

        while shared_storage.get_info("training_step") < self.params['training_steps'] and not shared_storage.get_info("terminate"):  
            # Always reanalyse with the freshest weights.
            self.model.set_weights(shared_storage.get_info("weights"))

            # Sample one stored game uniformly at random.
            game_id, game_history, _ = replay_buffer.sample_game(force_uniform=True)

            # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
            if self.params['use_last_model_value']:
                # Stack the observation history for every position in the game.
                observations = numpy.array(
                    [
                        game_history.get_stacked_observations(
                            i,
                            self.params['stacked_observations'],
                            len(self.params['action_space']),
                        )
                        for i in range(len(game_history.root_values))
                    ]
                )

                observations = (
                    torch.tensor(observations)
                    .float()
                    .to(next(self.model.parameters()).device)
                )

                # Forward pass; convert the categorical value distribution to
                # its scalar expectation.
                values = model.support_to_scalar(
                    self.model.initial_inference(observations)[0],
                    self.params['support_size'],
                )
                # Store the refreshed per-position root values on the history.
                game_history.reanalysed_predicted_root_values = (
                    torch.squeeze(values).detach().cpu().numpy()
                )

            # BUGFIX: was `replay_buffer.update_game_history.remote(...)` — a
            # Ray actor call left over from a Ray-based version. Everywhere
            # else in this class (sample_game above, shared_storage below) the
            # objects are called directly, so `.remote` on a plain method
            # would raise AttributeError.
            replay_buffer.update_game_history(game_id, game_history)
            # Record how many games have been reanalysed.
            self.num_reanalysed_games += 1
            shared_storage.set_info(
                "num_reanalysed_games", self.num_reanalysed_games
            )


class SharedStorage:
    """
    Stores the network weights and bookkeeping information shared between the
    training / self-play / reanalyse workers. Runs in a dedicated thread.
    """

    def __init__(self, checkpoint, config):
        self.config = config
        # Deep-copy so later mutations of the caller's dict do not leak into
        # the shared state.
        self.current_checkpoint = copy.deepcopy(checkpoint)

    def get_checkpoint(self):
        """Return a deep copy of the current checkpoint (safe to mutate)."""
        return copy.deepcopy(self.current_checkpoint)

    def get_info(self, keys):
        """
        Read one entry (`keys` is a str) or several (`keys` is a list; a dict
        of key -> value is returned).

        Raises:
            TypeError: if `keys` is neither a str nor a list.
        """
        if isinstance(keys, str):
            return self.current_checkpoint[keys]
        elif isinstance(keys, list):
            return {key: self.current_checkpoint[key] for key in keys}
        else:
            # Was a bare `raise TypeError`; a message makes the failure
            # diagnosable at the call site.
            raise TypeError(f"keys must be a str or a list, got {type(keys)}")

    def set_info(self, keys, values=None):
        """
        Write a single entry (str key plus a non-None value) or merge a dict
        of entries. Note the single-key form deliberately rejects None values.

        Raises:
            TypeError: if `keys` is neither a str (with non-None value) nor a dict.
        """
        if isinstance(keys, str) and values is not None:
            self.current_checkpoint[keys] = values
        elif isinstance(keys, dict):
            self.current_checkpoint.update(keys)
        else:
            raise TypeError(
                f"keys must be a str (with a non-None value) or a dict, got {type(keys)}"
            )

    def dump_checkpoint(self):
        """Return the live checkpoint object (NOT a copy — do not mutate)."""
        return self.current_checkpoint

    def import_checkpoint(self, checkpoint):
        """
        Replace the shared checkpoint with a deep copy of the given one.

        checkpoint: the checkpoint/weights dict to import.
        """
        self.current_checkpoint = copy.deepcopy(checkpoint)
        

# Game independent
class MCTS:
    """
    Core Monte Carlo Tree Search algorithm.
    To decide on an action, we run N simulations, always starting at the root of
    the search tree and traversing the tree according to the UCB formula until we
    reach a leaf node.
    """

    def __init__(self, config):
        self.config = config

    def run(
        self,
        model,
        observation,
        legal_actions,
        to_play,
        add_exploration_noise,
        override_root_with=None,
    ):
        """
        At the root of the search tree we use the representation function to obtain a
        hidden state given the current observation.
        We then run a Monte Carlo Tree Search using only action sequences and the model
        learned by the network.

        model: the MuZero network (initial_inference / recurrent_inference)
        observation: stacked history frames plus the current frame
        legal_actions: all legal actions in the current state
        to_play: id of the player to move
        add_exploration_noise: whether to add Dirichlet noise at the root
        override_root_with: optionally reuse an already-built subtree as the
            root instead of rebuilding it, to save search effort

        Returns (root, extra_info) where extra_info records the maximum tree
        depth reached and the network's predicted root value.
        """
        # BUGFIX: the `model` parameter shadows the `lib.model` module
        # imported at the top of the file, and `models` was an undefined name
        # (NameError). Re-import the module under the name the code uses.
        import lib.model as models

        if override_root_with:
            # Reuse the provided subtree; no fresh root prediction available.
            root = override_root_with
            root_predicted_value = None
        else:
            root = Node(0)
            # Move the observation to the model's device, with a batch dim.
            observation = (
                torch.tensor(observation)
                .float()
                .unsqueeze(0)
                .to(next(model.parameters()).device)
            )
            # Initial inference: predicted value, reward, policy logits and
            # the latent (hidden) state for the root.
            (
                root_predicted_value,
                reward,
                policy_logits,
                hidden_state,
            ) = model.initial_inference(observation)

            # Convert the categorical value / reward distributions into their
            # scalar expectations.
            root_predicted_value = models.support_to_scalar(
                root_predicted_value, self.config.support_size
            ).item()
            reward = models.support_to_scalar(reward, self.config.support_size).item()
            assert (
                legal_actions
            ), f"Legal actions should not be an empty array. Got {legal_actions}."
            assert set(legal_actions).issubset(
                set(self.config.action_space)
            ), "Legal actions should be a subset of the action space."
            # Attach the predictions (reward, priors, latent state) to the root.
            root.expand(
                legal_actions,
                to_play,
                reward,
                policy_logits,
                hidden_state,
            )

        if add_exploration_noise:
            # Perturb the root priors to encourage exploration.
            root.add_exploration_noise(
                dirichlet_alpha=self.config.root_dirichlet_alpha,
                exploration_fraction=self.config.root_exploration_fraction,
            )

        # Tracks the min/max backed-up values seen so far; used to normalize
        # value scores inside the UCB formula.
        min_max_stats = MinMaxStats()

        max_tree_depth = 0
        # num_simulations: how many simulated descents to run from the root.
        for _ in range(self.config.num_simulations):
            virtual_to_play = to_play  # player to move at the current node
            node = root
            search_path = [node]  # path from the root down to the reached leaf
            current_tree_depth = 0

            # Descend while the current node has already been expanded.
            while node.expanded():
                current_tree_depth += 1
                # Pick the child maximizing the UCB score.
                action, node = self.select_child(node, min_max_stats)
                search_path.append(node)

                # Players play turn by turn.
                if virtual_to_play + 1 < len(self.config.players):
                    virtual_to_play = self.config.players[virtual_to_play + 1]
                else:
                    virtual_to_play = self.config.players[0]

            # Inside the search tree we use the dynamics function to obtain the next hidden
            # state given an action and the previous hidden state.
            parent = search_path[-2]
            value, reward, policy_logits, hidden_state = model.recurrent_inference(
                parent.hidden_state,
                torch.tensor([[action]]).to(parent.hidden_state.device),
            )
            # BUGFIX (as above): `models` was undefined — now the re-imported
            # module alias. Convert distributions to scalar expectations.
            value = models.support_to_scalar(value, self.config.support_size).item()
            reward = models.support_to_scalar(reward, self.config.support_size).item()
            node.expand(
                self.config.action_space,
                virtual_to_play,
                reward,
                policy_logits,
                hidden_state,
            )

            # Propagate the evaluation up the search path.
            self.backpropagate(search_path, value, virtual_to_play, min_max_stats)

            max_tree_depth = max(max_tree_depth, current_tree_depth)

        extra_info = {
            "max_tree_depth": max_tree_depth,
            "root_predicted_value": root_predicted_value,
        }
        return root, extra_info

    def select_child(self, node, min_max_stats):
        """
        Select the child with the highest UCB score; ties are broken uniformly
        at random.

        node: the node whose children are scored
        min_max_stats: running min/max of backed-up values, for normalization

        Returns (chosen action, corresponding child node).
        """
        # Compute each child's UCB score exactly once (the original computed
        # every score twice: once for max(), once in the filter).
        ucb_scores = {
            action: self.ucb_score(node, child, min_max_stats)
            for action, child in node.children.items()
        }
        max_ucb = max(ucb_scores.values())
        # Among the argmax actions, pick one at random.
        action = numpy.random.choice(
            [action for action, score in ucb_scores.items() if score == max_ucb]
        )
        return action, node.children[action]

    def ucb_score(self, parent, child, min_max_stats):
        """
        The score for a node is based on its value, plus an exploration bonus
        based on the prior (the pUCT formula).

        parent: parent node
        child: child node being scored
        min_max_stats: running min/max of backed-up values, for normalization
        """
        # BUGFIX: `math` is never imported in this file; use the numpy
        # equivalents, which the file already imports. pb_c_base / pb_c_init
        # are the pUCT exploration hyper-parameters.
        pb_c = (
            numpy.log(
                (parent.visit_count + self.config.pb_c_base + 1) / self.config.pb_c_base
            )
            + self.config.pb_c_init
        )
        # Exploration strength: ratio of parent to child visit counts, scaled
        # by the network prior — rarely visited children score higher.
        pb_c *= numpy.sqrt(parent.visit_count) / (child.visit_count + 1)
        prior_score = pb_c * child.prior

        if child.visit_count > 0:
            # Mean value Q, normalized into [0, 1] by min_max_stats.
            # For two-player games the child's value is negated: a position
            # favorable for the opponent is unfavorable for the current player.
            value_score = min_max_stats.normalize(
                child.reward
                + self.config.discount
                * (child.value() if len(self.config.players) == 1 else -child.value())
            )
        else:
            # Unvisited children contribute no value term.
            value_score = 0

        # Exploration bonus + normalized value = UCB score.
        return prior_score + value_score

    def backpropagate(self, search_path, value, to_play, min_max_stats):
        """
        At the end of a simulation, we propagate the evaluation all the way up the tree
        to the root.

        Updates each node's value_sum and visit_count along the path, and the
        global min/max statistics used for UCB normalization.

        search_path: nodes from the root to the expanded leaf
        value: the leaf evaluation to propagate
        to_play: id of the player to move at the leaf
        min_max_stats: running min/max of backed-up (Bellman) values
        """
        if len(self.config.players) == 1:
            # Single-player: walk the path from the leaf back to the root.
            for node in reversed(search_path):
                node.value_sum += value  # accumulate the backed-up value
                node.visit_count += 1
                # Track the 1-step bootstrapped value for normalization.
                min_max_stats.update(node.reward + self.config.discount * node.value())

                # Bellman backup: discount the value and add the node reward.
                value = node.reward + self.config.discount * value

        elif len(self.config.players) == 2:
            # Two-player, zero-sum: values are negated from the opponent's
            # perspective.
            for node in reversed(search_path):
                node.value_sum += value if node.to_play == to_play else -value
                node.visit_count += 1
                # node.value() is the value of the state the *opponent* faces,
                # hence the negation in the tracked statistic.
                min_max_stats.update(node.reward + self.config.discount * -node.value())

                # The reward sign depends on who earned it: a reward for the
                # opponent counts against the propagating player.
                value = (
                    -node.reward if node.to_play == to_play else node.reward
                ) + self.config.discount * value

        else:
            raise NotImplementedError("More than two player mode not implemented.")


class Node:
    """A single node of the MCTS search tree.

    Each node corresponds to a (latent) state; its children map actions to the
    nodes reached by taking them.
    """

    def __init__(self, prior):
        self.visit_count = 0    # how many simulations traversed this node
        self.to_play = -1       # id of the player to move here (-1 = unset)
        self.prior = prior      # network prior probability of reaching this node
        self.value_sum = 0      # accumulated backed-up values
        self.children = {}      # action -> child Node
        self.hidden_state = None  # latent state produced by the network
        self.reward = 0         # predicted reward for reaching this node

    def expanded(self):
        """True once expand() has populated the children."""
        return bool(self.children)

    def value(self):
        """Mean backed-up value (value_sum / visit_count), 0 if unvisited."""
        if not self.visit_count:
            return 0
        return self.value_sum / self.visit_count

    def expand(self, actions, to_play, reward, policy_logits, hidden_state):
        """
        We expand a node using the value, reward and policy prediction obtained from the
        neural network.

        Creates one child per action, with priors given by a softmax over the
        logits of the listed actions, and stores the node's own predictions.

        actions: the actions to create children for
        to_play: id of the player to move at this node
        reward: scalar predicted reward
        policy_logits: network policy logits (batch dim first)
        hidden_state: latent state embedding for this node
        """
        self.to_play = to_play
        self.reward = reward
        self.hidden_state = hidden_state

        # Softmax over only the listed actions' logits gives the priors.
        logits = torch.tensor([policy_logits[0][a] for a in actions])
        probabilities = torch.softmax(logits, dim=0).tolist()
        for act, prob in zip(actions, probabilities):
            self.children[act] = Node(prob)

    def add_exploration_noise(self, dirichlet_alpha, exploration_fraction):
        """
        At the start of each search, we add dirichlet noise to the prior of the root to
        encourage the search to explore new actions.

        dirichlet_alpha: concentration of the Dirichlet noise
        exploration_fraction: mixing weight of the noise vs. the prior
        """
        acts = list(self.children.keys())
        noise = numpy.random.dirichlet([dirichlet_alpha] * len(acts))
        mix = exploration_fraction
        for act, eps in zip(acts, noise):
            child = self.children[act]
            # Blend the network prior with the sampled noise.
            child.prior = child.prior * (1 - mix) + eps * mix