import os.path
import time
from typing import Any
import numpy as np
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
from collections import namedtuple
import kornia.augmentation as aug
import cv2
from PIL import Image
import random
import math


# random_shift = nn.Sequential(aug.RandomCrop((80, 80)), nn.ReplicationPad2d(4), aug.RandomCrop((84, 84))).to(dtype=torch.float32)
def make_aug():
    """Build the random-shift image augmentation pipeline (crop to 80x80, pad, re-crop to 84x84).

    Bug fix: the module-level assignment below rebinds the global name ``aug``
    (previously the ``kornia.augmentation`` module import) to an ``nn.Sequential``,
    so a second call to ``make_aug()`` used to fail with AttributeError.  Import
    kornia under a local alias so the function works regardless of that shadowing.
    """
    import kornia.augmentation as K
    return nn.Sequential(
        K.RandomCrop((80, 80)),
        nn.ReplicationPad2d(4),
        K.RandomCrop((84, 84)),
    ).to(dtype=torch.float32)

aug = make_aug()


def select_device(args):
    """Choose the torch device: CUDA if requested and available, else MPS, else CPU.

    :param args: namespace with a boolean ``cuda`` flag (also gates MPS use)
    """
    wants_accel = args.cuda
    if wants_accel and torch.cuda.is_available():
        return torch.device("cuda")
    if wants_accel and torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


import collections

class FrameStackZero(gym.Wrapper):
    """Stack the last ``k`` observations along the channel axis.

    After each reset the stack is pre-filled with all-zero frames, so the
    first returned observation already has the full stacked shape.
    """

    def __init__(self, env, k):
        super().__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        obs_shape = env.observation_space.shape
        self.channels = obs_shape[0]
        stacked_shape = (obs_shape[0] * k,) + tuple(obs_shape[1:])
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=stacked_shape, dtype=np.float32
        )

    def reset_frames(self):
        # Pre-fill the deque with zero frames of a single observation's shape.
        height_width = self.observation_space.shape[1:3]
        for _ in range(self.k):
            self.frames.append(np.zeros((self.channels,) + height_width, dtype=np.float32))

    def reset(self, **kwargs):
        # k-1 zero frames plus the first real observation (deque drops one zero).
        self.reset_frames()
        obs, info = self.env.reset(**kwargs)
        self.frames.append(obs)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Channel-wise concatenation of the k most recent frames.
        return np.concatenate(tuple(self.frames), axis=0)



class ProcessFrame(gym.ObservationWrapper):
    """Convert raw game frames to ``size`` x ``size`` images, optionally grayscale.

    Fix: removed a dead ``global count_frame`` declaration in ``process`` that
    referenced a global which is never defined anywhere.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # New observation space: uint8 image in [0, 255], 1 channel when gray else 3.
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform a single observation frame."""
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Grayscale (optionally), crop, resize and discretise one frame to uint8."""
        x_t = img
        if gray:
            # ITU-R BT.601 luma weights for RGB -> grayscale.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 20:230 — assumes 210(+)-row Atari frames where the margins
        # carry score/border pixels; TODO confirm for other environments.
        x_t = x_t[20:230, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debug helper)."""
        # Ensure the state is a uint8 array; rescale floats to [0, 255].
        if state.dtype != np.uint8:
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Drop singleton dimensions so PIL accepts the array.
        state = state.squeeze()
        img = Image.fromarray(state)
        # Force a PIL-compatible mode for saving.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Games exposing a FIRE action list it at index 1 and have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Pass-through; only reset() is customised.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # After resetting, blindly try actions 1 and 2 to kick the game out of
        # its waiting state (the exact FIRE index is unknown, so both low
        # indices are tried); if a step accidentally ends the episode, reset again.
        # NOTE(review): when step(2) ends the episode, the env is reset but the
        # stale `obs` from before that reset is returned — confirm this matches
        # the intent (the same quirk exists in common reference wrappers).
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Scale and cap raw rewards; replace the reward with a fixed penalty when a life is lost."""

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super().__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0
        self.non_reward_frames = 0
        self.non_reward_frames_limit = 100
        self.non_reward_loss = 0.01

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so later losses can be detected.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Floor-divide the raw reward by 25, capped at 5.
        reward = min(reward // 25, 5)

        # A lost life overrides the shaped reward with a fixed penalty.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward = self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, episodic_life=True, stack_frames = 4, reward_clipping=True, gray = True):
    """Build the Atari environment with the standard DQN wrapper stack.

    NOTE(review): the ``reward_clipping`` parameter is currently unused.
    """
    # Raw frame timing (skipping handled by MaxAndSkipEnv) and no sticky actions.
    env = gym.make(env, frameskip=1, repeat_action_probability=0)
    if episodic_life:
        # Treat each life of a multi-life game as its own episode.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize the initial state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)
    # TODO: compare different resolutions / color modes — both should work in theory.
    env = ProcessFrame(env, gray=gray, size=84)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = FrameStackZero(env, stack_frames)
    env = RewardPenaltyWrapper(env)
    return env


def setup_logger(save_path):
    """Return the module logger writing rotating files to ``save_path/train.log``.

    Bug fix: every call used to attach a fresh ``RotatingFileHandler`` to the
    same shared logger, so calling this twice duplicated every log record.
    A handler is now attached only if one for the same file is not present.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    log_file = os.path.abspath(os.path.join(save_path, 'train.log'))
    already_attached = any(
        isinstance(h, RotatingFileHandler) and getattr(h, 'baseFilename', None) == log_file
        for h in logger.handlers
    )
    if not already_attached:
        handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)

    return logger


def save_model(model_name, loss, best_loss, model):
    """Persist ``model`` under ``saves/`` and snapshot it as a "best" model when ``loss`` improves.

    :param model_name: identifier used in the file names
    :param loss: loss achieved by this model
    :param best_loss: best loss seen so far
    :param model: object passed to ``torch.save``
    :return: the updated best loss

    Bug fix: the best-model snapshot used to be named after the *previous*
    ``best_loss``; it is now named after the ``loss`` actually achieved.
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        torch.save(model, f'saves/best_model_{model_name}_{loss}.dat')
        best_loss = loss

    # Always refresh the "latest" snapshot.
    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss

def unpack_batch(batch):
    """Unpack a replay batch of experience objects into parallel NumPy arrays.

    :param batch: iterable of experiences with ``state``, ``action``, ``reward``
        and ``last_state`` attributes (``last_state is None`` marks episode end)
    :return: (states, actions, rewards float32, dones uint8, last_states)
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        current = np.asarray(exp.state)
        states.append(current)
        actions.append(exp.action)
        rewards.append(exp.reward)
        is_final = exp.last_state is None
        dones.append(is_final)
        # Terminal transitions reuse the current state; the value is masked
        # out by the done flag downstream anyway.
        last_states.append(current if is_final else np.asarray(exp.last_state))
    return (
        np.asarray(states),
        np.array(actions),
        np.array(rewards, dtype=np.float32),
        np.array(dones, dtype=np.uint8),
        np.asarray(last_states),
    )



def calc_loss_dqn(batch, net, tgt_net, gamma, device="cpu"):
    """Compute the DQN MSE loss for a batch of transitions.

    :param batch: iterable of experiences consumed by ``unpack_batch``
    :param net: online network, computes Q(s, a)
    :param tgt_net: target network, provides bootstrap values max_a' Q(s', a')
    :param gamma: discount factor
    :param device: torch device for the batch tensors
    :return: scalar MSE loss tensor
    """
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    # Move batch data to the target device in one step.
    states_v = torch.as_tensor(states, device=device)
    next_states_v = torch.as_tensor(next_states, device=device)
    actions_v = torch.as_tensor(actions, device=device)
    rewards_v = torch.as_tensor(rewards, device=device)
    # Fix: torch.ByteTensor is deprecated; a bool tensor indexes directly
    # without the extra .bool() conversion at the masking site.
    done_mask = torch.as_tensor(dones, dtype=torch.bool, device=device)

    # Q-values of the actions actually taken (gather along the action axis).
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Bootstrap value of the next state; zero it for terminal transitions so
    # bad terminal actions are not propagated.
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0

    # Bellman target r + γ·max_a' Q_target(s', a'), detached from the graph.
    expected_state_action_values = next_state_values.detach() * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)


class EpsilonTracker:
    """Linearly anneal an epsilon-greedy selector's epsilon over training frames."""

    def __init__(self, epsilon_greedy_selector, epsilon_start, epsilon_final, epsilon_frames):
        """
        :param epsilon_greedy_selector: selector whose ``epsilon`` attribute is driven
        :param epsilon_start: initial epsilon value
        :param epsilon_final: floor value epsilon is annealed down to
        :param epsilon_frames: number of frames over which to anneal
        """
        self.epsilon_greedy_selector = epsilon_greedy_selector
        self.epsilon_start = epsilon_start
        self.epsilon_final = epsilon_final
        self.epsilon_frames = epsilon_frames
        self.frame(0)

    def frame(self, frame):
        """Update the selector's epsilon for the given global frame index."""
        annealed = self.epsilon_start - frame / self.epsilon_frames
        self.epsilon_greedy_selector.epsilon = max(self.epsilon_final, annealed)
        

"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """Context manager that records episode rewards, reports training progress,
    and signals when the rolling mean reward reaches the stop threshold."""

    def __init__(self, writer, stop_reward):
        """
        :param writer: tensorboard SummaryWriter-like object (``add_scalar``/``close``)
        :param stop_reward: mean-reward threshold at which training should stop
        """
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # Every finished episode's reward (mean of the last 100 is reported).
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        """Record one finished episode's reward and emit progress metrics.

        :param reward: episode reward
        :param frame: global frame counter at this point
        :param epsilon: current exploration epsilon, if any
        :return: True when the mean of the last 100 rewards exceeds ``stop_reward``
        """
        self.total_rewards.append(reward)
        # Frames per second since the last report.  Bug fix: guard against a
        # zero time delta (possible on coarse clocks), which previously could
        # raise ZeroDivisionError.
        elapsed = time.time() - self.ts
        speed = (frame - self.ts_frame) / max(elapsed, 1e-9)
        self.ts_frame = frame
        self.ts = time.time()
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        # Target reached: tell the caller to stop training.
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """Save ``state`` as ``<save_name>_<score>.pth`` and keep only the ``keep_best`` highest scores.

    Bug fix: the pruning filter used to match only filenames containing the
    literal "best", so files saved under any other ``save_name`` were never
    cleaned up.  It now matches the actual ``save_name`` prefix.

    NOTE(review): the sort key assumes ``score`` renders with an integer
    leading part (``int(x.split('_')[-1].split('.')[0])``) — confirm for
    callers passing floats.
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    # Lowest scores sort first; drop everything but the top keep_best.
    candidates = [
        f for f in os.listdir(save_dir)
        if f.startswith(f'{save_name}_') and f.endswith('.pth')
    ]
    candidates.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
    if len(candidates) > keep_best:
        for old_model in candidates[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save ``state`` as ``<save_name>_epoch_<iter>.pth`` and keep only the ``keep_last`` newest.

    NOTE: the parameter name ``iter`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint_path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, checkpoint_path)

    # Sort by epoch number; delete everything older than the newest keep_last.
    all_checkpoints = sorted(
        (f for f in os.listdir(checkpoint_dir) if "epoch" in f),
        key=lambda x: int(x.split('_')[-1].split('.')[0]),
    )
    if len(all_checkpoints) > keep_last:
        for old_checkpoint in all_checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))
    

def preprocess(obs, device):
    """Normalize the image observation to [0, 1] floats on ``device`` and add a continuation flag.

    ``obs['cont']`` is the negation of ``is_terminal`` with a trailing axis
    appended (NumPy array in, NumPy array out; tensor in, tensor out).
    """
    # Convert to a float tensor and scale pixel values into [0, 1].
    image = torch.tensor(obs['image'], device=device).float()
    obs['image'] = image / 255.0

    terminal = obs['is_terminal']
    if isinstance(terminal, np.ndarray):
        obs['cont'] = np.expand_dims(~terminal, axis=-1)
    else:
        obs['cont'] = (~terminal).unsqueeze(-1)
    return obs

# Standard deviation of a unit normal truncated to [-2, 2]; dividing by it makes
# the truncated distribution's effective std equal the requested one.
_TRUNC_STD_CORRECTION = 0.87962566103423978


def _trunc_normal_fill(module, fan_in, fan_out):
    """Variance-scaling truncated-normal init for a module's weight, zero bias."""
    scale = 2.0 / (fan_in + fan_out)  # 1 / mean(fan_in, fan_out)
    std = np.sqrt(scale) / _TRUNC_STD_CORRECTION
    nn.init.trunc_normal_(
        module.weight.data, mean=0.0, std=std, a=-2.0 * std, b=2.0 * std
    )
    if hasattr(module.bias, "data"):
        module.bias.data.fill_(0.0)


def weight_init(m):
    """Apply variance-scaling truncated-normal init to Linear/Conv layers,
    identity init to LayerNorm.  (The Linear and Conv branches previously
    duplicated the init code verbatim; factored into ``_trunc_normal_fill``.)
    """
    if isinstance(m, nn.Linear):
        _trunc_normal_fill(m, m.in_features, m.out_features)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # Fan counts include the kernel's spatial extent.
        space = m.kernel_size[0] * m.kernel_size[1]
        _trunc_normal_fill(m, space * m.in_channels, space * m.out_channels)
    elif isinstance(m, nn.LayerNorm):
        m.weight.data.fill_(1.0)
        if hasattr(m.bias, "data"):
            m.bias.data.fill_(0.0)

def uniform_weight_init(given_scale):
    """Return an initializer applying variance-scaled uniform init with ``given_scale``.

    (The Linear and Conv branches previously duplicated the init code verbatim;
    factored into the inner ``_uniform_fill`` helper.)
    """

    def _uniform_fill(module, fan_in, fan_out):
        # Uniform(-l, l) has variance l^2/3, so l = sqrt(3·scale/mean_fan)
        # gives the requested variance scaling.
        scale = given_scale / ((fan_in + fan_out) / 2.0)
        limit = np.sqrt(3 * scale)
        nn.init.uniform_(module.weight.data, a=-limit, b=limit)
        if hasattr(module.bias, "data"):
            module.bias.data.fill_(0.0)

    def f(m):
        if isinstance(m, nn.Linear):
            _uniform_fill(m, m.in_features, m.out_features)
        elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            # Fan counts include the kernel's spatial extent.
            space = m.kernel_size[0] * m.kernel_size[1]
            _uniform_fill(m, space * m.in_channels, space * m.out_channels)
        elif isinstance(m, nn.LayerNorm):
            m.weight.data.fill_(1.0)
            if hasattr(m.bias, "data"):
                m.bias.data.fill_(0.0)

    return f


# A single replay transition:
#   timestep: step index within the current episode
#   state: stored observation (last frame only, uint8)
#   action: action taken at this step
#   reward: reward received
#   nonterminal: True while the episode has not ended
Transition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal'))
# Placeholder transition used to pad history/future windows at episode boundaries.
blank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)


class SegmentTree():
    """Sum tree over transition priorities backed by a wrap-around cyclic data buffer.

    Internal nodes store the sum of their children's priorities, so the root
    holds the total priority mass and prefix-sum search runs in O(log size).

    Bug fix: ``__len__`` returned ``self.index - 1`` for a partially-filled
    tree, under-counting by one (and returning -1 when empty); the number of
    stored items is ``self.index``.
    """

    def __init__(self, size):
        """
        :param size: maximum number of stored transitions
        """
        self.index = 0  # next write position in the cyclic buffer
        self.size = size
        self.full = False  # Used to track actual capacity
        # Flat binary tree: size-1 internal (sum) nodes followed by size leaves.
        self.sum_tree = np.zeros((2 * size - 1, ), dtype=np.float32)
        # The actual transition payloads (wrap-around cyclic buffer).
        self.data = np.array([None] * size)
        # Running maximum priority; used as the priority of newly added items.
        self.max = 1  # Initial max value to return (1 = 1^ω)

    # Propagates value up tree given a tree index
    def _propagate(self, index, value):
        """Recompute parent sums from ``index`` up to the root after a leaf change."""
        parent = (index - 1) // 2
        left, right = 2 * parent + 1, 2 * parent + 2
        # A parent's value is the sum of its children's priority mass.
        self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]
        if parent != 0:
            self._propagate(parent, value)

    # Updates value given a tree index
    def update(self, index, value):
        """Set the priority at tree position ``index`` and propagate the change upward."""
        self.sum_tree[index] = value  # Set new value
        self._propagate(index, value)  # Propagate value
        self.max = max(value, self.max)

    def append(self, data, value):
        """Store ``data`` with priority ``value`` at the next cyclic position."""
        self.data[self.index] = data  # Store data in underlying data structure
        # index + size - 1 maps a data index onto its leaf position in the tree.
        self.update(self.index + self.size - 1, value)
        self.index = (self.index + 1) % self.size  # Update index
        # Once the write index wraps to 0 the buffer has been filled at least once.
        self.full = self.full or self.index == 0
        self.max = max(value, self.max)

    # Searches for the location of a value in sum tree
    def _retrieve(self, index, value):
        """Descend from ``index`` to the leaf whose prefix-sum interval contains ``value``."""
        left, right = 2 * index + 1, 2 * index + 2
        if left >= len(self.sum_tree):
            return index  # reached a leaf
        elif value <= self.sum_tree[left]:
            # Value falls inside the left subtree's mass.
            return self._retrieve(left, value)
        else:
            # Skip the left subtree's mass before searching the right subtree;
            # the subtraction cannot go negative because value > left mass here.
            return self._retrieve(right, value - self.sum_tree[left])

    # Searches for a value in sum tree and returns value, data index and tree index
    def find(self, value):
        """Return (priority, data index, tree index) of the leaf containing prefix-sum ``value``."""
        index = self._retrieve(0, value)  # Search for index of item from root
        data_index = index - self.size + 1
        return (self.sum_tree[index], data_index, index)

    # Returns data given a data index
    def get(self, data_index):
        """Return the payload stored at ``data_index`` (wrapping around the buffer)."""
        return self.data[data_index % self.size]

    def total(self):
        """Total priority mass (root of the sum tree)."""
        return self.sum_tree[0]

    def __len__(self):
        # Number of items actually stored (see class docstring for the fix).
        return self.size if self.full else self.index


class PrioritizedReplayBuffer:
    """Prioritized experience replay backed by a SegmentTree of priorities."""

    def __init__(self, params, capacity, exp_source):
        """
        :param params: hyper-parameter dict with keys ``history_length``,
            ``discount_factor``, ``multi_step``, ``priority_weight``,
            ``priority_exponent``
        :param capacity: buffer capacity
        :param exp_source: iterable experience source (may be None)
        """
        self.experience_source_iter = None if exp_source is None else iter(exp_source)
        self.capacity = capacity  # buffer capacity
        self.history = params['history_length']  # number of stacked history frames
        self.discount = params['discount_factor']  # reward discount factor
        self.n = params['multi_step']  # n-step return horizon
        # Importance-sampling exponent β, annealed to 1 over the course of training.
        self.priority_weight = params['priority_weight']  # Initial importance sampling weight β, annealed to 1 over course of training
        self.priority_exponent = params['priority_exponent']
        self.t = 0  # Internal episode timestep counter
        # Transitions live in a cyclic buffer inside a sum tree for priority queries.
        self.transitions = SegmentTree(capacity)  # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities
        self.eps = 1e-6


    def set_exp_source(self, exp_source):
        """Attach (or detach) the experience source after construction or unpickling."""
        self.experience_source_iter = None if exp_source is None else iter(exp_source)


    def __getstate__(self):
        state = self.__dict__.copy()
        # Iterators cannot be pickled, so drop the experience source.
        state['experience_source_iter'] = None
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # After loading, call set_exp_source() to restore the experience source.
        self.experience_source_iter = None


    def __len__(self):
        return len(self.transitions)

    def __iter__(self):
        raise NotImplementedError("Replay buffer is not iterable")

    def sample(self, batch_size):
        '''
        Draw a prioritized batch via stratified sampling over the priority mass.

        :return: tree indices, stacked history states, actions, n-step returns,
            stacked next states, non-terminal flags, importance-sampling weights
        '''
        # Total priority mass — defines a normalised probability distribution.
        p_total = self.transitions.total()  # Retrieve sum of all priorities (used to create a normalised probability distribution)
        # Split the total mass into batch_size equal segments.
        segment = p_total / batch_size  # Batch size number of segments, based on sum over all probabilities
        # Draw one valid sample per segment.
        batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)]  # Get batch of valid samples
        # Unzip the per-sample tuples.
        probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = zip(*batch)
        # Stack the history and future frame windows of all samples.
        states, next_states, = np.stack(states), np.stack(next_states)
        # Concatenate per-sample actions, n-step returns, and non-terminal flags.
        actions, returns, nonterminals = np.concatenate(actions), np.concatenate(returns), np.concatenate(nonterminals)
        # Normalise priorities into probabilities in [0, 1].
        probs = np.array(probs, dtype=np.float32) / p_total  # Calculate normalised probabilities
        # Effective buffer population: full capacity once wrapped, else the write index.
        capacity = self.capacity if self.transitions.full else self.transitions.index
        # Importance-sampling weights w = (N · P(i))^-β correct the sampling bias.
        weights = (capacity * probs) ** -self.priority_weight  # Compute importance-sampling weights w
        # Normalise so the largest weight is 1, keeping updates bounded.
        weights = weights / weights.max() # Normalise by max importance-sampling weight from batch
        return tree_idxs, states, actions, returns, next_states, nonterminals, weights

    def _get_transition(self, idx):
        '''
        Assemble the contiguous window of transitions around data index ``idx``.

        :param idx: data index of the sampled transition
        :return: array of history + n transitions, padded with blank frames at
            episode boundaries
        '''
        # Window buffer of length history + n.
        transition = np.array([None] * (self.history + self.n))
        # The sampled transition sits at the end of the history window.
        transition[self.history - 1] = self.transitions.get(idx)
        # Walk backwards through the history; pad with blanks before episode start.
        for t in range(self.history - 2, -1, -1):  # e.g. 2 1 0
            if transition[t + 1].timestep == 0:
                transition[t] = blank_trans  # If future frame has timestep 0
            else:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
        # Walk forwards through the n future steps; pad with blanks after a terminal.
        for t in range(self.history, self.history + self.n):  # e.g. 4 5 6
            if transition[t - 1].nonterminal:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
            else:
                transition[t] = blank_trans  # If prev (next) frame is terminal
        return transition

    # Returns a valid sample from a segment
    def _get_sample_from_segment(self, segment, i):
        '''
        Draw one valid sample from segment ``i``.

        :param segment: priority mass per segment
        :param i: segment index
        :return: priority, data index, tree index, history state stack, action,
            n-step return, next state stack, non-terminal flag
        '''
        valid = False
        find_count = 2
        while not valid:
            # Sample a priority-mass value; after two failed in-segment draws,
            # fall back to sampling over the whole mass.
            if find_count > 0:
                sample = np.random.uniform(i * segment, (i + 1) * segment)  # Uniformly sample an element from within a segment
            else:
                sample = np.random.uniform(0, self.transitions.total())
                # retrieve priority, data index and tree index for the drawn mass
            prob, idx, tree_idx = self.transitions.find(sample)  # Retrieve sample from tree with un-normalised probability
            # Resample if transition straddled current index or probablity 0
            # The index must leave room for n future steps and the history frames
            # relative to the cyclic write position, and carry nonzero priority.
            if (self.transitions.index - idx) % self.capacity > self.n and (idx - self.transitions.index) % self.capacity >= self.history and prob != 0:
                valid = True  # Note that conditions are valid but extra conservative around buffer index 0

            find_count -= 1

        # Retrieve all required transition data (from t - h to t + n)
        transition = self._get_transition(idx)
        # Create un-discretised state and nth next state
        # NOTE(review): the env wrappers may already stack frames — confirm this
        # additional history stacking is intended.
        state = np.stack([trans.state for trans in transition[:self.history]]).astype(np.float32)
        # Stack of the n-th next state's history frames.
        next_state = np.stack([trans.state for trans in transition[self.n:self.n + self.history]]).astype(np.float32)
        # Discrete action to be used as index
        action = np.array([transition[self.history - 1].action], dtype=np.int64)
        # Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)
        R = np.array([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))], dtype=np.float32)
        # Mask for non-terminal nth next states
        nonterminal = np.array([transition[self.history + self.n - 1].nonterminal], dtype=np.float32)

        return prob, idx, tree_idx, state, action, R, next_state, nonterminal

    def _add(self, sample):
        '''
        Append one transition to the buffer with the current maximum priority.

        :param sample: experience tuple whose first element holds
            (state, action, reward, terminal)
        '''
        state, action, reward, terminal = sample[0][0], sample[0][1], sample[0][2], sample[0][3]
        # Store only the last frame, discretised to uint8 to save memory.
        state = (state[-1] * 255).astype(np.uint8)  # Only store last frame and discretise to save memory
        # `t` marks the step index within the current episode.
        self.transitions.append(Transition(self.t, state, action, reward, not terminal), self.transitions.max)  # Store new transition with maximum priority
        # Restart the step counter when the episode ends.
        self.t = 0 if terminal else self.t + 1  # Start new episodes with t = 0

    def populate(self, samples):
        """
        Pull entries from the experience source into the replay buffer.

        :param samples: how many samples to populate
        """
        for _ in range(samples):
            entry = next(self.experience_source_iter)
            self._add(entry)

    
    def update_priorities(self, idxs, priorities):
        '''
        Update tree priorities, typically from per-sample TD losses.

        :param idxs: tree indices as returned by sample()
        :param priorities: new (raw) priority values
        '''
        # Clamp away zeros, apply the priority exponent ω, then write back into
        # the sum tree at each given index.
        priorities = np.maximum(priorities, self.eps)  # Avoid NaN in tree
        priorities = np.power(priorities, self.priority_exponent)
        [self.transitions.update(idx, priority) for idx, priority in zip(idxs, priorities)]