import os.path
import copy
from typing import Any
import numpy as np
import sys
import torch
from torch.nn.utils.rnn import pad_sequence
import random
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
from dataclasses import dataclass, field
from typing import Tuple, Optional
import cv2
from PIL import Image
import threading
import lib.model as model
import math
import time
from tensorboardX import SummaryWriter



def save_best_model(score, state, save_dir, save_name, keep_best=5):
    """Save `state` to '<save_name>_<score>.pth' and prune low-score files.

    Only the `keep_best` files with the highest scores (for this save_name)
    are kept in `save_dir`.

    Bug fix: the old pruning filter required the literal substring "best" in
    the file name, so unless `save_name` contained "best" nothing was ever
    deleted. It also parsed scores with int(), which mis-ordered float scores.
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    # Collect only the files this function writes: '<save_name>_<number>.pth'.
    prefix = f'{save_name}_'
    suffix = '.pth'
    candidates = []
    for fname in os.listdir(save_dir):
        if not (fname.startswith(prefix) and fname.endswith(suffix)):
            continue
        try:
            candidates.append((float(fname[len(prefix):-len(suffix)]), fname))
        except ValueError:
            continue  # not a score-named file (e.g. an epoch checkpoint)

    candidates.sort()  # ascending by score; lowest scores are pruned first
    if len(candidates) > keep_best:
        for _, old_model in candidates[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` to '<save_name>_epoch_<iter>.pth', keeping the newest files.

    Only the `keep_last` checkpoints with the highest epoch number survive.

    Fix: the exists()/makedirs() pair was race-prone (TOCTOU); use the atomic
    `exist_ok=True` form instead.
    """
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint_path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, checkpoint_path)

    # Sort epoch-named files by their trailing epoch number (oldest first).
    all_checkpoints = sorted(
        (f for f in os.listdir(checkpoint_dir) if "epoch" in f),
        key=lambda f: int(f.split('_')[-1].split('.')[0]),
    )
    if len(all_checkpoints) > keep_last:
        for old_checkpoint in all_checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """Context manager that records episode rewards, reports progress to
    stdout and TensorBoard, and signals when the target reward is reached."""

    def __init__(self, writer, stop_reward):
        """
        writer: TensorBoard SummaryWriter (closed on context exit)
        stop_reward: mean-reward threshold that ends training
        """
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # Every episode reward observed during training.
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        """Record one episode reward observed at frame index `frame`.

        reward: episode reward sample
        frame: number of frames processed so far
        epsilon: current exploration epsilon, if any

        Returns True once the mean of the last 100 rewards exceeds the
        stop threshold, False otherwise.
        """
        self.total_rewards.append(reward)
        # Frames per second since the previous report.
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        # Running mean over the most recent 100 episodes.
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        for tag, value in (("speed", speed),
                           ("reward_100", mean_reward),
                           ("reward", reward)):
            self.writer.add_scalar(tag, value, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    """Per-environment recurrent agent state carried between steps."""
    # Latest observation (acts as next_obs), with a leading batch dim of 1.
    obs: torch.Tensor
    # Size of the discrete action space (length of the one-hot vectors).
    action_dim: int
    # One-hot of the action that led to `obs`; initialized in __post_init__.
    last_action: torch.Tensor = field(init=False)
    # Reward received on the way to `obs`.
    # Bug fix: a plain tensor default is a single object shared by every
    # AgentState instance; default_factory gives each instance its own tensor.
    last_reward: torch.Tensor = field(default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    # Recurrent (h, c) state; None until the network is first called.
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
    # Q-values from the most recent network call; set lazily by the agent.
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # Before the first step there is no previous action: all-zero vector.
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        """Advance the state after one environment step.

        obs: observation before the step (unused here)
        action: scalar action executed
        reward: reward received for the step
        done: terminal flag (unused here)
        next_obs: resulting observation (numpy array)
        """
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # One-hot encode the executed action.
        self.last_action = torch.FloatTensor([[1 if i == action else 0 for i in range(self.action_dim)]])
        self.last_reward = torch.FloatTensor([[reward]])



class R2D2Agent(ptan.agent.BaseAgent):
    """Epsilon-greedy recurrent DQN agent for ptan experience sources."""

    def __init__(self, params, net, epsilon, env, device='cpu'):
        """
        params: hyper-parameter dict
        net: recurrent Q-network, called with an AgentState
        epsilon: exploration probability
        env: environment (used for resets and random action sampling)
        device: torch device string
        """
        super().__init__()
        self.params = params
        self.net = net
        self.device = device
        self.epsilon = epsilon
        self.env = env

    def initial_state(self):
        """Return a fresh AgentState seeded with the env's reset observation."""
        first_obs = self.env.reset()[0]
        return AgentState(
            torch.from_numpy(first_obs).unsqueeze(0),
            self.env.action_space.n,
        )

    def __call__(self, states, agent_states):
        """
        Convert observations and states into actions to take
        :param states: list of environment states to process
        :param agent_states: list of states with the same length as observations
        :return: tuple of actions, states
        """
        # Agent states are mutated in place by __inner_call__.
        actions = [
            self.__inner_call__(state, agent_state)[0]
            for state, agent_state in zip(states, agent_states)
        ]
        return actions, agent_states

    def __inner_call__(self, state, agent_state: AgentState):
        """
        Convert one observation/state pair into an action.
        :param state: environment state to process
        :param agent_state: recurrent state for this environment
        :return: tuple of (action, updated agent_state)
        """
        with torch.no_grad():
            q_value, hidden = self.net(agent_state)

        # Epsilon-greedy: random action with probability epsilon,
        # otherwise the argmax of the predicted Q-values.
        if random.random() < self.epsilon:
            chosen = self.env.action_space.sample()
        else:
            chosen = q_value.argmax(dim=1).item()

        agent_state.hidden_state = hidden
        agent_state.q_value = q_value

        return chosen, agent_state
    


def select_device(args):
    """Pick the torch device: CUDA, then MPS, when `args.cuda` is set; else CPU."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


import collections

class FrameStackZero(gym.Wrapper):
    """Stacks the last k observations along the channel axis; missing history
    at the start of an episode is filled with all-zero frames."""

    def __init__(self, env, k):
        super(FrameStackZero, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        base_shape = env.observation_space.shape
        self.channels = base_shape[0]
        # Stacked space: k copies of the original channel dimension.
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(base_shape[0] * k, *base_shape[1:]), dtype=np.float32
        )

    def reset_frames(self):
        """Pre-fill the deque with k zero frames shaped like one observation."""
        blank_shape = (self.channels,) + self.observation_space.shape[1:3]
        for _ in range(self.k):
            self.frames.append(np.zeros(blank_shape, dtype=np.float32))

    def reset(self, **kwargs):
        self.reset_frames()
        obs, info = self.env.reset(**kwargs)
        self.frames.append(obs)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Concatenate the stored frames along the channel axis (no LazyFrames).
        return np.concatenate(list(self.frames), axis=0)



class ProcessFrame(gym.ObservationWrapper):
    """
    Converts raw game frames (the observation space) into size x size images,
    grayscale by default.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # New observation space: 0..255 images of shape (size, size, 1 or 3).
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform one raw observation into the processed frame."""
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Crop, optionally grayscale, and resize one frame to (size, size).

        Bug fix: removed a stray `global count_frame` statement referring to a
        variable that is never defined anywhere in the module.
        """
        x_t = img
        if gray:
            # Luma conversion with the standard Rec. 601 weights.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 19..168 — presumably drops the score bar/borders of a
        # 210-row Atari frame; TODO confirm for other input sizes.
        x_t = x_t[19:169, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8.
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        img = Image.fromarray(state)
        # Convert image to mode 'L' (grayscale) if it's not compatible.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Such games expose a FIRE action and have at least three actions.
        meanings = env.unwrapped.get_action_meanings()
        assert meanings[1] == 'FIRE'
        assert len(meanings) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # We don't know which button continues the game, so after resetting we
        # try the first couple of actions in turn (assumed to include FIRE).
        # If a press accidentally ends the episode, reset again and carry on.
        self.env.reset(seed=seed, options=options)
        for press in (1, 2):
            obs, _, done, _, info = self.env.step(press)
            if done:
                self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Overrides the reward with `life_loss_penalty` on any step where the
    agent loses a life (tracked via info['lives'])."""

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        # NOTE(review): frame_penalty is stored but never applied in step().
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count for loss detection.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        lives_now = info.get('lives', self.previous_lives)
        if lives_now < self.previous_lives:
            # A life was lost on this step: replace the reward with a penalty.
            reward = self.life_loss_penalty
            self.previous_lives = lives_now

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, episodic_life=True, reward_clipping=True, gray = True):
    """Create an Atari env by id and apply the project's wrapper stack.

    env: gym environment id string
    episodic_life: treat each life as its own episode during training
    reward_clipping: NOTE(review): accepted but never used in this function —
        confirm whether reward clipping was meant to be applied here
    gray: convert frames to grayscale
    """
    env = gym.make(env, frameskip=4, repeat_action_probability=0)
    if episodic_life:
        # Simulate a single-life game for multi-life titles.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomized no-op starts to diversify initial states.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)
    # TODO: compare different resolutions / color settings.
    env = ProcessFrame(env, gray=gray, size=84)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = RewardPenaltyWrapper(env)
    return env


def setup_logger(save_path):
    """Return a module-level logger writing rotating logs to save_path/train.log.

    Bug fix: the previous version attached a new RotatingFileHandler on every
    call, so calling setup_logger() more than once duplicated each log line.
    A handler for the same file is now only attached once.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    log_file = os.path.abspath(os.path.join(save_path, 'train.log'))
    already_attached = any(
        isinstance(h, RotatingFileHandler) and getattr(h, 'baseFilename', None) == log_file
        for h in logger.handlers
    )
    if not already_attached:
        handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)

    return logger


def save_model(model_name, loss, best_loss, model):
    """Persist the model and track the best loss seen so far.

    Saves 'saves/model_<model_name>.dat' on every call; additionally saves a
    'best_model' snapshot when `loss` improves on `best_loss`.

    Bug fix: the best-model file used to be named with the OLD best_loss, so
    the filename never matched the checkpoint it contained; it now uses the
    new `loss`. Also replaced the race-prone exists()/makedirs() pair.

    Returns the updated best loss.
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        torch.save(model, f'saves/best_model_{model_name}_{loss}.dat')
        best_loss = loss

    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss


class LocalBuffer:
    '''store transitions of one episode 这个应该是一个局部缓冲区，存储一个episode的所有数据'''
    def __init__(self, action_dim: int, params: dict):
        '''
        action_dim: 动作的维度
        hidden_dim: 隐藏层的维度 todo 作用
        '''
        
        self.params = params
        self.action_dim = action_dim
        self.gamma = self.params['gamma']
        self.hidden_dim = self.params['hidden_dim']
        self.forward_steps = self.params['forward_steps'] # n步dqn计算时的前向步数
        self.learning_steps = self.params['learning_steps']
        self.burn_in_steps = self.params['burn_in_steps']
        self.block_length = self.params['block_length']
        self.curr_burn_in_steps = 0
        
    def __len__(self):
        return self.size
    
    def reset(self, init_obs: np.ndarray):
        '''
        这个函数的作用 初始化buffer缓冲区
        1. 在动作器每个环境重置时都会调用一次

        init_obs: 初始的观察值
        '''
        self.obs_buffer = [init_obs] # 这里的obs_buffer 对应着相同位置的action表示的是到达该obs所执行的动作
        # np.array([1 if i == 0 else 0 for i in range(self.action_dim) 这里实际时创建一个one-hot向量，这个one-hot变量只的是动作的向量，指向的是none动作
        # 这里指的是初始初始状态时，上一个执行的动作是none，存储的是动作的one-hont编码
        self.last_action_buffer = [np.array([1 if i == 0 else 0 for i in range(self.action_dim)], dtype=bool)]
        # 这里指的是初始状态时，上一个执行的奖励是0
        self.last_reward_buffer = [0]
        # 隐藏状态的初始值是0
        self.hidden_buffer = [np.zeros((2, self.hidden_dim), dtype=np.float32)]
        self.action_buffer = [] # 存储动作的标量值，每次游戏结束时都会清空缓冲区
        self.reward_buffer = [] # 存储的也是环境的奖励 ，每次游戏结束时都会清空缓冲区
        self.qval_buffer = [] # 存储的是动作的q值，每次游戏结束时都会清空缓冲区
        self.curr_burn_in_steps = 0 # todo
        self.size = 0 # 缓冲区的大小
        self.sum_reward = 0 # 存储的是应该是一个生命周期内获取的总奖励 todo
        self.done = False

    def add(self, action: int, reward: float, next_obs: np.ndarray, q_value: np.ndarray, hidden_state: np.ndarray):
        '''
        action: 执行的动作
        reward: 得到的奖励
        next_obs: 得到的状态
        q_value: 得到的Q值
        hidden_state: 模型隐藏层的状态
        '''
        self.action_buffer.append(action)
        self.reward_buffer.append(reward)
        self.hidden_buffer.append(hidden_state)
        self.obs_buffer.append(next_obs)
        self.last_action_buffer.append(np.array([1 if i == action else 0 for i in range(self.action_dim)], dtype=bool))
        self.last_reward_buffer.append(reward)
        self.qval_buffer.append(q_value)
        self.sum_reward += reward
        self.size += 1
    
    def finish(self, last_qval: np.ndarray = None) -> Tuple:
        '''
        last_qval: 在游戏结束时会调用这个方法，并且这里值为None/当达到最大步数时，会调用这个方法，并且这里值为为下一个Q值
        '''
        assert self.size <= self.block_length
        # assert len(self.last_action_buffer) == self.curr_burn_in_steps + self.size + 1

        # 这行代码在计算需要多少个序列(sequences)来存储当前缓冲区的所有数据:
        # todo 估计就是训练时时一个连续序列一个序列取训练的 todo
        num_sequences = math.ceil(self.size/self.learning_steps)

        # 最大的前进步数，这里应该就是限制最大的训练步数 todo 作用
        max_forward_steps = min(self.size, self.forward_steps)
        # 感觉有点像n步DQN的计算变量 todo
        n_step_gamma = [self.gamma**self.forward_steps] * (self.size-max_forward_steps)

        # last_qval is none means episode done 
        if last_qval is not None:
            # todo 这里是干嘛
            self.qval_buffer.append(last_qval)
            n_step_gamma.extend([self.gamma**i for i in reversed(range(1, max_forward_steps+1))])
        else:
            self.done = True # 游戏结束标识
            self.qval_buffer.append(np.zeros_like(self.qval_buffer[0])) # 如果游戏结束了，那么最新的Q值则是0
            # todo
            n_step_gamma.extend([0 for _ in range(max_forward_steps)]) # set gamma to 0 so don't need 'done'
        
        # 将N步DQN的gamma转换为矩阵，方便计算
        n_step_gamma = np.array(n_step_gamma, dtype=np.float32)

        obs = np.stack(self.obs_buffer) # 将所有的观察值堆叠成一个数组
        last_action = np.stack(self.last_action_buffer) # 将所有的上一个动作堆叠成一个数组
        last_reward = np.array(self.last_reward_buffer, dtype=np.float32) # 将所有的上一个奖励堆叠成一个数组
        
        '''
        slice(start, stop, step) 是Python的切片操作，有三个参数：

        start: 起始索引
        stop: 结束索引（不包含）
        step: 步长

        这等价于更常见的切片语法：self.hidden_buffer[0:self.size:self.learning_steps]

        所以以下操作是按照固定的步长为每一个训练的序列提取起始的隐藏层状态用于训练
        '''
        hiddens = np.stack(self.hidden_buffer[slice(0, self.size, self.learning_steps)])

        actions = np.array(self.action_buffer, dtype=np.uint8)

        qval_buffer = np.concatenate(self.qval_buffer)
        # 这里的作用应该是填充奖励的长度，可能是为了对齐某种长度
        reward_buffer = self.reward_buffer + [0 for _ in range(self.forward_steps-1)]
        # 这里是计算n步dqn时，每一个时刻的前向forward_steps所累积的奖励
        '''
        # 衰减因子序列为：
        [0.9^2, 0.9^1, 0.9^0] = [0.81, 0.9, 1.0]

        # 如果 reward_buffer = [1, 2, 3, 4, 0, 0]
        # 卷积计算 n 步累积奖励：
        # 1*0.81 + 2*0.9 + 3*1.0 = 4.71
        # 2*0.81 + 3*0.9 + 4*1.0 = 7.02
        # 3*0.81 + 4*0.9 + 0*1.0 = 6.03
        # 4*0.81 + 0*0.9 + 0*1.0 = 3.24
        '''
        n_step_reward = np.convolve(reward_buffer, 
                                    [self.gamma**(self.forward_steps-1-i) for i in range(self.forward_steps)],
                                    'valid').astype(np.float32)

        '''
        curr_burn_in_steps 在 R2D2 算法中是用来跟踪当前的 burn-in 步数的变量。它的主要作用是：

        维护 LSTM 状态的连续性
        在序列之间保持 LSTM 隐藏状态的连续性
        确保 LSTM 在训练时有正确的上下文信息

        todo 怎么使用？
        '''
        burn_in_steps = np.array([min(i*self.learning_steps+self.curr_burn_in_steps, self.burn_in_steps) for i in range(num_sequences)], dtype=np.uint8)
        # 这里应该是存储实际用于训练的学习步数，如果对于结尾部分需要考虑到不是learning_steps倍数的情况，所以采用的是self.size-i*self.learning_steps
        learning_steps = np.array([min(self.learning_steps, self.size-i*self.learning_steps) for i in range(num_sequences)], dtype=np.uint8)
        # 同样也是类似的道理，这里是n步dqn的向前看的步数，对于结尾部分 todo 调试看
        forward_steps = np.array([min(self.forward_steps, self.size+1-np.sum(learning_steps[:i+1])) for i in range(num_sequences)], dtype=np.uint8)
        assert forward_steps[-1] == 1 and burn_in_steps[0] == self.curr_burn_in_steps
        # assert last_action.shape[0] == self.curr_burn_in_steps + np.sum(learning_steps) + 1

        # R2D2中TD误差和优先级计算的详细解析 todo 调试查看
        # 从qval_buffer中提取未来状态的最大Q值,这里的未来状态应该是越过了训练步长的状态
        max_qval = np.max(qval_buffer[max_forward_steps:self.size+1], axis=1)
        # 使用边缘填充('edge')来处理序列末尾的值
        max_qval = np.pad(max_qval, (0, max_forward_steps-1), 'edge')
        # 选择实际上每一步执行动作的Q值
        target_qval = qval_buffer[np.arange(self.size), actions]

        # 算n步TD误差的绝对值
        # n_step_reward: n步累积奖励
        # n_step_gamma * max_qval: 折扣后的未来最大Q值
        # target_qval: 当前动作的Q值
        # 这个误差用于优先级回放
        # 误差雨大则说明预测的Q值和实际的Q值差距大，优先级回放会更倾向于选择这种误差大的样本进行训练
        td_errors = np.abs(n_step_reward + n_step_gamma * max_qval - target_qval, dtype=np.float32)
        # 计算优先级:
        # 创建优先级数组
        # 使用混合TD误差计算优先级
        # 混合TD误差结合了最大和平均TD误差
        # 这些优先级用于优先级经验回放采样
        priorities = np.zeros(self.block_length//self.learning_steps, dtype=np.float32)
        # 更新优先级
        priorities[:num_sequences] = calculate_mixed_td_errors(td_errors, learning_steps)

        # save burn in information for next block
        # 看起来像保留一部分之前的数据，用于维护LSTM的上下文状态
        self.obs_buffer = self.obs_buffer[-self.burn_in_steps-1:]
        self.last_action_buffer = self.last_action_buffer[-self.burn_in_steps-1:]
        self.last_reward_buffer = self.last_reward_buffer[-self.burn_in_steps-1:]
        self.hidden_buffer = self.hidden_buffer[-self.burn_in_steps-1:]
        self.action_buffer.clear()
        self.reward_buffer.clear()
        self.qval_buffer.clear()
        self.curr_burn_in_steps = len(self.obs_buffer)-1
        self.size = 0
        
        # 将本次的数据打包成Block对象，一个block包含一个周期内所有的序列数据
        block = Block(obs, last_action, last_reward, actions, n_step_reward, n_step_gamma, hiddens, num_sequences, burn_in_steps, learning_steps, forward_steps)
        return [block, priorities, self.sum_reward if self.done else None]
    

def calculate_mixed_td_errors(td_error, learning_steps):
    """Collapse per-step TD errors into one priority per sequence.

    td_error: flat array of absolute TD errors for all steps
    learning_steps: number of steps in each consecutive sequence

    Each sequence's priority is the weighted mix 0.9*max + 0.1*mean of its
    TD errors, as in the R2D2 paper.
    """
    mixed_td_errors = np.empty(learning_steps.shape, dtype=td_error.dtype)
    offset = 0
    for seq_idx, steps in enumerate(learning_steps):
        segment = td_error[offset:offset + steps]
        mixed_td_errors[seq_idx] = 0.9 * segment.max() + 0.1 * segment.mean()
        offset += steps

    return mixed_td_errors


@dataclass
class Block:
    """Fixed-layout container for one chunk of episode data, produced by
    LocalBuffer.finish and stored/sampled by ReplayBuffer."""

    # Observation sequence (includes the burn-in prefix).
    # shape: (sequence_length, *obs_shape)
    obs: np.ndarray

    # One-hot encoding of the previous step's action.
    # shape: (sequence_length, action_dim)
    last_action: np.ndarray

    # Reward received on the previous step.
    # shape: (sequence_length,)
    last_reward: np.ndarray

    # Scalar action executed at each step.
    # shape: (sequence_length,)
    action: np.ndarray

    # Discounted n-step return for each step.
    # shape: (sequence_length,)
    n_step_reward: np.ndarray

    # Per-step discount applied to the bootstrap value (0 past terminal).
    # shape: (sequence_length,)
    gamma: np.ndarray

    # Initial LSTM hidden state (h and c) of each learning sequence.
    # shape: (num_sequences, 2, hidden_dim)
    hidden: np.ndarray

    # Number of learning sequences contained in this block.
    num_sequences: int

    # Burn-in (LSTM warm-up) steps available for each sequence.
    # shape: (num_sequences,)
    burn_in_steps: np.ndarray

    # Actual learning steps in each sequence (the last may be shorter).
    # shape: (num_sequences,)
    learning_steps: np.ndarray

    # n-step lookahead length for each sequence.
    # shape: (num_sequences,)
    forward_steps: np.ndarray

    

def get_epsilon(actor_id: int, base_eps: float, alpha: float, num_actors: int):
    '''
    Per-actor exploration epsilon (Ape-X style spread).

    actor_id: this actor's index (actors get distinct epsilons)
    base_eps: base epsilon value
    alpha: spread parameter
    num_actors: total number of actors

    Bug fix: with a single actor the original raised ZeroDivisionError on
    num_actors - 1; a lone actor now simply uses base_eps.

    Example: actor 1 of 8 with alpha 0.4 gives exponent
    1 + (1 / (8 - 1)) * 0.4 = 1.0571..., i.e. base_eps**1.0571.
    '''
    if num_actors <= 1:
        return base_eps

    exponent = 1 + actor_id / (num_actors-1) * alpha
    return base_eps**exponent



class ReplayBuffer:
    def __init__(self, sample_queue_list, batch_queue, priority_queue, params: dict):

        self.params = params
        self.buffer_capacity = self.params['buffer_capacity'] # 缓冲区的大小
        self.sequence_len = self.params['learning_steps'] # todo
        self.num_sequences = self.params['buffer_capacity']//self.sequence_len # todo
        self.block_len = self.params['block_length'] # todo block的长度 block时做什么用的
        self.num_blocks = self.buffer_capacity // self.block_len # todo 计算有多少个block
        self.seq_pre_block = self.block_len // self.sequence_len # todo 每个block中包含的序列数
        self.add_frame_idx = 0

        self.block_ptr = 0 # todo

        # todo 感觉应该是实现了一个优先级缓冲区
        self.priority_tree = PriorityTree(self.num_sequences, self.params['alpha'], self.params['importance_sampling_exponent'])

        self.batch_size = self.params['batch_size']

        self.env_steps = 0
        
        self.num_episodes = 0
        self.episode_reward = 0

        self.training_steps = 0
        self.last_training_steps = 0
        self.sum_loss = 0

        self.size = 0 # 这个应该是最新的数据采集量
        self.last_size = 0 # 这个应该是上一次的数据采集量

        self.buffer = [None] * self.num_blocks

        # todo 这三个队列是什么意思？
        self.sample_queue_list, self.batch_queue, self.priority_queue = sample_queue_list, batch_queue, priority_queue

    def __len__(self):
        return self.size

    def run(self):
        self.lock = threading.Lock()
        self.writer = SummaryWriter(comment="-" + "kungfulmaster-r2d2")
        # todo 这三个进程的作用分别是什么？
        background_thread = threading.Thread(target=self.add_data, daemon=True)
        background_thread.start()

        background_thread = threading.Thread(target=self.prepare_data, daemon=True)
        background_thread.start()

        background_thread = threading.Thread(target=self.update_data, daemon=True)
        background_thread.start()

        log_interval = self.params['log_interval']

        # 这边看起来只是打印日志，真做事的还是上面三个线程
        while True:
            print(f'buffer size: {self.size}')
            print(f'buffer update speed: {(self.size-self.last_size)/log_interval}/s')
            self.last_size = self.size
            print(f'number of environment steps: {self.env_steps}')
            if self.num_episodes != 0:
                print(f'average episode return: {self.episode_reward/self.num_episodes:.4f}')
                # print(f'average episode return: {self.episode_reward/self.num_episodes:.4f}')
                self.episode_reward = 0
                self.num_episodes = 0
            print(f'number of training steps: {self.training_steps}')
            print(f'training speed: {(self.training_steps-self.last_training_steps)/log_interval}/s')
            if self.training_steps != self.last_training_steps:
                print(f'loss: {self.sum_loss/(self.training_steps-self.last_training_steps):.4f}')
                self.last_training_steps = self.training_steps
                self.sum_loss = 0
            self.last_env_steps = self.env_steps
            print()

            if self.training_steps == self.params['training_steps']:
                break
            else:
                time.sleep(log_interval)

    def prepare_data(self):
        # 要等到缓冲区有足够的数据才能开始采样
        while self.size < self.params['learning_starts']:
            time.sleep(1)

        while True:
            # 知道缓冲区满了才开始采样数据
            if not self.batch_queue.full():
                data = self.sample_batch()
                self.batch_queue.put(data)
            else:
                time.sleep(0.1)

    def add_data(self):
         with RewardTracker(self.writer, self.params['stop_reward']) as reward_tracker:
            while True:
                for sample_queue in self.sample_queue_list:
                    if not sample_queue.empty():
                        data = sample_queue.get_nowait()
                        if isinstance(data[0], Block):
                            # 如果是Block类型的数据，则添加到缓冲区
                            self.add(*data)
                        else:
                            self.add_frame_idx += 1
                            reward_tracker.reward(data[0], self.add_frame_idx)

    def update_data(self):

        while True:
            if not self.priority_queue.empty():
                data = self.priority_queue.get_nowait()
                self.update_priorities(*data)
            else:
                time.sleep(0.1)


    def add(self, block: Block, priority: np.array, episode_reward: float):
        '''
        block: 整个block的内容，包含了一个episode的所有数据
        priority: 该block的优先级
        episode_reward: 该episode的总奖励，如果是探索阶段则为0
        '''

        with self.lock:

            idxes = np.arange(self.block_ptr*self.seq_pre_block, (self.block_ptr+1)*self.seq_pre_block, dtype=np.int64)

            # 根据计算的优先级更新对应索引的优先级
            # todo 后续看看这个优先级是怎么计算的
            self.priority_tree.update(idxes, priority)

            if self.buffer[self.block_ptr] is not None:
                self.size -= np.sum(self.buffer[self.block_ptr].learning_steps).item()

            self.size += np.sum(block.learning_steps).item()
            
            # 将数据存储到缓冲区中
            self.buffer[self.block_ptr] = block

            # todo
            self.env_steps += np.sum(block.learning_steps, dtype=np.int32)

            # 更新缓冲区的指针，这里模拟的是一个循环缓冲区的效果
            self.block_ptr = (self.block_ptr+1) % self.num_blocks
            if episode_reward:
                self.episode_reward += episode_reward # 这里应该是记录一个episode的总奖励，应该是用于计算平均奖励值
                self.num_episodes += 1

    def sample_batch(self):
        '''sample one batch of training data'''
        batch_obs, batch_last_action, batch_last_reward, batch_hidden, batch_action, batch_reward, batch_gamma = [], [], [], [], [], [], []
        burn_in_steps, learning_steps, forward_steps = [], [], []

        with self.lock:
            
            # 这里的idxes应该是对应的序列索引
            idxes, is_weights = self.priority_tree.sample(self.batch_size)

            block_idxes = idxes // self.seq_pre_block  # 确定在哪个block，表示每个索引在哪个block的索引
            sequence_idxes = idxes % self.seq_pre_block  # 确定block中的哪个序列 标识每个索引在block中的序列索引


            for block_idx, sequence_idx  in zip(block_idxes, sequence_idxes):

                block = self.buffer[block_idx]

                assert sequence_idx < block.num_sequences, 'index is {} but size is {}'.format(sequence_idx, self.seq_pre_block_buf[block_idx])

                burn_in_step = block.burn_in_steps[sequence_idx]
                learning_step = block.learning_steps[sequence_idx]
                forward_step = block.forward_steps[sequence_idx]
                
                start_idx = block.burn_in_steps[0] + np.sum(block.learning_steps[:sequence_idx])

                obs = block.obs[start_idx-burn_in_step:start_idx+learning_step+forward_step]
                last_action = block.last_action[start_idx-burn_in_step:start_idx+learning_step+forward_step]
                last_reward = block.last_reward[start_idx-burn_in_step:start_idx+learning_step+forward_step]
                obs, last_action, last_reward = torch.from_numpy(obs), torch.from_numpy(last_action), torch.from_numpy(last_reward)
                
                start_idx = np.sum(block.learning_steps[:sequence_idx])
                end_idx = start_idx + block.learning_steps[sequence_idx]
                action = block.action[start_idx:end_idx]
                reward = block.n_step_reward[start_idx:end_idx]
                gamma = block.gamma[start_idx:end_idx]
                hidden = block.hidden[sequence_idx]
                
                batch_obs.append(obs)
                batch_last_action.append(last_action)
                batch_last_reward.append(last_reward)
                batch_action.append(action)
                batch_reward.append(reward)
                batch_gamma.append(gamma)
                batch_hidden.append(hidden)

                burn_in_steps.append(burn_in_step)
                learning_steps.append(learning_step)
                forward_steps.append(forward_step)

            batch_obs = pad_sequence(batch_obs, batch_first=True)
            batch_last_action = pad_sequence(batch_last_action, batch_first=True)
            batch_last_reward = pad_sequence(batch_last_reward, batch_first=True)

            is_weights = np.repeat(is_weights, learning_steps)


            data = (
                batch_obs,
                batch_last_action,
                batch_last_reward,
                torch.from_numpy(np.stack(batch_hidden)).transpose(0, 1),

                torch.from_numpy(np.concatenate(batch_action)).unsqueeze(1),
                torch.from_numpy(np.concatenate(batch_reward)),
                torch.from_numpy(np.concatenate(batch_gamma)),

                torch.ByteTensor(burn_in_steps),
                torch.ByteTensor(learning_steps),
                torch.ByteTensor(forward_steps),

                idxes,
                torch.from_numpy(is_weights.astype(np.float32)),
                self.block_ptr,

                self.env_steps
            )

        return data

    def update_priorities(self, idxes: np.ndarray, td_errors: np.ndarray, old_ptr: int, loss: float):
        """Update priorities of sampled transitions"""
        """更新优先级，应该是在训练的时候利用损失"""
        with self.lock:

            # discard the idxes that already been replaced by new data in replay buffer during training
            if self.block_ptr > old_ptr:
                # range from [old_ptr, self.seq_ptr)
                mask = (idxes < old_ptr*self.seq_pre_block) | (idxes >= self.block_ptr*self.seq_pre_block)
                idxes = idxes[mask]
                td_errors = td_errors[mask]
            elif self.block_ptr < old_ptr:
                # range from [0, self.seq_ptr) & [old_ptr, self,capacity)
                mask = (idxes < old_ptr*self.seq_pre_block) & (idxes >= self.block_ptr*self.seq_pre_block)
                idxes = idxes[mask]
                td_errors = td_errors[mask]

            self.priority_tree.update(idxes, td_errors)

        self.training_steps += 1
        self.sum_loss += loss



class PriorityTree:
    """Sum-tree for prioritized experience replay.

    Leaves hold per-sequence priorities (td_error ** prio_exponent); every
    internal node stores the sum of its children, so proportional sampling
    costs O(log capacity) per draw.
    """

    def __init__(self, capacity, prio_exponent, is_exponent):
        """
        capacity: number of priority slots (leaves)
        prio_exponent: alpha — how strongly TD error shapes the priority
        is_exponent: beta — importance-sampling correction strength
        """
        # Grow until the leaf layer (2**(num_layers-1) slots) holds `capacity`.
        self.num_layers = 1
        while capacity > 2**(self.num_layers-1):
            self.num_layers += 1

        # Flat array storing the whole tree: node i has children 2i+1 and 2i+2.
        self.ptree = np.zeros(2**self.num_layers-1, dtype=np.float64)

        self.prio_exponent = prio_exponent
        self.is_exponent = is_exponent

    def update(self, idxes: np.ndarray, td_error: np.ndarray):
        """Write new priorities at leaf slots `idxes` and re-sum all ancestors."""
        priorities = td_error ** self.prio_exponent

        # Translate external indices to leaf positions in the flat tree.
        idxes = idxes + 2**(self.num_layers-1) - 1
        self.ptree[idxes] = priorities

        # Propagate the changed sums up to the root, one layer at a time.
        for _ in range(self.num_layers-1):
            idxes = (idxes-1) // 2
            idxes = np.unique(idxes)
            self.ptree[idxes] = self.ptree[2*idxes+1] + self.ptree[2*idxes+2]

    def sample(self, num_samples: int) -> Tuple[np.ndarray, np.ndarray]:
        """Stratified proportional sampling.

        Returns (leaf indices, importance-sampling weights normalized so the
        minimum-priority sample has weight 1).
        """
        p_sum = self.ptree[0]
        interval = p_sum / num_samples

        # One uniform draw per equal-width stratum. Bug fix: the previous
        # np.arange(0, p_sum, interval) could emit num_samples+1 offsets due
        # to floating-point rounding of the step, which broke the broadcast
        # with the num_samples random draws; integer-indexed offsets always
        # have exactly num_samples elements.
        prefixsums = (np.arange(num_samples, dtype=np.float64) * interval
                      + np.random.uniform(0, interval, num_samples))

        # Descend from the root: go left while the prefix sum fits in the left
        # subtree, otherwise subtract the left sum and go right.
        idxes = np.zeros(num_samples, dtype=np.int64)
        for _ in range(self.num_layers-1):
            nodes = self.ptree[idxes*2+1]
            idxes = np.where(prefixsums < nodes, idxes*2+1, idxes*2+2)
            prefixsums = np.where(idxes%2 == 0, prefixsums - self.ptree[idxes-1], prefixsums)

        # importance sampling weights
        priorities = self.ptree[idxes]
        min_p = np.min(priorities)
        is_weights = np.power(priorities/min_p, -self.is_exponent)

        # Convert leaf positions back to external indices.
        idxes -= 2**(self.num_layers-1) - 1

        return idxes, is_weights