import gymnasium as gym
import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as nn_utils
from collections import deque
import cv2
import os
import time
import sys
from gymnasium import spaces
from PIL import Image

DEFAULT_SEED = 20 # 环境默认的随机种子

NUM_ENVS = 50 # 创建环境的数量
GAMMA = 0.99
REWARD_STEPS = 5 # todo 作用 目前看起来像是记录环境交互结果的步数
ENTROPY_BETA = 0.01
VALUE_LOSS_COEF = 0.5
BATCH_SIZE = REWARD_STEPS * 16
CLIP_GRAD = 0.5

FRAMES_COUNT = 4
IMG_SHAPE = (FRAMES_COUNT, 84, 84) # 图片维度，（帧数（通道数），高，宽）

def select_device(args):
    """Pick the torch device to train on: CUDA first, then Apple MPS, else CPU.

    Both accelerator branches are gated on ``args.cuda``, matching the original
    behavior where the --cuda flag enables any hardware acceleration.
    """
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


class TransposeObservation(gym.ObservationWrapper):
    """Convert image observations from (H, W, C) to channel-first (C, H, W)."""

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        # Move the channel axis to the front: (H, W, C) -> (C, H, W).
        return observation.transpose(2, 0, 1)

class StackFrameWrapper(gym.Wrapper):
    """
    Stack the most recent ``n_frames`` observations along axis 0 (the channel
    axis of a channel-first observation).
    """

    def __init__(self, env, n_frames=4):
        super().__init__(env)
        self.n_frames = n_frames
        # Ring buffer holding the last n_frames observations.
        self.frames = deque([], maxlen=n_frames)

        # reset()/step() concatenate the frames along axis 0, so the declared
        # observation space must grow along axis 0 as well.  (Bug fix: the
        # original repeated the bounds along axis 2, which did not match the
        # shape of the observations actually returned by this wrapper.)
        low = np.repeat(self.observation_space.low, n_frames, axis=0)
        high = np.repeat(self.observation_space.high, n_frames, axis=0)
        self.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.observation_space.dtype)

    def reset(self, **kwargs):
        """Reset the env and fill the whole buffer with copies of the first frame."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), info

    def step(self, action):
        """Step the env and return the stacked last ``n_frames`` observations."""
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), reward, terminated, truncated, info
    

class ProcessFrame84(gym.ObservationWrapper):
    """
    Convert the raw game frame (observation) into an 84x84 grayscale image.
    """

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # New observation space: a single-channel 84x84 image with values 0~255.
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """
        Transform one observation into the processed 84x84x1 frame.
        """
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        """
        Convert an RGB frame into a cropped, resized 84x84x1 uint8 grayscale image.

        Bug fix: the original declared ``global count_frame`` although no such
        global exists anywhere in this module; the dead statement was removed.
        """
        # ITU-R BT.601 luma weights for RGB -> grayscale conversion.
        x_t = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        # Crop the vertical play area (drops the score bar / dead rows).
        x_t = x_t[12:173, :]
        x_t = cv2.resize(x_t, (84, 84), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [84, 84, 1])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debugging helper)."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8.
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        # Create image.
        img = Image.fromarray(state)
        # Convert image to mode 'L' (grayscale) if it's not compatible.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        # Save image.
        img.save(filename)

class AlienPenaltyWrapper(gym.Wrapper):
    """
    Reward shaping for ALE/Alien: coarse reward bucketing, a small periodic
    time penalty, and a large penalty/bonus when a life is lost/gained.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(AlienPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        # Apply the time penalty once every (frame_penalty_count + 1) steps.
        self.frame_penalty_count = 39
        self.frame_penalty_tick = 0
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        """Reset and remember the starting life count from the info dict."""
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        reward = self.clip_reward(reward=reward)

        # Periodic time penalty to discourage stalling.
        if self.frame_penalty_tick >= self.frame_penalty_count:
            reward += self.frame_penalty
            self.frame_penalty_tick = 0
        else:
            self.frame_penalty_tick += 1

        # A life change overrides the reward entirely: losing a life is
        # punished, gaining one is rewarded with the same magnitude.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives != self.previous_lives:
            reward = self.life_loss_penalty if current_lives < self.previous_lives else -self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info

    def clip_reward(self, reward):
        """Map positive raw rewards into coarse buckets; non-positive pass through."""
        if reward <= 0:
            return reward
        buckets = ((11, 10), (101, 20), (501, 30), (1001, 40), (2001, 50), (3001, 60))
        for upper_bound, bucket_value in buckets:
            if reward <= upper_bound:
                return np.float32(bucket_value)
        return np.float32(70)

class DenseOpticalFlowWrapper(gym.ObservationWrapper):
    """
    Replace image observations with the Farneback dense optical flow between
    the previous and the current frame, shape (H, W, 2), dtype float32.
    """

    def __init__(self, env):
        super(DenseOpticalFlowWrapper, self).__init__(env)
        orig_shape = env.observation_space.shape
        # Flow always has two channels (dx, dy) regardless of whether the
        # source frames are RGB or already grayscale.  (Fix: the original
        # if/else had byte-identical branches — dead conditional collapsed.)
        new_shape = (orig_shape[0], orig_shape[1], 2)
        self.observation_space = gym.spaces.Box(
            low=-np.inf, high=np.inf, shape=new_shape, dtype=np.float32
        )
        # Grayscale copy of the previous frame; None until the first reset/step.
        self.prev_gray = None

    def observation(self, obs):
        # Convert RGB frames to grayscale before computing the flow.
        if len(obs.shape) == 3 and obs.shape[-1] == 3:
            gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        else:
            gray = obs.copy()
        # Without a previous frame there is no motion: return zero flow.
        if self.prev_gray is None:
            flow = np.zeros((gray.shape[0], gray.shape[1], 2), dtype=np.float32)
        else:
            flow = cv2.calcOpticalFlowFarneback(
                self.prev_gray, gray, None,
                pyr_scale=0.5, levels=3, winsize=15,
                iterations=3, poly_n=5, poly_sigma=1.2, flags=0
            )
        self.prev_gray = gray
        return flow

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        if len(obs.shape) == 3 and obs.shape[-1] == 3:
            self.prev_gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        else:
            self.prev_gray = obs.copy()
        # The first observation after reset is always zero flow.
        flow = np.zeros((self.prev_gray.shape[0], self.prev_gray.shape[1], 2), dtype=np.float32)
        return flow, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        flow = self.observation(obs)
        return flow, reward, done, truncated, info


class DenseOpticalFlow3FrameWrapper(gym.ObservationWrapper):
    """
    Replace image observations with the average of the two Farneback optical
    flows computed over the last three frames, shape (H, W, 2), dtype float32.
    """

    def __init__(self, env):
        super(DenseOpticalFlow3FrameWrapper, self).__init__(env)
        # Source observations are assumed to be grayscale or RGB images.
        orig_shape = env.observation_space.shape
        # Output is the mean of two flows, so it is still flow-shaped (H, W, 2).
        self.observation_space = gym.spaces.Box(
            low=-np.inf, high=np.inf, shape=(orig_shape[0], orig_shape[1], 2), dtype=np.float32
        )
        # Buffer holding the last 3 grayscale frames.
        self.frame_buffer = deque(maxlen=3)

    def _preprocess(self, obs):
        """Return a grayscale copy of obs (converting from RGB if needed)."""
        if len(obs.shape) == 3 and obs.shape[-1] == 3:
            return cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        else:
            return obs.copy()

    def _calc_flow(self, prev, next_):
        """Dense Farneback optical flow between two grayscale frames."""
        flow = cv2.calcOpticalFlowFarneback(
            prev, next_, None,
            pyr_scale=0.5, levels=3, winsize=15, iterations=3,
            poly_n=5, poly_sigma=1.2, flags=0
        )
        return flow

    def observation(self, obs):
        gray = self._preprocess(obs)
        # Pad the buffer with the current frame until it holds 3 frames;
        # otherwise just push the new frame (the deque drops the oldest).
        if len(self.frame_buffer) < 3:
            while len(self.frame_buffer) < 3:
                self.frame_buffer.append(gray)
        else:
            self.frame_buffer.append(gray)
        # (Fix: removed an unreachable "len < 3" zero-flow fallback here —
        # after the padding above the buffer always holds exactly 3 frames.)
        # Flow between the two older frames.
        flow1 = self._calc_flow(self.frame_buffer[0], self.frame_buffer[1])
        # Flow between the two newer frames.
        flow2 = self._calc_flow(self.frame_buffer[1], self.frame_buffer[2])
        # Average the two flows as the aggregate 3-frame motion signal.
        flow = (flow1 + flow2) / 2.0
        return flow

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        gray = self._preprocess(obs)
        # Seed the buffer with three copies of the initial frame.
        self.frame_buffer.clear()
        for _ in range(3):
            self.frame_buffer.append(gray)
        # Bug fix: `gray` is 2-D after RGB->grayscale conversion, so the
        # original `h, w, _ = gray.shape` raised ValueError for RGB inputs.
        h, w = gray.shape[:2]
        flow = np.zeros((h, w, 2), dtype=np.float32)
        return flow, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        flow = self.observation(obs)
        return flow, reward, done, truncated, info


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard Atari preprocessing pipeline to `env`.

    Order matters: episodic-life, noop-reset, fire-reset, 84x84 grayscale,
    channel-first conversion, frame stacking, then Alien reward shaping.
    (`reward_clipping` is accepted for interface compatibility but unused,
    as in the original.)
    """
    if episodic_life:
        # Treat every life as a separate episode for the learner.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    pipeline = [
        # Randomized number of no-ops at reset for varied starting states.
        lambda e: ptan.common.wrappers.NoopResetEnv(e, noop_max=30),
        ptan.common.wrappers.FireResetEnv,
        ProcessFrame84,
        ptan.common.wrappers.ImageToPyTorch,
        lambda e: ptan.common.wrappers.FrameStack(e, stack_frames),
        AlienPenaltyWrapper,
    ]
    for apply_wrapper in pipeline:
        env = apply_wrapper(env)
    return env

def make_env():
    """Create a fully wrapped, deterministic-action ALE/Alien-v5 environment."""
    base_env = gym.make("ALE/Alien-v5", frameskip=4, repeat_action_probability=0.0)
    return wrap_dqn(base_env)


def set_seed(seed, envs=None, cuda=False):
    '''
    Seed numpy, torch (and CUDA when requested), plus an optional list of
    environments, for reproducible runs.

    param seed: base random seed; env i gets seed + i
    param envs: optional list of environments to seed
    param cuda: also seed the CUDA RNG when True
    '''
    np.random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    if envs:
        for idx, env in enumerate(envs):
            # Bug fix: Gymnasium removed Env.seed(); seeding is now done via
            # reset(seed=...).  Keep the legacy call for old gym envs that
            # still expose seed().
            if hasattr(env, "seed"):
                env.seed(seed + idx)
            else:
                env.reset(seed=seed + idx)


class AtariA2C(nn.Module):
    '''
    Actor-critic network: a shared convolutional encoder feeding two heads,
    one producing per-action logits (policy) and one producing a scalar
    state-value estimate (critic).
    '''
    def __init__(self, input_shape, n_actions):
        super(AtariA2C, self).__init__()

        # Shared feature extractor over the stacked input frames.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 64, kernel_size=8, stride=4),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )

        flat_features = self._get_conv_out(input_shape)

        # Policy head: raw action scores; callers apply softmax themselves.
        self.policy = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

        # Value head: scalar estimate of the current state's value.
        self.value = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, 1),
        )

    def _get_conv_out(self, shape):
        '''
        Run a dummy forward pass through the conv stack to size the flattened
        feature vector feeding the two heads.
        '''
        probe = self.conv(torch.zeros(1, *shape))
        return probe.numel()

    def forward(self, x):
        '''
        Return (action logits, state value) for a batch of observations.
        '''
        # Scale raw byte observations into roughly [0, 1).
        scaled = x.float() / 256
        # Flatten the conv feature maps into one vector per sample.
        features = torch.flatten(self.conv(scaled), start_dim=1)
        return self.policy(features), self.value(features)



def discount_with_dones(rewards, dones, gamma):
    '''
    Compute discounted returns over a trajectory, resetting at episode ends.

    param rewards: per-step rewards
    param dones: per-step episode-end flags (aligned with rewards)
    param gamma: discount factor

    return: list of Bellman-style discounted returns, one per step
    '''
    out = []
    running = 0
    # Walk the trajectory backwards, accumulating the discounted return.
    # A done flag (True == 1) zeroes the carried-over return, so each
    # episode's tail starts fresh from its own final reward.
    for step_reward, step_done in zip(reversed(rewards), reversed(dones)):
        running = step_reward + gamma * running * (1. - step_done)
        out.append(running)
    # Accumulated back-to-front; flip to chronological order.
    out.reverse()
    return out


def iterate_batches(envs, net, device="cpu"):
    '''
    Roll all environments forward REWARD_STEPS steps at a time and yield
    training batches for A2C.

    param envs: list of identically wrapped environments
    param net: actor-critic network; net(obs) -> (action logits, state values)
    param device: torch device string

    yield: (observations, discounted returns, actions taken, predicted values,
            action probability distributions, total reward of each episode
            that finished during the rollout, total steps of those episodes)
    '''
    # Size of the (discrete) action space.
    n_actions = envs[0].action_space.n
    # Selector that samples actions from the predicted probabilities.
    act_selector = ptan.actions.ProbabilityActionSelector()
    obs = [e.reset()[0] for e in envs]
    obs = np.array([np.array(o, dtype=np.float32) for o in obs])
    # Per-environment list of done flags, one entry appended per step taken.
    batch_dones = [[False] for _ in range(NUM_ENVS)]
    # Running episode reward per environment.
    total_reward = [0.0] * NUM_ENVS
    # Running episode step count per environment.
    total_steps = [0] * NUM_ENVS
    # Observations collected over the REWARD_STEPS-step rollout.
    mb_obs = np.zeros((NUM_ENVS, REWARD_STEPS) + IMG_SHAPE, dtype=np.uint8)
    # Immediate rewards for each rollout step; after the rollout these are
    # converted in place into discounted (Q-like) returns.
    mb_rewards = np.zeros((NUM_ENVS, REWARD_STEPS), dtype=np.float32)
    # Critic value predictions recorded at each step.
    mb_values = np.zeros((NUM_ENVS, REWARD_STEPS), dtype=np.float32)
    mb_actions = np.zeros((NUM_ENVS, REWARD_STEPS), dtype=np.int32)
    mb_probs = np.zeros((NUM_ENVS, REWARD_STEPS, n_actions), dtype=np.float32)

    while True:
        # Keep only the final done flag from the previous rollout so the
        # discounting below knows whether each env starts mid-episode.
        batch_dones = [[dones[-1]] for dones in batch_dones]
        # Totals of episodes that finished during this rollout, pooled across
        # environments (no record of which env produced them).
        done_rewards = []
        done_steps = []
        for n in range(REWARD_STEPS):
            # Convert the list of observations into a batched tensor.
            obs_v = ptan.agent.default_states_preprocessor(obs).to(device)
            # Store the raw observations for this step.
            mb_obs[:, n] = obs_v.data.cpu().numpy()
            # Forward pass: action logits and state-value estimates.
            logits_v, values_v = net(obs_v)
            # Softmax over the logits gives per-action probabilities.
            probs_v = F.softmax(logits_v, dim=1)
            probs = probs_v.data.cpu().numpy()
            # Sample one action per environment from the probabilities.
            actions = act_selector(probs)
            # Record the probability distribution of this step.
            mb_probs[:, n] = probs
            # Record the chosen actions.
            mb_actions[:, n] = actions
            # Record the critic's value estimates.
            mb_values[:, n] = values_v.squeeze().data.cpu().numpy()
            # Step every environment with its chosen action.
            for e_idx, e in enumerate(envs):
                o, r, done, trunc,  _ = e.step(actions[e_idx])
                # Accumulate this environment's episode reward.
                total_reward[e_idx] += r
                # Count this environment's episode steps.
                total_steps[e_idx] += 1
                if done or trunc:
                    # Episode over: reset the env and log the episode totals.
                    o, info = e.reset()
                    done_rewards.append(total_reward[e_idx])
                    done_steps.append(total_steps[e_idx])
                    # Start fresh episode counters for this env.
                    total_reward[e_idx] = 0.0
                    total_steps[e_idx] = 0
                # Keep the freshest observation for the next step.
                obs[e_idx] = o
                # Store the immediate reward of this step.
                mb_rewards[e_idx, n] = r
                # Store this step's done flag.
                batch_dones[e_idx].append(done)
        # obtain values for the last observation
        # Critic values of the observation AFTER the final step: they
        # bootstrap the discounted return of unfinished episodes — without
        # this, the rollout's tail would be valued as zero and the computed
        # returns would be biased low.
        obs_v = ptan.agent.default_states_preprocessor(obs).to(device)
        _, values_v = net(obs_v)
        # Squeeze the (NUM_ENVS, 1) value tensor into a flat array.
        values_last = values_v.squeeze().data.cpu().numpy()

        # Convert the per-step rewards of every env into discounted returns
        # (Bellman-style), bootstrapping unfinished episodes with values_last.
        for e_idx, (rewards, dones, value) in enumerate(zip(mb_rewards, batch_dones, values_last)):
            rewards = rewards.tolist()
            if not dones[-1]:
                # Episode still running: append the bootstrap value, discount,
                # then drop the extra element.  `dones` has one more entry
                # than `rewards` (the seed flag), hence dones[1:].
                rewards = discount_with_dones(rewards + [value], dones[1:] + [False], GAMMA)[:-1]
            else:
                # Episode ended on the last step: same formula, but there is
                # no successor state so no bootstrap value is appended.
                rewards = discount_with_dones(rewards, dones[1:], GAMMA)
            # Overwrite the raw rewards with the discounted returns.
            mb_rewards[e_idx] = rewards

        # Flatten the (NUM_ENVS, REWARD_STEPS) leading dimensions for training.
        out_mb_obs = mb_obs.reshape((-1,) + IMG_SHAPE)
        out_mb_rewards = mb_rewards.flatten()
        out_mb_actions = mb_actions.flatten()
        out_mb_values = mb_values.flatten()
        # NOTE(review): this also flattens away the n_actions dimension,
        # yielding a 1-D array of length NUM_ENVS*REWARD_STEPS*n_actions —
        # confirm consumers expect that rather than reshape((-1, n_actions)).
        out_mb_probs = mb_probs.flatten()
        # Yield the batch (generator-style producer).
        yield out_mb_obs, out_mb_rewards, out_mb_actions, out_mb_values, out_mb_probs, \
              np.array(done_rewards), np.array(done_steps)


def train_a2c(net, mb_obs, mb_rewards, mb_actions, mb_values, optimizer, tb_tracker, step_idx, device="cpu"):
    '''
    Run a single A2C optimization step on one rollout batch.

    param net: actor-critic network; net(obs) -> (action logits, state values)
    param mb_obs: observations for every step of the rollout
    param mb_rewards: discounted n-step returns (not raw per-step rewards)
    param mb_actions: actions taken at every step
    param mb_values: critic value predictions recorded during the rollout
    param optimizer: optimizer updating net's parameters
    param tb_tracker: tensorboard tracker exposing track(name, value, step)
    param step_idx: global training step index (for logging)
    param device: torch device string

    return: the observation tensor that was fed to the network
    '''
    optimizer.zero_grad()

    # Advantage = observed return - predicted value: positive when the action
    # turned out better than the critic expected, negative otherwise.
    mb_adv = mb_rewards - mb_values
    adv_v = torch.FloatTensor(mb_adv).to(device)
    obs_v = torch.FloatTensor(mb_obs).to(device)
    rewards_v = torch.FloatTensor(mb_rewards).to(device)
    actions_t = torch.LongTensor(mb_actions).to(device)

    logits_v, values_v = net(obs_v)
    log_prob_v = F.log_softmax(logits_v, dim=1)

    # Policy-gradient term: log-prob of each taken action scaled by its
    # advantage.  Maximizing it reinforces better-than-expected actions and
    # suppresses worse ones; negated because optimizers minimize.
    taken_log_probs = log_prob_v[range(len(mb_actions)), actions_t]
    loss_policy_v = -(adv_v * taken_log_probs).mean()

    # Critic regression: pull predicted state values toward observed returns.
    loss_value_v = F.mse_loss(values_v.squeeze(-1), rewards_v)

    # Entropy bonus with the conventional minus sign dropped, so minimizing
    # this term maximizes entropy and keeps the policy exploring.
    prob_v = F.softmax(logits_v, dim=1)
    entropy_loss_v = (prob_v * log_prob_v).sum(dim=1).mean()

    # Combine the three terms (same summation order as before).
    loss_v = ENTROPY_BETA * entropy_loss_v + VALUE_LOSS_COEF * loss_value_v + loss_policy_v
    loss_v.backward()
    # Clip the gradient norm to avoid destabilizing updates.
    nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
    optimizer.step()

    # Log training diagnostics.
    for tag, tracked in (
        ("advantage", mb_adv),
        ("values", values_v),
        ("batch_rewards", rewards_v),
        ("loss_entropy", entropy_loss_v),
        ("loss_policy", loss_policy_v),
        ("loss_value", loss_value_v),
        ("loss_total", loss_v),
    ):
        tb_tracker.track(tag, tracked, step_idx)
    return obs_v


@torch.no_grad
def test_model(env, net, rounds=3, device="cpu"):
    '''
    Play `rounds` complete episodes with a greedy (argmax) policy.

    param env: evaluation environment
    param net: actor-critic network; only the policy head is used
    param rounds: number of episodes to play
    param device: torch device string

    return: (mean reward per episode, mean steps per episode)
    '''
    agent = ptan.agent.PolicyAgent(lambda x: net(x)[0], device=device, apply_softmax=True, action_selector=ptan.actions.ArgmaxActionSelector())

    reward_sum = 0.0
    step_sum = 0
    for _ in range(rounds):
        obs, info = env.reset()
        finished = False
        while not finished:
            action = agent([obs])[0][0]
            obs, r, done, trunc, _ = env.step(action)
            reward_sum += r
            step_sum += 1
            finished = done or trunc

    # Average per-episode reward and step count.
    return reward_sum / rounds, step_sum / rounds



def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    '''
    Persist `state` as '<save_name>_<score>.pth' in `save_dir`, then delete
    all but the `keep_best` highest-scoring checkpoints saved under the same
    `save_name`.

    Bug fixes: the original prune filter matched files containing the literal
    substring "best" (so it only worked when save_name contained "best"),
    and parsed scores with int() on the first dot-separated field, which
    mishandled float scores.  Now files are matched by the save_name prefix
    and scores are parsed as floats.

    param score: numeric score embedded in the filename (higher is better)
    param state: object to pass to torch.save (e.g. a state dict)
    param save_dir: directory receiving the checkpoints
    param save_name: filename prefix shared by this family of checkpoints
    param keep_best: how many top-scoring checkpoints to retain
    '''
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    def _score_of(fname):
        # Filename layout is '<save_name>_<score>.pth'; extract the score.
        try:
            return float(fname[len(save_name) + 1:].rsplit('.pth', 1)[0])
        except ValueError:
            return None

    candidates = [f for f in os.listdir(save_dir)
                  if f.startswith(f'{save_name}_') and f.endswith('.pth')
                  and _score_of(f) is not None]
    # Ascending by score: the head of the list holds the worst checkpoints.
    candidates.sort(key=_score_of)
    if len(candidates) > keep_best:
        for stale in candidates[:-keep_best]:
            os.remove(os.path.join(save_dir, stale))
            

def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    '''
    Save `state` as '<save_name>_epoch_<iter>.pth' and delete all but the
    most recent `keep_last` checkpoints (by epoch number) in the directory.

    param iter: epoch/iteration number embedded in the filename
    param state: object to pass to torch.save (e.g. a state dict)
    param checkpoint_dir: directory receiving the checkpoints
    param save_name: filename prefix for this family of checkpoints
    param keep_last: how many most-recent checkpoints to retain
    '''
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    # Sort every checkpoint file ascending by its trailing epoch number.
    def _epoch_of(fname):
        return int(fname.split('_')[-1].split('.')[0])

    checkpoints = sorted((f for f in os.listdir(checkpoint_dir) if "epoch" in f), key=_epoch_of)
    if len(checkpoints) > keep_last:
        # The head of the list holds the oldest epochs — remove them.
        for stale in checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


class RewardTracker:
    """
    Context manager that logs per-episode rewards and training speed to a
    TensorBoard-style writer, and signals when the reward target is reached.
    """

    def __init__(self, writer, stop_reward):
        # writer: object exposing add_scalar(tag, value, step) and close().
        self.writer = writer
        # Training stops once the 100-episode mean reward exceeds this.
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        """Record one finished episode; return True once the running mean beats stop_reward."""
        self.total_rewards.append(reward)
        # Frames processed since the last report, per wall-clock second.
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        for tag, value in (("speed", speed), ("reward_100", mean_reward), ("reward", reward)):
            self.writer.add_scalar(tag, value, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
    """
    Convert batch into training tensors
    :param batch: list of experience entries exposing .state, .action,
        .reward and .last_state (None when the episode ended there)
    :param net: network used to bootstrap values of non-terminal last states.
        NOTE(review): the code below indexes net(...)[2], i.e. it expects a
        network whose forward returns at least three outputs with the value
        head third — AtariA2C in this file returns only (policy, value);
        confirm which network this helper is meant for.
    :param last_val_gamma: discount factor applied to the bootstrapped value
    :return: states variable, actions tensor, reference values variable
        (environment states, actions taken, target Q-like values)
    """
    states = [] # state observed at each step
    actions = [] # action taken at each step
    rewards = [] # reward obtained after each action
    not_done_idx = [] # indices of entries whose episode did NOT end
    last_states = [] # successor state for each non-terminal entry
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(exp.last_state)
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    # NOTE(review): actions are packed as floats; for discrete-action A2C a
    # LongTensor is typical — confirm against downstream usage.
    actions_v = torch.FloatTensor(np.array(actions)).to(device)

    # handle rewards
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # Bootstrap: for entries whose episode continued, add the discounted
        # predicted value of the successor state.  Terminal entries keep the
        # raw reward — their future return is zero by definition.
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        last_vals_v = net(last_states_v)[2]
        # Value head outputs shape (batch, 1); take the scalar column.
        last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * last_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v