#!/usr/bin/env python3
'''
Adaptation finished, pending verification.

Reference:
https://github.com/lutery/cleanrl.git

Here the transformer mainly plays a role similar to an LSTM:
it takes the step history into account when deciding the next action,
rather than using the transformer to extract features from the obs.

'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model_ppo as model, common_trxl as common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py
from collections import deque

gym.register_envs(ale_py)


class Trainer:
    def __init__(self, params, device):
        """Build the full training stack (writer, envs, model, buffers).

        :param params: config dict merged from YAML + CLI (see __main__)
        :param device: torch device used for the model and tensors
        """
        self.params = params
        self.build_params()

        self.writer = SummaryWriter(comment="-ppo-trxl_" + self.params['name'])
        self.device = device
        # Checkpoints and best-model files go under saves/-ppo-trxl-<name>.
        self.save_path = os.path.join("saves", "-ppo-trxl-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)

        # Order matters: env determines shapes, model needs shapes,
        # buffer/agent need the model.
        self.build_env()
        self.build_model()
        self.build_buffer()


    def build_params(self):
        """Cache hyper-parameters from the config dict as attributes and
        derive the batch/minibatch/iteration sizes."""
        p = self.params
        # Rollout / batching
        self.num_steps = p['num_steps']
        self.num_minibatches = p['num_minibatches']
        self.total_timesteps = p['total_timesteps']
        self.batch_size = self.num_steps
        self.minibatch_size = self.batch_size // self.num_minibatches
        self.num_iterations = self.total_timesteps // self.batch_size
        # Default episode-length cap; build_env() overwrites it from the env spec.
        self.max_episode_steps = 1024
        self.trxl_memory_length = p['trxl_memory_length']
        # Learning-rate annealing
        self.init_lr = p['init_lr']
        self.final_lr = p['final_lr']
        self.anneal_steps = p['anneal_steps']
        # Progress counters
        self.frame_idx = 0
        self.train_count = 0
        self.best_reward = float('-inf')
        # Transformer-XL architecture
        self.trxl_num_layers = p['trxl_num_layers']
        self.trxl_dim = p['trxl_dim']
        # Entropy-coefficient annealing
        self.init_ent_coef = p['init_ent_coef']
        self.final_ent_coef = p['final_ent_coef']
        # PPO hyper-parameters
        self.gamma = p['gamma']
        self.gae_lambda = p['gae_lambda']
        self.update_epochs = p['update_epochs']
        self.norm_adv = p['norm_adv']
        self.clip_coef = p['clip_coef']
        self.clip_vloss = p['clip_vloss']
        self.vf_coef = p['vf_coef']
        self.reconstruction_coef = p['reconstruction_coef']
        self.max_grad_norm = p['max_grad_norm']
        self.target_kl = p['target_kl']
        self.start_time = time.time()
        # Evaluation / checkpoint cadence (in minibatch update counts)
        self.eval_freq = p['eval_freq']
        self.save_freq = p['save_freq']

    
    def build_buffer(self):
        """Pre-compute the TrXL memory mask / sliding-window indices and
        create the sampling agent plus the ptan experience source."""
        # ALGO Logic: Storage setup
        self.start_time = time.time()
        # Setup placeholders for each environments's current episodic memory
        # Lower-triangular attention mask over the episodic memory window
        # (diagonal=-1: a step may attend only to strictly earlier steps).
        self.memory_mask = torch.tril(torch.ones((self.trxl_memory_length, self.trxl_memory_length)), diagonal=-1).to(device=self.device)
        """ e.g. memory mask tensor looks like this if memory_length = 6
        0, 0, 0, 0, 0, 0
        1, 0, 0, 0, 0, 0
        1, 1, 0, 0, 0, 0
        1, 1, 1, 0, 0, 0
        1, 1, 1, 1, 0, 0
        1, 1, 1, 1, 1, 0
        """
        # Setup memory window indices to support a sliding window over the episodic memory
        # While the episode step is still below trxl_memory_length, the window
        # is always [0..L-1] (slots beyond the current step are empty and
        # masked out anyway); only after the step exceeds L-1 does the window
        # slide forward.  This first part therefore repeats [0..L-1]
        # (L - 1) times: shape (trxl_memory_length - 1, trxl_memory_length).
        self.repetitions = torch.repeat_interleave(
            torch.arange(0, self.trxl_memory_length).unsqueeze(0), self.trxl_memory_length - 1, dim=0
        ).long()
        # Sliding windows [i..i+L-1] for every possible start position i,
        # e.g. [0,1,2,3], [1,2,3,4], [2,3,4,5], ...
        # shape (max_episode_steps - trxl_memory_length + 1, trxl_memory_length);
        # trxl_memory_length <= max_episode_steps is guaranteed by build_env().
        self.memory_indices = torch.stack(
            [torch.arange(i, i + self.trxl_memory_length) for i in range(self.max_episode_steps - self.trxl_memory_length + 1)]
        ).long()

        # Concatenated: one memory-window row per episode step,
        # final shape (max_episode_steps, trxl_memory_length).
        self.memory_indices = torch.cat((self.repetitions, self.memory_indices)).to(device=self.device)
        """ e.g. the memory window indices tensor looks like this if memory_length = 4 and max_episode_length = 7:
        0, 1, 2, 3
        0, 1, 2, 3
        0, 1, 2, 3
        0, 1, 2, 3
        1, 2, 3, 4
        2, 3, 4, 5
        3, 4, 5, 6
        """
        # Build the PPO-TrXL sampling agent (the original comment said "DDPG
        # agent", but this is a PPO agent).
        self.net_sample_agent = model.AgentPPOTrxl(
            net=self.model, 
            memory_indices=self.memory_indices,
            memory_mask=self.memory_mask, 
            max_episode_steps=self.max_episode_steps,
            param=self.params,
            action_shape=self.action_shape,
            device=self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.net_sample_agent, steps_count=1)
        # Rollout buffer for the current batch of experiences ("trajectory").
        self.tarj = []



    def build_env(self):
        """Create the training and evaluation environments and derive
        observation/action shapes and the per-episode step limit."""
        # todo dreamerv1 can work without frame stacking, worth trying here too
        self.env = common.wrap_dqn(gym.make('MountainCar-v0', render_mode="rgb_array"))
        self.test_env = common.wrap_dqn(gym.make('MountainCar-v0', render_mode="rgb_array"))
        self.obs_shape = self.test_env.observation_space.shape
        self.action_shape = [self.test_env.action_space.n]
        # Determine maximum episode steps from the env spec.
        self.max_episode_steps = self.test_env.spec.max_episode_steps
        # Fallback: some envs only expose the limit after a reset.
        if not self.max_episode_steps:
            self.test_env.reset()  # Memory Gym envs need to be reset before accessing max_episode_steps
            # Read from the env that was actually reset (the original read
            # self.env here, which was never reset — inconsistent).
            self.max_episode_steps = self.test_env.max_episode_steps
        # If the limit is still unknown or non-positive, cap it at 1024.
        # (The None guard avoids a TypeError on `None <= 0`.)
        if self.max_episode_steps is None or self.max_episode_steps <= 0:
            self.max_episode_steps = 1024  # Memory Gym envs have max_episode_steps set to -1
        # The transformer memory cannot be longer than one episode.
        self.trxl_memory_length = min(self.trxl_memory_length, self.max_episode_steps)


    def build_model(self):
        """Instantiate the PPO-TrXL network, optimizer and the LR / entropy
        coefficient schedulers."""
        self.model = model.PPOTrxlModel(self.params, self.obs_shape, self.action_shape, self.max_episode_steps).to(device=self.device)
        self.optimizer = optim.AdamW(self.model.parameters(), lr=self.init_lr)
        # BCELoss fits because the reconstruction head emits per-dimension
        # probabilities in [0, 1] — presumably; confirm in model_ppo.
        self.bce_loss = torch.nn.BCELoss()  # Binary cross entropy loss for observation reconstruction
        self.sheduler = model.CustomLRScheduler(self.optimizer, self.init_lr, self.final_lr, self.anneal_steps)
        self.ent_coef_sheduler = model.CustomEntCoefcheduler(self.init_ent_coef, self.final_ent_coef, self.anneal_steps)
        

    def load_model(self):
        """Restore the latest training checkpoint from ``self.save_path``, if any.

        Looks for files whose name contains "epoch", picks the one with the
        highest trailing integer index, and restores model/optimizer/scheduler
        state plus the frame and train counters.
        """
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Sort checkpoints by the integer suffix of the filename,
            # e.g. "ppo_trxl_epoch_42.pt" -> 42.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                # Use self.device (not the module-level global `device`) so the
                # method also works when this module is imported rather than run.
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]),
                                        map_location=self.device, weights_only=False)
                self.model.load_state_dict(checkpoint['net'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                self.frame_idx = checkpoint['step_idx']
                self.ent_coef_sheduler.load_state_dict(checkpoint['ent_coef_sheduler'])
                self.sheduler.load_state_dict(checkpoint['sheduler'])
                self.train_count = checkpoint['train_count']
                # Only report success when a checkpoint was actually restored
                # (the original printed this even when nothing was loaded).
                print("加载模型成功")


    def save_model(self):
        """Persist the full training state as a periodic checkpoint."""
        state = dict(
            net=self.model.state_dict(),
            optimizer=self.optimizer.state_dict(),
            step_idx=self.frame_idx,
            ent_coef_sheduler=self.ent_coef_sheduler.state_dict(),
            sheduler=self.sheduler.state_dict(),
            train_count=self.train_count,
        )
        common.save_checkpoints(self.train_count, state, self.save_path, 'ppo_trxl')


    def collect_seed_episodes(self, tb_tracker, tracker):
        """Fill a replay buffer with seed experience before training.

        NOTE(review): `self.buffer` and `self.replay_initial` are never set
        anywhere in this file, and this method is never called from train();
        it looks like leftover code from an off-policy (DQN/DDPG) template
        and would raise AttributeError if invoked as-is.
        """
        while len(self.buffer) < self.replay_initial:
            self.frame_idx += 1
            self.buffer.populate(1)
            rewards_steps = self.exp_source.pop_rewards_steps()
            if rewards_steps:
                # Log training progress and feed the reward tracker.
                rewards, steps = zip(*rewards_steps)
                tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                tracker.reward(rewards[0], self.frame_idx)


    def train_model(self, tarj):
        """Run one PPO-TrXL update phase over the rollout `tarj`.

        :param tarj: list of `num_steps` ptan experiences; each exp[0] packs
            (obs, action, reward, done, next_obs, extras) where extras bundles
            (episode memory, episode step, log-probs, values, window indices,
            window mask) — inferred from the indexing below; TODO confirm
            against model.AgentPPOTrxl.
        """
        stored_memories = self.net_sample_agent.stored_memories
        # Bootstrap value if not done
        env_current_episode_step = tarj[-1][0][5][1]
        next_memory = tarj[-1][0][5][0].unsqueeze(0)
        next_obs = torch.tensor(tarj[-1][0][4]).to(device=self.device).unsqueeze(0)
        next_done = tarj[-1][0][3]

        rewards = torch.zeros((self.num_steps,))
        actions = torch.zeros((self.num_steps, len(self.action_shape)), dtype=torch.long)
        dones = torch.zeros((self.num_steps,))
        obs = torch.zeros((self.num_steps,) + self.obs_shape)
        log_probs = torch.zeros((self.num_steps, len(self.action_shape)))
        values = torch.zeros((self.num_steps,))
        stored_memory_masks = torch.zeros((self.num_steps, self.trxl_memory_length), dtype=torch.bool)
        stored_memory_indices = torch.zeros((self.num_steps, self.trxl_memory_length), dtype=torch.long)


        # Unpack the rollout into flat per-step tensors.
        for i, exp in enumerate(tarj):
            rewards[i] = exp[0][2]
            actions[i] = exp[0][1]
            dones[i] = exp[0][3]
            obs[i] = torch.as_tensor(exp[0][0])
            stored_memory_masks[i] = exp[0][5][5]
            stored_memory_indices[i] = exp[0][5][4]
        # The last experience carries log-prob/value tensors covering the
        # whole rollout, accumulated by the sampling agent.
        log_probs = tarj[-1][0][5][2]
        values = tarj[-1][0][5][3]

        rewards = rewards.to(device=self.device)
        actions = actions.to(device=self.device)
        obs = obs.to(device=self.device)
        log_probs = log_probs.to(device=self.device)
        dones = dones.to(device=self.device)
        values = values.to(device=self.device)
        stored_memory_masks = stored_memory_masks.to(device=self.device)
        stored_memory_indices = stored_memory_indices.to(device=self.device)

        with torch.no_grad():
            # Start of the memory window: current step minus trxl_memory_length,
            # clipped at 0.  Even when the window is shorter than
            # trxl_memory_length, broadcasting pads it and the attention mask
            # hides the extra (empty) memory entries.
            start = torch.clip(env_current_episode_step - self.trxl_memory_length, 0).item()
            # End of the window: the current step, at least trxl_memory_length.
            end = torch.clip(env_current_episode_step, self.trxl_memory_length).item()
            # Index sequence [start, end) used to slice the episode memory.
            indices = torch.stack([torch.arange(start, end, device=self.device)]).long()
            # Gather the memory window out of the whole-episode memory.
            memory_window = common.batched_index_select(next_memory, 1, indices)  # Retrieve the memory window from the entire episode
            # Value estimate for the observation following the rollout,
            # used to bootstrap GAE below.
            next_value = self.model.get_value(
                next_obs,
                memory_window,
                self.memory_mask[torch.clip(env_current_episode_step, 0, self.trxl_memory_length - 1)],
                stored_memory_indices[-1].unsqueeze(0),
            )
            advantages = torch.zeros_like(rewards).to(device=self.device)
            lastgaelam = 0
            # Standard PPO GAE(lambda), computed backwards over the rollout.
            for t in reversed(range(self.num_steps)):
                if t == self.num_steps - 1:
                    nextnonterminal = 1.0 - next_done
                    nextvalues = next_value
                else:
                    nextnonterminal = 1.0 - dones[t + 1]
                    nextvalues = values[t + 1]
                delta = rewards[t] + self.gamma * nextvalues * nextnonterminal - values[t]
                advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam
            advantages = advantages.to(device=self.device)
            values = values.to(device=self.device)
            returns = advantages + values


        stored_memory_index = self.net_sample_agent.stored_memory_index
        # Flatten the batch: all time steps collapsed into one sample axis.
        b_obs = obs.reshape(-1, *obs.shape[1:])
        b_logprobs = log_probs.reshape(-1, *log_probs.shape[1:])
        b_actions = actions.reshape(-1, *actions.shape[1:])
        b_advantages = advantages.reshape(-1)
        b_returns = returns.reshape(-1)
        b_values = values.reshape(-1)
        b_memory_index = stored_memory_index.reshape(-1)
        b_memory_indices = stored_memory_indices.reshape(-1, *stored_memory_indices.shape[1:])
        b_memory_mask = stored_memory_masks.reshape(-1, *stored_memory_masks.shape[1:])
        stored_memories = torch.stack(stored_memories, dim=0)

        # Remove unnecessary padding from TrXL memory, if applicable:
        # when no sample ever filled a whole memory window, trim the window
        # dimension down to the longest actually-used length.
        actual_max_episode_steps = (stored_memory_indices * stored_memory_masks).max().item() + 1
        if actual_max_episode_steps < self.trxl_memory_length:
            b_memory_indices = b_memory_indices[:, :actual_max_episode_steps]
            b_memory_mask = b_memory_mask[:, :actual_max_episode_steps]
            stored_memories = stored_memories[:, :actual_max_episode_steps]


        # Optimizing the policy and value network (PPO proper).
        clipfracs = []
        for epoch in range(self.update_epochs):
            # Shuffle sample indices so minibatches are decorrelated.
            b_inds = torch.randperm(self.batch_size)
            for start in range(0, self.batch_size, self.minibatch_size):
                end = start + self.minibatch_size
                mb_inds = b_inds[start:end] # minibatch sample indices
                # b_memory_index[mb_inds]: which stored episode memory each
                # sample belongs to; use it to pick the per-sample memories.
                mb_memories = stored_memories[b_memory_index[mb_inds]]
                # Slice each episode memory down to the per-step window.
                mb_memory_windows = common.batched_index_select(mb_memories, 1, b_memory_indices[mb_inds])

                # Re-evaluate stored actions: new log-probs, entropy and value.
                _, newlogprob, entropy, newvalue, _ = self.model.get_action_and_value(
                    b_obs[mb_inds], mb_memory_windows, b_memory_mask[mb_inds], b_memory_indices[mb_inds], b_actions[mb_inds]
                )

                # Policy loss
                # Advantages of the minibatch (optionally normalized).
                mb_advantages = b_advantages[mb_inds]
                if self.norm_adv:
                    mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
                mb_advantages = mb_advantages.unsqueeze(1).repeat(
                    1, len(self.action_shape)
                )  # Repeat is necessary for multi-discrete action spaces
                # Ratio of new to old action log-probabilities,
                # then the clipped PPO surrogate objective.
                logratio = newlogprob - b_logprobs[mb_inds]
                ratio = torch.exp(logratio)
                pgloss1 = -mb_advantages * ratio
                pgloss2 = -mb_advantages * torch.clamp(ratio, 1.0 - self.clip_coef, 1.0 + self.clip_coef)
                pg_loss = torch.max(pgloss1, pgloss2).mean()

                # Value loss
                # Unclipped MSE term: (newvalue - returns)^2
                v_loss_unclipped = (newvalue - b_returns[mb_inds]) ** 2
                if self.clip_vloss:
                    # Limit how far the new value estimate may move from the
                    # old one: the old-vs-new difference is clamped to
                    # +/- clip_coef and added back onto the old value.
                    '''
                    Rationale:
                    Stability — prevents the value estimates from changing
                    too drastically between updates.
                    Trust region — creates a "trust region" for the critic,
                    mirroring the PPO clipping of the policy update.
                    Gradual updates — encourages small incremental steps and
                    avoids overly aggressive corrections.
                    This dual clipping (policy and value) is an important part
                    of PPO's training stability: the old/new value difference
                    is kept within +/- clip_coef.
                    '''
                    v_loss_clipped = b_values[mb_inds] + (newvalue - b_values[mb_inds]).clamp(
                        min=-self.clip_coef, max=self.clip_coef
                    )
                    v_loss = torch.max(v_loss_unclipped, (v_loss_clipped - b_returns[mb_inds]) ** 2).mean()
                else:
                    v_loss = v_loss_unclipped.mean()

                # Entropy loss
                entropy_loss = entropy.mean()

                # Combined losses
                # policy loss - entropy bonus + weighted value loss
                loss = pg_loss - self.ent_coef_sheduler.get_ent_coef() * entropy_loss + v_loss * self.vf_coef

                # Add reconstruction loss if used
                r_loss = torch.tensor(0.0)
                if self.reconstruction_coef > 0.0:
                    # BCE between the decoded observation and the real one
                    # (scaled to [0, 1]); encourages informative features.
                    r_loss = self.bce_loss(self.model.reconstruct_observation(), b_obs[mb_inds] / 255.0)
                    loss += self.reconstruction_coef * r_loss

                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.max_grad_norm)
                self.optimizer.step()

                with torch.no_grad():
                    # calculate approx_kl http://joschu.net/blog/kl-approx.html
                    # Monitoring only: estimates the KL divergence between the
                    # old and new policies to detect overly large updates.
                    old_approx_kl = (-logratio).mean()
                    approx_kl = ((ratio - 1) - logratio).mean()
                    clipfracs += [((ratio - 1.0).abs() > self.clip_coef).float().mean().item()]
                self.train_count += 1

            # Early-stop the epoch loop if the policy moved too far.
            if self.target_kl is not None and approx_kl > self.target_kl:
                break

        # Advance the LR / entropy-coefficient schedules once per env step.
        for _ in range(self.num_steps):
            self.sheduler.step()
            self.ent_coef_sheduler.step()
        # Explained variance of the value function: how much of the return
        # variance the critic's predictions account for.
        y_pred, y_true = b_values.cpu().numpy(), b_returns.cpu().numpy()
        var_y = np.var(y_true)
        # explained_var = 1 - Var(y_true - y_pred) / Var(y_true)
        '''
        Interpretation:
        Range: typically (-inf, 1]
          1.0:  perfect prediction
          0.0:  no better than always predicting the mean (the real values and
                the predictions only share their mean; differences remain)
          < 0:  worse than predicting the mean
        '''
        # var_y == 0 means every return in the batch is identical — e.g. a
        # constant-reward environment, an agent behaving fully deterministically,
        # or all episodes ending with the same reward.  That usually indicates
        # a reward-design or training problem, so the metric is reported as
        # NaN instead of dividing by zero.
        explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

        # Console progress line.
        print(
            "{:9} SPS={:4} pi_loss={:.3f} v_loss={:.3f} entropy={:.3f} r_loss={:.3f} value={:.3f} adv={:.3f}".format(
                self.train_count,
                int(self.frame_idx / (time.time() - self.start_time)),
                pg_loss.item(),
                v_loss.item(),
                entropy_loss.item(),
                r_loss.item(),
                torch.mean(values),
                torch.mean(advantages),
            )
        )

        self.writer.add_scalar("episode/value_mean", torch.mean(values), self.frame_idx)
        self.writer.add_scalar("episode/advantage_mean", torch.mean(advantages), self.frame_idx)
        self.writer.add_scalar("charts/learning_rate", self.sheduler.calculate_lr(), self.frame_idx)
        self.writer.add_scalar("charts/entropy_coefficient", self.ent_coef_sheduler.get_ent_coef(), self.frame_idx)
        self.writer.add_scalar("losses/policy_loss", pg_loss.item(), self.frame_idx)
        self.writer.add_scalar("losses/value_loss", v_loss.item(), self.frame_idx)
        self.writer.add_scalar("losses/loss", loss.item(), self.frame_idx)
        self.writer.add_scalar("losses/entropy", entropy_loss.item(), self.frame_idx)
        self.writer.add_scalar("losses/reconstruction_loss", r_loss.item(), self.frame_idx)
        self.writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), self.frame_idx)
        self.writer.add_scalar("losses/approx_kl", approx_kl.item(), self.frame_idx)
        self.writer.add_scalar("losses/clipfrac", np.mean(clipfracs), self.frame_idx)
        self.writer.add_scalar("losses/explained_variance", explained_var, self.frame_idx)
        self.writer.add_scalar("charts/SPS", int(self.frame_idx / (time.time() - self.start_time)), self.frame_idx)


    def train(self):
        """Main loop: collect `num_steps` experiences, then run one PPO update.

        Evaluation and checkpointing happen every `eval_freq` / `save_freq`
        minibatch updates (train_count granularity).
        """
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                for exp in self.exp_source:
                    self.frame_idx += 1
                    self.tarj.append(exp)
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # Log training progress and let the tracker decide
                        # whether the reward target has been reached.
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    # Keep collecting until a full rollout is available.
                    if len(self.tarj) < self.num_steps:
                        continue
                    
                    self.train_model(self.tarj)
                    self.tarj = []

                    if self.train_count % self.eval_freq == 0:
                        self.__test_model()

                    if self.train_count % self.save_freq == 0:
                        self.save_model()


    @staticmethod
    @torch.no_grad()
    def test_net(net, env, trxl_memory_length, trxl_num_layers, trxl_dim, max_episode_steps, memory_mask, memory_indices, count, device):
        '''
        Play `count` full episodes with `net` and report averages.

        Note: `@staticmethod` must be the outermost decorator — the original
        order (`@torch.no_grad()` above `@staticmethod`) only works on
        Python >= 3.10, where staticmethod objects became callable.

        :param count: number of episodes to play (each runs until termination)
        :return: (mean episode reward, mean episode length)
        '''
        rewards = 0.0
        steps = 0
        for _ in range(count):
            # Per-episode transformer memory: one slot per step, one entry
            # per transformer layer.
            next_memory = torch.zeros((1, max_episode_steps, trxl_num_layers, trxl_dim), dtype=torch.float32)

            obs, _ = env.reset()
            step = torch.zeros((1,), dtype=torch.long).to(device=device)
            while True:
                # Select the sliding memory window for the current step; the
                # mask guarantees that unwritten slots are ignored, so there
                # is no risk of attending to unrelated memories.
                memory_window = common.batched_index_select(next_memory, 1, memory_indices[step])
                obs = torch.tensor(obs, dtype=torch.float32, device=device).unsqueeze(0)
                action, new_memory = net.get_action(
                    obs, memory_window, memory_mask[torch.clip(step, 0, trxl_memory_length - 1)], memory_indices[step]
                )

                # Store this step's freshly produced memory for future windows.
                next_memory[0, step] = new_memory

                # Apply the chosen action to the environment.
                obs, reward, done, truncated, _ = env.step(action.item())
                step += 1
                rewards += reward
                steps += 1
                if done or truncated:
                    break
        return rewards / count, steps / count


    @torch.no_grad()
    def __test_model(self):
        """Evaluate the current model, log the results and save the weights
        via common.save_best_model (which keeps track of the best reward)."""
        ts = time.time()
        self.model.eval()
        rewards, steps = Trainer.test_net(
            net=self.model,
            env=self.test_env,
            trxl_memory_length=self.trxl_memory_length,
            trxl_num_layers=self.trxl_num_layers,
            trxl_dim=self.trxl_dim,
            memory_mask=self.memory_mask,
            memory_indices=self.memory_indices,
            max_episode_steps=self.max_episode_steps,
            # Use the trainer's device — the original referenced the
            # module-level global `device`, which only exists when the file
            # is executed as a script.
            count=10, device=self.device)
        self.model.train()
        print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (self.train_count,
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        # best_reward starts at -inf, so the None checks are purely defensive.
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
        
        checkpoints = {
            "model": self.model.state_dict(),
        }
        common.save_best_model(rewards, checkpoints, self.save_path, 'ppo_trxl_best')

        


if __name__ == "__main__":
    torch.set_default_dtype(torch.float32)
    np.set_printoptions(precision=8)
    np_float32 = np.float32

    # First pass: only --cuda / --name / --configs, so the device and the
    # YAML config sections can be resolved before the full parser is built.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="mountaincar", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # parser.add_argument('--configs', nargs='+', required=True)
    # Comment the line above and comment out the line below if you want to debug in IDE like PyCharm
    # Update from configs.yaml
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/mountaincar_configs_ppo_trxl.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Update from cli: every config key becomes a typed CLI flag whose
    # default is the YAML value, so the command line can override the config.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)


    trainer = Trainer(params, device)
    trainer.load_model()
    trainer.train()
