#!/usr/bin/env python3
'''
Not yet adapted to this project. Reference links:
1. https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_trxl/ppo_trxl.py
2. https://docs.cleanrl.dev/rl-algorithms/ppo-trxl#ppo_trxlpy
3. https://ale.farama.org/environments/darkchambers/
'''
import os
import math
from typing import Any
import torch.nn as nn
import ptan
import time
import gymnasium as gym
from gymnasium import spaces
import ale_py
import argparse
from tensorboardX import SummaryWriter
from torch.distributions import Categorical

from lib import model, common
from einops import rearrange

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.utils as nn_utils
from collections import deque

gym.register_envs(ale_py)  # make the ALE/* environment ids visible to gymnasium
GAMMA = 0.9
GAE_LAMBDA = 1.00 # lambda factor of the advantage estimator; 0.95 is usually a good value

TRAJECTORY_SIZE = 2049
LEARNING_RATE_ACTOR = 2.75e-4

PPO_EPS = 0.2
PPO_EPOCHES = 10 # number of PPO optimization passes over each collected trajectory
PPO_BATCH_SIZE = 64 # mini-batch length used for each trajectory-sample update

TEST_ITERS = 100000 # run a test game every this many sampled iterations

CLIP_GRAD = 0.5
CLIP_COEF = 0.1
CLIP_VLOSS = True
ENT_COEF = 0.01
VF_COEF = 0.5
TARGET_KL = None

TRXL_MEMORY_LENGTH = 119


class DQNTRXLAgent(ptan.agent.BaseAgent):
    """
    ptan agent wrapping a Transformer-XL PPO model.

    Besides selecting actions, each ``__call__`` caches the per-step value
    estimate, action log-probability and updated TrXL memory so the training
    loop can read them back through the ``current_*`` accessors.
    """

    def __init__(self, dqn_model, num_envs, action_selector=ptan.actions.ProbabilityActionSelector(), device="cpu", preprocessor=ptan.agent.default_states_preprocessor):
        '''
        :param dqn_model: trained network; must return
            (action, logprob, entropy, value, new_memory)
        :param num_envs: number of parallel environments
        :param action_selector: action selector (kept for interface parity;
            the model samples actions itself)
        :param device: torch device observations are moved to
        :param preprocessor: callable turning a list of raw states into a tensor
        '''

        self.dqn_model = dqn_model
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device
        self.num_envs = num_envs
        self.cur_value = None
        self.cur_logprob = None
        self.cur_new_memory = None

    def initial_state(self):
        """
        Initial empty state for the agent, created at episode start.
        :return: anything the agent wants to remember (nothing here)
        """
        return None

    def current_value(self):
        """Critic value estimate produced by the latest __call__."""
        return self.cur_value

    def current_logprob(self):
        """Action log-probabilities produced by the latest __call__."""
        return self.cur_logprob

    def current_new_memory(self):
        """Updated TrXL memory produced by the latest __call__."""
        return self.cur_new_memory

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """
        Select actions for a batch of states.

        :param states: batch of observations
        :param agent_states: (memory_window, memory_mask, memory_indices)
        :return: (actions, agent_states)
        """
        if agent_states is None:
            raise ValueError("agent_states must be provided for this agent")

        memory_window, memory_mask, memory_indices = agent_states
        # Run the optional preprocessor and move the batch to the device.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # BUG FIX: the new memory used to be stored as ``self.new_memory``,
        # which ``current_new_memory()`` never returned (it reads
        # ``cur_new_memory``); store it under the accessor's attribute.
        action, self.cur_logprob, _, self.cur_value, self.cur_new_memory = self.dqn_model(states, memory_window, memory_mask, memory_indices)
        return action, agent_states

    # Checkpoint helpers.
    # NOTE(review): ``self.next_lstm_state`` is never assigned anywhere in
    # this class (leftover from an LSTM-based agent), so save_state_dict
    # raises AttributeError — confirm what state should actually be persisted.
    def save_state_dict(self, checkpoints):
        checkpoints['next_lstm_state'] = self.next_lstm_state

    def load_state_dict(self, checkpoints):
        self.next_lstm_state = checkpoints['next_lstm_state']


class PositionalEncoding(nn.Module):
    """Sinusoidal absolute positional encoding (Transformer-XL style)."""

    def __init__(self, dim, min_timescale=2.0, max_timescale=1e4):
        """
        :param dim: embedding dimension (sin and cos halves together)
        :param min_timescale: step between sampled frequencies
        :param max_timescale: largest wavelength of the encoding
        """
        super().__init__()
        freqs = torch.arange(0, dim, min_timescale)
        inv_freqs = max_timescale ** (-freqs / dim)
        self.register_buffer("inv_freqs", inv_freqs)

    def forward(self, seq_len):
        """
        :param seq_len: number of positions to encode
        :return: (seq_len, dim) tensor; positions run backwards so the most
            recent step (last row) corresponds to position 0
        """
        seq = torch.arange(seq_len - 1, -1, -1.0)
        # BUG FIX: the original multiplied a rearrange pattern *string* by a
        # tensor ("n -> n ()" * rearrange(...)), raising a TypeError; the
        # intent is an outer product of positions and inverse frequencies.
        sinusoidal_inp = seq.unsqueeze(-1) * self.inv_freqs.unsqueeze(0)
        pos_emb = torch.cat((sinusoidal_inp.sin(), sinusoidal_inp.cos()), dim=-1)
        return pos_emb


class MultiheadAttention(nn.Module):
    """
    Scaled dot-product multi-head attention with per-head linear projections
    applied after splitting the embedding into heads.
    """

    def __init__(self, embed_dim, num_heads):
        """
        :param embed_dim: total embedding dimension
        :param num_heads: number of attention heads (must divide embed_dim)
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_size = embed_dim // num_heads

        assert self.head_size * num_heads == embed_dim, "Embedding dimension must be divisible by number of heads"

        self.values = nn.Linear(self.head_size, self.head_size, bias=False)
        self.keys = nn.Linear(self.head_size, self.head_size, bias=False)
        self.queries = nn.Linear(self.head_size, self.head_size, bias=False)
        self.fc_out = nn.Linear(self.num_heads * self.head_size, embed_dim)

    def forward(self, query, key, value, mask):
        """
        :param query: (N, query_len, embed_dim)
        :param key: (N, key_len, embed_dim)
        :param value: (N, value_len, embed_dim)
        :param mask: broadcastable mask where 0 means "do not attend", or None
        :return: (output (N, query_len, embed_dim), attention weights (N, heads, q, k))
        """
        N = query.shape[0]
        value_len, key_len, query_len = value.shape[1], key.shape[1], query.shape[1]

        # BUG FIX: the original reshaped the not-yet-defined locals
        # `values`/`keys`/`queries` (NameError) instead of the arguments.
        values = value.reshape(N, value_len, self.num_heads, self.head_size)
        keys = key.reshape(N, key_len, self.num_heads, self.head_size)
        queries = query.reshape(N, query_len, self.num_heads, self.head_size)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Attention energies per head: (N, heads, query_len, key_len).
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        if mask is not None:
            # Large negative fill so masked slots get ~zero softmax weight.
            energy = energy.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0, float("-1e20"))

        attention = torch.softmax(energy / (self.embed_dim ** (1 / 2)), dim=3)

        out = torch.einsum("nhql,nlhd->nqhd", [attention, values]).reshape(N, query_len, self.num_heads * self.head_size)

        return self.fc_out(out), attention



class TransformerLayer(nn.Module):
    """
    Single pre-layer-norm TrXL block: multi-head attention over the episodic
    memory followed by a position-wise projection, each wrapped in a residual
    connection.
    """

    def __init__(self, dim, num_heads):
        # BUG FIX: the constructor was misspelled ``__ini__``, so it was never
        # invoked and none of the submodules below were ever created.
        super().__init__()
        self.attention = MultiheadAttention(dim, num_heads)
        self.layer_norm_q = nn.LayerNorm(dim)
        self.norm_kv = nn.LayerNorm(dim)
        self.layer_norm_attn = nn.LayerNorm(dim)
        self.fc_projection = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU())

    def forward(self, value, key, query, mask):
        """
        :param value: memory window, used as both keys and values
        :param key: ignored on entry — overwritten by the normalized values
        :param query: current step's embedding
        :param mask: attention mask (0 = do not attend), or None
        :return: (block output, attention weights)
        """
        # Pre-norm the query and the key/value memory.
        query_ = self.layer_norm_q(query)
        value = self.norm_kv(value)
        key = value
        attention, attention_weights = self.attention(value, key, query_, mask)
        # Residual connection around the attention sub-layer.
        x = attention + query
        x_ = self.layer_norm_attn(x)
        forward = self.fc_projection(x_)
        # Residual connection around the projection sub-layer.
        out = forward + x
        return out, attention_weights



class Transformer(nn.Module):
    """
    Transformer-XL style stack operating on a per-episode memory window.

    FIX: the original class defined no ``forward`` although ModelPPO calls
    ``self.transformer(x, memory, memory_mask, memory_indices)``; the forward
    below follows the cleanrl ppo_trxl reference implementation linked in the
    module docstring.
    """
    def __init__(self, num_layers, dim, num_heads, max_episode_steps, positional_encoding):
        """
        :param num_layers: number of TransformerLayer blocks
        :param dim: embedding dimension
        :param num_heads: attention heads per layer
        :param max_episode_steps: episodic memory capacity
        :param positional_encoding: 'absolute', 'learned' or anything else for none
        """
        super(Transformer, self).__init__()
        self.max_episode_steps = max_episode_steps
        self.positional_encoding = positional_encoding
        if positional_encoding == 'absolute':
            self.pos_embedding = PositionalEncoding(dim)
        elif positional_encoding == 'learned':
            self.pos_embedding = nn.Parameter(torch.randn(max_episode_steps, dim))
        self.transformer_layers = nn.ModuleList([
            TransformerLayer(dim, num_heads) for _ in range(num_layers)])

    def forward(self, x, memories, mask, memory_indices):
        """
        :param x: (batch, dim) embedding of the current step
        :param memories: (batch, mem_len, num_layers, dim) memory window
        :param mask: (batch, mem_len) attention mask
        :param memory_indices: (batch, mem_len) absolute step indices of the
            window, used to position-encode the memories
        :return: ((batch, dim) output embedding,
                  (batch, num_layers, dim) memory entries for this step)
        """
        # Add positional information to the memory window.
        if self.positional_encoding == 'absolute':
            pos_embedding = self.pos_embedding(self.max_episode_steps)[memory_indices]
            memories = memories + pos_embedding.unsqueeze(2)
        elif self.positional_encoding == 'learned':
            memories = memories + self.pos_embedding[memory_indices].unsqueeze(2)

        out_memories = []
        for i, layer in enumerate(self.transformer_layers):
            # The pre-attention activation becomes this layer's memory entry.
            out_memories.append(x.detach())
            x, _ = layer(memories[:, :, i], memories[:, :, i], x.unsqueeze(1), mask)
            x = x.squeeze(1)
            if len(x.shape) == 1:
                x = x.unsqueeze(0)
        return x, torch.stack(out_memories, dim=1)


class ModelPPO(nn.Module):
    """
    PPO actor-critic network with a Transformer-XL episodic memory.

    The observation is encoded (CNN for image shapes, a linear layer
    otherwise), combined with the memory window by the TrXL, and fed into one
    categorical actor head per action branch plus a scalar critic head.  An
    optional transposed-CNN decoder supports an observation-reconstruction
    auxiliary loss.
    """
    def __init__(self, obs_size, act_size, trxl_dim=384, max_episode_steps=1024, trxl_num_layers=3, trxl_num_heads=4, trxl_positional_encoding='absolute', reconstruction_coeff=0.1):
        '''
        :param obs_size: observation shape (channels-first image shape, or 1-D)
        :param act_size: iterable of per-branch action counts; a bare int is
            treated as a single discrete branch (FIX: the training script
            passes env.action_space.n, a scalar)
        :param trxl_dim: TrXL embedding size (FIX: given a default so the
            two-argument construction used by the script works)
        :param max_episode_steps: episodic memory capacity
        :param trxl_num_layers: number of TrXL layers
        :param trxl_num_heads: attention heads per layer
        :param trxl_positional_encoding: 'absolute', 'learned' or '' for none
        :param reconstruction_coeff: > 0 enables the reconstruction decoder
        '''
        super(ModelPPO, self).__init__()

        # FIX: normalize a scalar action count into a single-branch list.
        if isinstance(act_size, (int, np.integer)):
            act_size = [int(act_size)]

        self.max_episode_steps = max_episode_steps
        self.trxl_dim = trxl_dim
        self.obs_size = obs_size
        self.act_size = act_size

        if len(obs_size) > 1:
            # Nature-DQN style CNN encoder, sized for 84x84 inputs
            # (64 * 7 * 7 flattened features).
            self.encoder = nn.Sequential(
                nn.Conv2d(obs_size[0], 32, kernel_size=8, stride=4),
                nn.ReLU(),
                nn.Conv2d(32, 64, kernel_size=4, stride=2),
                nn.ReLU(),
                nn.Conv2d(64, 64, kernel_size=3, stride=1),
                nn.ReLU(),
                nn.Flatten(),
                nn.Linear(64 * 7 * 7, trxl_dim),
                nn.ReLU()
            )

        else:
            self.encoder = nn.Linear(obs_size[0], trxl_dim)


        self.transformer = Transformer(
            trxl_num_layers, trxl_dim, trxl_num_heads, max_episode_steps, trxl_positional_encoding
        )

        self.hidden_post_trxl = nn.Sequential(
            nn.Linear(trxl_dim, trxl_dim),
            nn.ReLU()
        )

        # One categorical head per action branch (multi-discrete support).
        self.actor_branches = nn.ModuleList(
            [
                nn.Linear(trxl_dim, num_actions)
                for num_actions in act_size
            ]
        )

        self.critic_linear = nn.Linear(trxl_dim, 1)

        if reconstruction_coeff > 0.0:
            # Decoder for the optional observation-reconstruction loss.
            # NOTE(review): only meaningful for image observations; it is
            # still built when obs_size is 1-D — confirm intended guard.
            self.transposed_cnn = nn.Sequential(
                nn.Linear(trxl_dim, 64 * 7 * 7),
                nn.ReLU(),
                nn.Unflatten(1, (64, 7, 7)),
                nn.ConvTranspose2d(64, 64, kernel_size=3, stride=1),
                nn.ReLU(),
                nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2),
                nn.ReLU(),
                nn.ConvTranspose2d(32, obs_size[0], kernel_size=8, stride=4),
                nn.Sigmoid()
            )


    def _get_conv_out(self, shape):
        # NOTE(review): dead code — ``self.conv`` is never defined (the image
        # encoder lives in ``self.encoder``), so calling this raises
        # AttributeError.  Kept for interface compatibility.
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))


    def get_value(self, x, memory, memory_mask, memory_indices):
        """Critic-only forward pass; returns a flat (batch,) value tensor."""
        if len(self.obs_size) > 1:
            # NOTE(review): the permute assumes channels-last uint8 input —
            # confirm against the observation wrappers actually used.
            x = self.encoder(x.permute((0, 3, 1, 2)) / 255.0)
        else:
            x = self.encoder(x)
        x, _ = self.transformer(x, memory, memory_mask, memory_indices)
        x = self.hidden_post_trxl(x)
        return self.critic_linear(x).flatten()

    def forward(self, x, memory, memory_mask, memory_indices, action=None):
        """
        Full actor-critic pass.

        :param x: batch of observations
        :param memory: TrXL memory window
        :param memory_mask: attention mask over the window
        :param memory_indices: absolute step indices of the window
        :param action: optional pre-sampled actions (for PPO updates);
            sampled from the policy when None
        :return: (action, log_probs, entropies, values, new memory)
        """
        if len(self.obs_size) > 1:
            x = self.encoder(x.permute((0, 3, 1, 2)) / 255.0)
        else:
            x = self.encoder(x)
        x, memory = self.transformer(x, memory, memory_mask, memory_indices)
        x = self.hidden_post_trxl(x)
        # Cached for reconstruct_observation().
        self.x = x
        probs = [Categorical(logits=self.actor_branches[i](x)) for i in range(len(self.actor_branches))]
        if action is None:
            action = torch.stack([dist.sample() for dist in probs], dim=1)
        log_probs = []
        for i, dist in enumerate(probs):
            log_probs.append(dist.log_prob(action[:, i]))
        entropies = torch.stack([dist.entropy() for dist in probs], dim=1).sum(dim=1).reshape(-1)
        return action, torch.stack(log_probs, dim=1), entropies, self.critic_linear(x).flatten(), memory


    def reconstruct_observation(self):
        """Decode the last hidden state back into an observation estimate.

        Requires forward() to have been called first (uses the cached
        ``self.x``) and reconstruction_coeff > 0 at construction time.
        """
        x = self.transposed_cnn(self.x)
        return x.permute(0, 2, 3, 1)


class RewardPenaltyWrapper(gym.Wrapper):
    """
    Reward-shaping wrapper: floor-divides every non-zero reward by 10.

    The life-loss penalty logic is currently disabled; ``frame_penalty`` and
    ``life_loss_penalty`` are stored but not applied.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty          # currently unused
        self.life_loss_penalty = life_loss_penalty  # currently unused
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count for (currently disabled) penalties.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale down non-zero rewards by an integer factor of 10.
        if reward != 0:
            reward = reward // 10

        # Life-loss penalty handling is intentionally disabled in this version.
        return obs, reward, done, truncated, info


import cv2

class ProcessFrame84(gym.ObservationWrapper):
    """
    Convert the raw RGB game frame into an 84x84 single-channel uint8 image.
    """

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # New observation space: one 84x84 channel with values in 0..255.
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """Transform a single observation frame."""
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        # ITU-R 601 luma weights for RGB -> grayscale conversion.
        gray = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        # Shrink to 84x110, then keep 84 rows starting at row 5.
        shrunk = cv2.resize(gray, (84, 110), interpolation=cv2.INTER_AREA)
        cropped = shrunk[5:89, :]
        return np.reshape(cropped, [84, 84, 1]).astype(np.uint8)


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """
    Apply the Atari preprocessing wrapper stack to an environment.

    :param env: base gymnasium environment
    :param stack_frames: NOTE(review) accepted but never used — no
        frame-stacking wrapper is applied; confirm whether it should be
    :param episodic_life: treat each life as a separate episode
    :param reward_clipping: NOTE(review) accepted but never used — reward
        shaping is done by RewardPenaltyWrapper unconditionally
    :return: wrapped environment
    """
    if episodic_life:
        # Simulate a single-life game on top of a multi-life one
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomized no-op start for more varied initial states
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = ptan.common.wrappers.FireResetEnv(env)
    env = ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = RewardPenaltyWrapper(env)
    return env


def test_net(net, env, count=10, device="cpu"):
    """
    Play ``count`` greedy episodes and return (mean reward, mean steps).

    An episode is aborted early when the greedy policy emits Noop (action 0)
    more than 30 times in a row.

    NOTE(review): calls ``net(obs_v)`` with a single argument, which does not
    match ModelPPO.forward's memory-based signature — presumably written for
    a simpler actor network; confirm before use.
    """
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        consecutive_noops = 0
        last_action = -1
        obs, _ = env.reset()
        finished = False
        while not finished:
            obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
            mu_v, _ = net(obs_v)

            # Greedy action: index of the largest network output.
            action = mu_v.squeeze(dim=0).data.cpu().argmax().item()

            # Abort if the policy is stuck emitting Noop.
            if action == 0 and last_action == action:
                consecutive_noops += 1
                if consecutive_noops > 30:
                    break
            else:
                consecutive_noops = 0
            last_action = action

            obs, reward, terminated, truncated, _ = env.step(action)
            finished = terminated or truncated
            total_reward += reward
            total_steps += 1
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, values, device="cpu"):
    """
    Compute GAE advantages and value targets from a sampled trajectory.

    Walks the trajectory backwards, accumulating the generalized advantage
    estimate: on terminal steps the accumulator restarts from the plain TD
    error without bootstrapping; otherwise each one-step TD error is folded
    into the running estimate with factor GAMMA * GAE_LAMBDA.

    :param trajectory: list of 1-element tuples of Experience entries
    :param values: critic value estimates, one per trajectory step
    :param device: torch device for the returned tensors
    :return: (advantages, reference values) FloatTensors, one entry per
        step except the last
    """
    gae = 0.0          # running (smoothed) advantage accumulator
    advantages = []    # advantage per step, built in reverse order
    refs = []          # value targets (advantage + predicted value)

    # Pair each value with its successor and the matching experience,
    # iterating from the end of the trajectory towards the start.
    steps = zip(reversed(values[:-1]), reversed(values[1:]), reversed(trajectory[:-1]))
    for val, next_val, (exp,) in steps:
        if exp.done:
            # Terminal step: no bootstrap value, no accumulated advantage.
            gae = exp.reward - val
        else:
            # One-step TD error via the Bellman equation, folded into the
            # discounted running advantage.
            delta = exp.reward + GAMMA * next_val - val
            gae = delta + GAMMA * GAE_LAMBDA * gae
        advantages.append(gae)
        refs.append(gae + val)

    # Undo the reverse traversal so tensors are in trajectory order.
    adv_v = torch.FloatTensor(list(reversed(advantages))).to(device)
    ref_v = torch.FloatTensor(list(reversed(refs))).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """
    Convert a list of environment states into a batched torch tensor.

    A single state becomes shape [1, ...]; multiple states are stacked into
    [N, ...].

    FIX: ``np.array(..., copy=False)`` raises a ValueError under NumPy 2.0
    whenever a copy is actually required; ``np.asarray``/``np.stack`` express
    the same intent portably.  The trailing ``.copy()`` was also redundant —
    ``torch.tensor`` always copies its input.

    :param states: list of numpy arrays with states
    :return: torch tensor batch
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        np_states = np.stack([np.asarray(s) for s in states])
    return torch.tensor(np_states)

from collections import namedtuple
from ptan.agent import BaseAgent

def _group_list(items, lens):
    """
    Unflat the list of items by lens
    反平铺队列，也就是将原先list 一维的数据，跟进lens进行分割，实现二维的队列
    [...] => [[.], [[.], [.]], .]
    :param items: list of items
    :param lens: list of integers
    :return: list of list of items grouped by lengths
    """
    res = []
    cur_ofs = 0
    for g_len in lens:
        res.append(items[cur_ofs:cur_ofs+g_len])
        cur_ofs += g_len
    return res


def batched_index_select(input, dim, index):
    """
    Gather slices of ``input`` along ``dim`` with a per-batch index tensor.

    ``index`` carries one index vector per batch element; the result has the
    same shape as ``input`` except that dimension ``dim`` is replaced by the
    number of indices per batch.
    """
    # Insert singleton dimensions into `index` everywhere except the batch
    # axis and the gather axis, so it can be broadcast-expanded below.
    for axis in range(1, input.dim()):
        if axis != dim:
            index = index.unsqueeze(axis)

    # Expand the index to match `input`, keeping batch and gather axes free.
    target_shape = list(input.shape)
    target_shape[0] = -1
    target_shape[dim] = -1
    expanded = index.expand(target_shape)
    return torch.gather(input, dim, expanded)


# One environment transition: the observation, the action taken, the reward
# and done flag returned by the env, plus the step index within the episode
# (used to select the TrXL memory window).
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'done', 'env_episode_step'])


class ExperienceSource:
    """
    Simple n-step experience source using single or multiple environments.

    Every yielded item is a tuple of up to ``steps_count`` consecutive
    Experience entries from one environment.

    NOTE(review): this TrXL adaptation of ptan's ExperienceSource appears
    unfinished — ``__iter__`` contains several references that fail at
    runtime; see the inline NOTE(review) comments.
    """
    def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False, trxl_memory_length = 119, max_episode_steps = 1024, trxl_dim = 384):
        """
        Create simple experience source
        :param env: environment or list of environments to be used
        :param agent: callable to convert batch of states into actions to take
        :param steps_count: count of steps to track for every experience chain
        :param steps_delta: how many steps to do between experience items
        :param vectorized: support of vectorized envs from OpenAI universe
        :param trxl_memory_length: sliding TrXL memory window length
        :param max_episode_steps: episodic memory capacity per environment
        :param trxl_dim: TrXL embedding dimension
        """
        # Validate the argument types before storing them on the instance.
        assert isinstance(env, (gym.Env, list, tuple))
        assert isinstance(agent, BaseAgent)
        assert isinstance(steps_count, int)
        assert steps_count >= 1
        assert isinstance(vectorized, bool)
        # self.pool: the list of environments to sample from
        if isinstance(env, (list, tuple)):
            self.pool = env
        else:
            self.pool = [env]
        self.agent = agent
        self.steps_count = steps_count
        self.steps_delta = steps_delta
        self.total_rewards = []
        self.total_steps = []
        self.vectorized = vectorized
        self.trxl_memory_length = trxl_memory_length 
        self.max_episode_steps = max_episode_steps
        self.trxl_dim = trxl_dim

    def __iter__(self):
        # Generator driving the environments: each iteration asks the agent
        # for actions on the current states, steps the environments, and
        # yields complete n-step chains.  When an episode finishes, its
        # accumulated reward and step count are recorded in
        # total_rewards / total_steps.
        #
        # Flat per-observation buffers (one slot per env observation):
        #   states       - latest observation in each slot
        #   agent_states - per-slot agent state (initial_state() after reset)
        #   histories    - deque of the last `steps_count` Experience entries
        #   cur_rewards  - reward accumulated during the current episode
        #   cur_steps    - steps taken during the current episode

        states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []
        env_current_episode_step = torch.zeros((len(self.pool),), dtype=torch.long)
        stored_memory_masks = []
        stored_memory_indices = []
        stored_memory_index = []
        memory_mask = torch.tril(torch.ones(self.trxl_memory_length, self.trxl_memory_length), diagonal=-1)
        """ e.g. memory mask tensor looks like this if memory_length = 6
            0, 0, 0, 0, 0, 0
            1, 0, 0, 0, 0, 0
            1, 1, 0, 0, 0, 0
            1, 1, 1, 0, 0, 0
            1, 1, 1, 1, 0, 0
            1, 1, 1, 1, 1, 0
        """

        repetitions = torch.repeat_interleave(
            torch.arange(0, self.trxl_memory_length).unsqueeze(0), self.trxl_memory_length - 1, dim=0
        ).long()
        memory_indices = torch.stack(
            [torch.arange(i, i + self.trxl_memory_length) for i in range(self.max_episode_steps - self.trxl_memory_length + 1)]
        ).long()
        memory_indices = torch.cat((repetitions, memory_indices))
        """ e.g. the memory window indices tensor looks like this if memory_length = 4 and max_episode_length = 7:
        0, 1, 2, 3
        0, 1, 2, 3
        0, 1, 2, 3
        0, 1, 2, 3
        1, 2, 3, 4
        2, 3, 4, 5
        3, 4, 5, 6
        """
        # NOTE(review): self.trxl_num_layers is never assigned in __init__,
        # so the line below raises AttributeError — the layer count should
        # presumably be a constructor parameter.
        next_memory = torch.zeros((len(self.pool), self.max_episode_steps, self.trxl_num_layers, self.trxl_dim), dtype=torch.float32)

        # Length of each environment's observation batch (vectorized envs
        # return variable-length lists); index-aligned with self.pool.
        env_lens = []
        # Initialization pass: reset every environment and record the
        # initial observations.
        for env in self.pool:
            obs, obs_info = env.reset()
            # if the environment is vectorized, all it's output is lists of results.
            if self.vectorized:
                # Vectorized env: one observation per sub-environment.
                obs_len = len(obs)
                states.extend(obs)
            else:
                # Plain env: a single observation.
                obs_len = 1
                states.append(obs)
            env_lens.append(obs_len)

            # Per-observation bookkeeping buffers.
            for _ in range(obs_len):
                histories.append(deque(maxlen=self.steps_count)) # n-step history window for this slot
                cur_rewards.append(0.0)  # episode reward accumulator
                cur_steps.append(0) # episode step counter
                agent_states.append(self.agent.initial_state()) # fresh agent state right after reset

        iter_idx = 0
        while True: 
            actions = [None] * len(states)
            states_input = []
            states_indices = []
            log_probs = []
            values = []
            # NOTE(review): self.next_memory is never assigned (the buffer is
            # the *local* `next_memory` above) — AttributeError at runtime.
            stored_memories = [self.next_memory[e] for e in range(len(self.pool))]
            memory_window = [None] * len(states)
            for idx, state in enumerate(states):
                if state is None:
                    actions[idx] = self.pool[0].action_space.sample()  # assume that all envs are from the same family
                else:
                    states_input.append(state)
                    states_indices.append(idx)
                # NOTE(review): stored_memory_masks / stored_memory_indices
                # are empty lists, so item assignment raises IndexError;
                # `args` is a module global that only exists when run as a
                # script; and env_current_episode_step is indexed with the
                # whole vector instead of the per-slot entry.
                stored_memory_masks[idx] = memory_mask[torch.clip(env_current_episode_step, 0, args.trxl_memory_length - 1)]
                stored_memory_indices[idx] = memory_indices[env_current_episode_step]
                memory_window[idx] = batched_index_select(next_memory, 1, stored_memory_indices)

                    
            if states_input:
                # Feed the non-terminal states to the agent to pick actions;
                # the TrXL memory window plus masks travel as agent_states.
                agent_states = memory_window, stored_memory_masks, stored_memory_indices
                states_actions, new_agent_states = self.agent(states_input, agent_states)
                # Scatter the chosen actions back to their original slots.
                for idx, action in enumerate(states_actions):
                    # Index of the slot this action belongs to.
                    g_idx = states_indices[idx]
                    actions[g_idx] = action
                    # NOTE(review): agent_states is a tuple here, so item
                    # assignment fails; the 4-way unpack below does not match
                    # the 3-tuple; and log_probs/values are empty lists that
                    # cannot be index-assigned.
                    agent_states[g_idx] = new_agent_states[idx]
                    logprob, _, value, new_memory = agent_states[g_idx]
                    next_memory[g_idx] = new_memory[idx]
                    log_probs[g_idx] = logprob[idx]
                    values[g_idx] = value[idx]


            # Regroup the flat per-slot buffers back by environment.
            grouped_actions = _group_list(actions, env_lens)
            grouped_next_memory = _group_list(next_memory, env_lens)
            grouped_log_probs = _group_list(log_probs, env_lens)
            grouped_values = _group_list(values, env_lens)
            
            # Offset of the current environment's first slot in the flat buffers.
            global_ofs = 0
            # Apply the chosen actions to every environment.
            for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):
                if self.vectorized:
                    # Vectorized env: step with the whole action list at once.
                    next_state_n, r_n, is_done_n, truncated, _ = env.step(action_n)
                    is_done_n = is_done_n or truncated
                else:
                    # Plain env: send the single action ([0] because actions
                    # are stored list-wrapped for uniformity).
                    next_state, r, is_done, truncated, _ = env.step(action_n[0])
                    is_done = is_done or truncated
                    # Re-wrap the results to mirror the vectorized layout.
                    next_state_n, r_n, is_done_n = [next_state], [r], [is_done]
                
                # Process each transition produced by this environment.
                for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):
                    # Flat-buffer index of this slot.
                    idx = global_ofs + ofs
                    # State the action was taken from (slot-aligned with action_n).
                    state = states[idx]
                    # This slot's n-step history deque.
                    history = histories[idx]
                    
                    # Accumulate episode statistics for this slot.
                    cur_rewards[idx] += r
                    # Count the step taken in this episode.
                    cur_steps[idx] += 1
                    # Record the transition unless the slot had no valid state.
                    if state is not None:
                        history.append(Experience(state=state, action=action, reward=r, done=is_done, env_episode_step=env_current_episode_step[idx]))
                    # Emit a full n-step chain every `steps_delta` iterations.
                    if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:
                        yield tuple(history)
                    # Advance the slot to the post-action state.
                    states[idx] = next_state
                    if is_done:
                        env_current_episode_step[idx] = 0
                        # NOTE(review): stored_memory_index is an empty list
                        # (IndexError), and `max_episode_steps` below is an
                        # undefined local — presumably self.max_episode_steps
                        # was intended.
                        mem_index = stored_memory_index[idx]
                        stored_memories[mem_index] = stored_memories[mem_index].copy()
                        next_memory[idx] = torch.zeros(
                            (max_episode_steps, self.trxl_num_layers, self.trxl_dim), dtype=torch.float32
                        )

                        stored_memories.append(next_memory[idx])
                        stored_memory_index[idx] = len(stored_memories) - 1
                        # Episode ended before a full chain was gathered:
                        # in case of very short episode (shorter than our steps count), send gathered history
                        if 0 < len(history) < self.steps_count:
                            yield tuple(history)
                        # generate tail of history
                        # Drain the remaining (shorter) chains one by one.
                        while len(history) > 1:
                            history.popleft()
                            yield tuple(history)
                        # Record the finished episode's total reward...
                        self.total_rewards.append(cur_rewards[idx])
                        # ...and its total step count.
                        self.total_steps.append(cur_steps[idx])
                        # Reset the per-episode accumulators.
                        cur_rewards[idx] = 0.0
                        cur_steps[idx] = 0
                        # vectorized envs are reset automatically
                        states[idx] = env.reset()[0] if not self.vectorized else None
                        agent_states[idx] = self.agent.initial_state()
                        history.clear()
                    else:
                        env_current_episode_step[idx] += 1
                        
                # Move the offset past this environment's slots.
                global_ofs += len(action_n)
            # Advance the global iteration counter.
            iter_idx += 1

    def pop_total_rewards(self):
        """
        Return the rewards of all finished episodes and clear the buffers.
        """
        r = self.total_rewards
        if r:
            self.total_rewards = []
            self.total_steps = []
        return r

    def pop_rewards_steps(self):
        # Return (reward, steps) pairs of finished episodes and clear the buffers.
        res = list(zip(self.total_rewards, self.total_steps))
        if res:
            self.total_rewards, self.total_steps = [], []
        return res


def select_device(args):
    """Pick the best available torch device.

    Preference order when acceleration is requested via ``args.cuda``:
    CUDA, then Apple MPS, otherwise CPU.

    :param args: parsed CLI arguments; only the boolean ``args.cuda`` is read
    :return: a ``torch.device`` instance
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        # Older torch builds do not expose torch.backends.mps at all, so
        # guard the attribute access before probing availability.
        mps_backend = getattr(torch.backends, "mps", None)
        if mps_backend is not None and mps_backend.is_available():
            return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): with default=True combined with action='store_true' the
    # flag can never be switched off from the CLI; kept for compatibility.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "ppo-trxl-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # Training and evaluation environments (4-frame skip done by ALE itself,
    # sticky actions disabled).
    env = wrap_dqn(gym.make("ALE/Darkchambers-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    test_env = wrap_dqn(gym.make("ALE/Darkchambers-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)

    # Episode-length cap used to size the TrXL episodic memory; fall back to
    # 1024 when the env spec does not define one.
    max_episode_steps = env.spec.max_episode_steps
    if max_episode_steps is not None:
        env.reset()
        max_episode_steps = env.spec.max_episode_steps
    else:
        max_episode_steps = 1024

    trxl_memory_length = min(TRXL_MEMORY_LENGTH, max_episode_steps)
    # Policy/value network with transformer-XL style episodic memory.
    net_ppo = ModelPPO(env.observation_space.shape, env.action_space.n).to(device)
    print(net_ppo)

    writer = SummaryWriter(comment="-ppo-trxl-" + args.name)
    agent = DQNTRXLAgent(net_ppo, num_envs=1, device=device, preprocessor=ppo_states_preprocessor)
    exp_source = ptan.experience.ExperienceSourceNextStates(env, agent, steps_count=1)

    opt_ppo = optim.AdamW(net_ppo.parameters(), lr=LEARNING_RATE_ACTOR)
    scheduler = optim.lr_scheduler.StepLR(opt_ppo, step_size=2000, gamma=0.9)

    # Training-progress counters, restored from a checkpoint when one exists.
    start_idx = 0
    old_ratio_v_mean = 0
    grad_index = 0
    train_frame_idx = 0
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        # The directory may contain only best-model files (no "epoch" in the
        # name), in which case there is no checkpoint to resume from.
        if checkpoints:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            opt_ppo.load_state_dict(checkpoint['opt_ppo'])
            net_ppo.load_state_dict(checkpoint['net_ppo'])
            start_idx = checkpoint['start_idx']
            old_ratio_v_mean = checkpoint['old_ratio_v_mean']
            grad_index = checkpoint['grad_index']
            train_frame_idx = checkpoint['train_frame_idx']
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("加载模型成功")
            print("Learning Rate:", opt_ppo.param_groups[0]['lr'])
            print("train_frame_idx:", train_frame_idx)
            print("scheduler epoch:", scheduler.last_epoch)
            # Slow down the LR decay after resuming from a checkpoint.
            scheduler.step_size = 10000

    trajectory = []             # rollout buffer ("trajectory") of experience tuples
    trajectory_values = []      # critic values recorded at sampling time
    trajectory_logprobs = []    # behaviour-policy log-probs recorded at sampling time
    trajectory_new_memory = []  # per-step TrXL memory snapshots
    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            # Report finished episodes (mean reward / mean length).
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + start_idx)
                tracker.reward(np.mean(rewards), step_idx + start_idx)

            # Periodic evaluation on the separate test environment.
            if step_idx > 0 and step_idx % TEST_ITERS == 0:
                ts = time.time()
                rewards, steps = test_net(net_ppo, test_env, count=10, device=device)
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + start_idx)
                writer.add_scalar("test_steps", steps, step_idx + start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                common.save_best_model(rewards, net_ppo.state_dict(), save_path, f"ppo-best-{train_frame_idx}", keep_best=10)

            # Accumulate one rollout's worth of experience plus the agent's
            # per-step value / log-prob / TrXL memory snapshots.
            trajectory.append(exp)
            trajectory_values.append(agent.current_value().cpu())
            trajectory_logprobs.append(agent.current_logprob().cpu())
            trajectory_new_memory.append(agent.current_new_memory().cpu())
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Convert the rollout into tensors on the training device.
            traj_states = [t[0].state for t in trajectory]
            traj_actions = [t[0].action.cpu() for t in trajectory]
            traj_env_current_episode_step = [t[0].current_episode_step for t in trajectory]
            traj_memory_index = [t[0].memory_index for t in trajectory]
            traj_memory_indices = [t[0].memory_indices for t in trajectory]
            traj_memory_masks = [t[0].memory_masks for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_values_v = torch.FloatTensor(np.array(trajectory_values)).to(device)
            traj_logprobs_v = torch.FloatTensor(np.array(trajectory_logprobs)).to(device)
            traj_new_memory_v = torch.FloatTensor(np.array(trajectory_new_memory)).to(device)
            traj_env_current_episode_step_v = torch.FloatTensor(np.array(traj_env_current_episode_step)).to(device)
            traj_memory_index_v = torch.LongTensor(np.array(traj_memory_index)).to(device)
            traj_memory_indices_v = torch.LongTensor(np.array(traj_memory_indices)).to(device)
            traj_memory_masks_v = torch.FloatTensor(np.array(traj_memory_masks)).to(device)
            # GAE advantages and value targets (one element shorter than the
            # trajectory, since the last transition has no bootstrap target).
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, traj_values_v, device=device)
            # Behaviour-policy log-probs captured during sampling.
            old_logprob_v = traj_logprobs_v

            # Normalize advantages for training stability; the epsilon guards
            # against a zero standard deviation.
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / (torch.std(traj_adv_v) + 1e-8)

            # Drop the last entry so the trajectory lines up one-to-one with
            # traj_adv_v / traj_ref_v, which exclude the final transition.
            trajectory = trajectory[:-1]
            old_logprob_v = old_logprob_v[:-1].detach()

            sum_loss = 0.0
            count_steps = 0
            clipfracs = []
            # PPO optimization epochs over the collected rollout.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE].unsqueeze(-1)
                    batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_values_v = traj_values_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    # FIXME(review): `initial_lstm_state` is never defined in
                    # this file (the header notes the port is unfinished); the
                    # TrXL memory tensors built above (traj_memory_*_v) are
                    # presumably what should be sliced and passed here.
                    _, newlogprob, entropy, newvalue, _ = net_ppo(states_v, initial_lstm_state, actions_v)
                    logratio = newlogprob - batch_old_logprob_v
                    ratio = logratio.exp()

                    with torch.no_grad():
                        # Diagnostics: KL estimates and clipping frequency.
                        old_approx_kl = (-logratio).mean()
                        approx_kl = ((ratio - 1) - logratio).mean()
                        clipfracs += [((ratio - 1.0).abs() > CLIP_COEF).float().mean().item()]

                    # Clipped surrogate policy objective.
                    pg_loss1 = -batch_adv_v * ratio
                    pg_loss2 = -batch_adv_v * torch.clamp(ratio, 1 - CLIP_COEF, 1 + CLIP_COEF)
                    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                    newvalue = newvalue.view(-1)
                    if CLIP_VLOSS:
                        # Clipped value loss, as in the PPO reference code.
                        v_loss_unclipped = (newvalue - batch_ref_v) ** 2
                        v_clipped = batch_values_v + torch.clamp(
                            newvalue - batch_values_v,
                            -CLIP_COEF,
                            CLIP_COEF
                        )
                        v_loss_clipped = (v_clipped - batch_ref_v) ** 2
                        v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                        v_loss = 0.5 * torch.mean(v_loss_max)
                    else:
                        v_loss = 0.5 * torch.mean((newvalue - batch_ref_v) ** 2)

                    entropy_loss = entropy.mean()
                    # BUGFIX: the value loss must be *scaled* by VF_COEF; the
                    # previous code added the constant VF_COEF to the loss.
                    loss = pg_loss - ENT_COEF * entropy_loss + v_loss * VF_COEF

                    opt_ppo.zero_grad()
                    loss.backward()
                    nn.utils.clip_grad_norm_(net_ppo.parameters(), CLIP_GRAD)
                    opt_ppo.step()
                    # Track totals for the mean-loss scalar below.
                    sum_loss += loss.item()
                    count_steps += 1
                    grad_index += 1
                # Early-stop the PPO epochs once the policy drifted too far.
                if TARGET_KL is not None and approx_kl > TARGET_KL:
                    break

            # Reset all rollout buffers, including the TrXL memory snapshots
            # (previously never cleared, so they grew without bound and leaked
            # stale data into the next rollout's traj_new_memory_v).
            trajectory.clear()
            trajectory_logprobs.clear()
            trajectory_values.clear()
            trajectory_new_memory.clear()
            train_frame_idx += 1
            scheduler.step()
            writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("sum_loss", sum_loss / max(count_steps, 1), step_idx + start_idx)

            checkpoints = {
                'net_ppo': net_ppo.state_dict(),
                'opt_ppo': opt_ppo.state_dict(),
                'start_idx': start_idx + step_idx,
                'old_ratio_v_mean': old_ratio_v_mean,
                'grad_index': grad_index,
                'train_frame_idx': train_frame_idx,
                'scheduler': scheduler.state_dict()
            }
            common.save_checkpoints(train_frame_idx, checkpoints, save_path, "ppo", keep_last=3)



