#!/usr/bin/env python3
'''
待验证

训练记录：
在2号机上训练
1127：训练分数-3分，测试分数14分，继续训练
1128：训练分数2分，测试分数14分，继续训练，提升不大，是否需要调整超参数？比如学习率
1202：调整学习率，重新训练，训练分数10分，测试分数39分，观测学习率，继续训练
1204：训练分数7分，测试分数38分，发现训练分数在-4~7分之间震荡，是否需要调整学习率继续训练
20241228:学习率0.001，训练分数12，测试分数40，继续训练，观测学习率
20241229: 学习率opt_act lr:  0.001
scheduler last epoch:  774
train count:  774，训练轮次也和昨天一样，是否模型没有保存，需要修改代码
20241230：已修复保存的问题，继续训练，学习率opt_act lr:  0.001，scheduler last epoch:  2243
训练分数8.9分，测试分数42.8分，继续训练，看学习率
20241231：opt_act lr:  0.001
scheduler last epoch:  3581
train count:  3581 
修复学习率无法下降的问题
训练分数：8.6分。测试分数40分
20250102：学习率opt_act lr:  0.000900000000000000，训练分数10.7，测试分数42.8分，继续训练
20250103: 学习率opt_act lr:  0.00081 训练分数7分，测试分数42.8分，无新分生成，停止训练，调整代码和超参数

重新调整代码，并且建议如下：
监控关键指标: 添加KL散度监控，如果KL散度过大，提前终止该轮训练
值函数预热: 考虑先训练几轮只更新critic网络
使用更简单的环境验证: 先在CartPole等简单环境上验证算法正确性

在2号机上训练
20250823: 训练分数228，测试分数375
20250824: 训练分数242.2，测试分数625
20250913: 训练分数244.700，测试分数638
20250914: 训练分数250.6，测试分数653
20251123: 测试分数673，训练通过，待验证
'''
import os
import math
from typing import Any
import torch.nn as nn
import ptan
import time
import gymnasium as gym
import ale_py
import argparse
from tensorboardX import SummaryWriter
from torch.distributions import Categorical

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.utils as nn_utils
from collections import deque

# Register the ALE (Atari) environments with gymnasium.
gym.register_envs(ale_py)
GAMMA = 0.9  # discount factor for future rewards
GAE_LAMBDA = 0.95 # lambda factor of the generalized advantage estimator; 0.95 is a commonly good value
entropy_coef = 0.1  # entropy bonus coefficient; alternatively decay gradually from 0.1 down to 0.01

TRAJECTORY_SIZE = 2049  # number of transitions collected before each PPO update
LEARNING_RATE_ACTOR = 1e-3  # Adam learning rate (shared by actor and critic)

PPO_EPS = 0.1  # clipping epsilon for the PPO surrogate objective
PPO_EPOCHES = 6 # TODO: number of PPO optimization passes over one trajectory — confirm effect
PPO_BATCH_SIZE = 128 # minibatch length sliced from the trajectory per optimization step

TEST_ITERS = 100000 # run one evaluation round every this many sampled iterations

CLIP_GRAD = 1.0  # gradient-norm clipping threshold

class PPOActor(nn.Module):
    """Policy network: a convolutional feature extractor followed by a
    two-layer head producing one logit per discrete action.

    Raw uint8 frames are normalized to [0, 1] inside forward().
    """

    def __init__(self, obs_size, act_size):
        """
        :param obs_size: observation shape, (channels, height, width)
        :param act_size: number of discrete actions
        """
        super(PPOActor, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU(),
        )

        self.linear = nn.Linear(self._get_conv_out(obs_size), 512)
        self.action_linear = nn.Linear(512, act_size)

    def _get_conv_out(self, shape):
        # Probe the conv stack with a dummy batch to find its flattened size.
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))

    def forward(self, x):
        # Scale raw byte pixels into [0, 1] before the conv stack.
        scaled = x.float() / 255.0
        features = self.conv(scaled).view(scaled.size(0), -1)
        hidden = self.linear(features)
        return self.action_linear(hidden)


class PPOCrt(nn.Module):
    """Critic network: the same convolutional trunk as the actor, with a
    head that outputs a single scalar state-value estimate V(s).

    Raw uint8 frames are normalized to [0, 1] inside forward().
    """

    def __init__(self, obs_size):
        """
        :param obs_size: observation shape, (channels, height, width)
        """
        super(PPOCrt, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU(),
        )

        self.linear = nn.Linear(self._get_conv_out(obs_size), 512)
        self.critic_linear = nn.Linear(512, 1)

    def _get_conv_out(self, shape):
        # Probe the conv stack with a dummy batch to find its flattened size.
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))

    def forward(self, x):
        # Scale raw byte pixels into [0, 1] before the conv stack.
        scaled = x.float() / 255.0
        features = self.conv(scaled).view(scaled.size(0), -1)
        hidden = self.linear(features)
        return self.critic_linear(hidden)


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper that

    * penalizes long stretches of frames with no score change
      (discourages the agent from idling), and
    * penalizes losing a life.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        """
        :param env: wrapped environment
        :param frame_penalty: penalty applied per completed idle window
        :param life_loss_penalty: penalty applied when a life is lost
        """
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0
        # With frameskip=4 this corresponds to ~10000 raw frames of inactivity.
        self.stay_long_frame = 10000 / 4
        self.non_action_frame = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        if reward != 0:
            # Any scoring event resets the idle counter.
            self.non_action_frame = 0
        else:
            self.non_action_frame += 1

        # Apply the idle penalty once a full idle window has elapsed.
        # Bug fix: the old expression `-0.1 * n // window` floored the whole
        # product (yielding -1.0 at the first trigger instead of -0.1) and
        # ignored the configurable frame_penalty.
        idle_windows = self.non_action_frame // self.stay_long_frame
        if idle_windows > 0:
            reward += self.frame_penalty * idle_windows
            self.non_action_frame = 0

        # Penalize losing a life.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Games with a FIRE action expose it at index 1 and have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset, then press actions 1 and 2 so the game actually starts.

        We do not know which button continues the game, so the first few
        actions are tried in turn. If the environment terminates while doing
        so, it is reset again.

        Bug fixes: the observation returned after a terminating step used to
        be the stale pre-reset frame — now the fresh reset observation is
        returned; `truncated` is also honored, not just `terminated`.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(1)
        if done or trunc:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(2)
        if done or trunc:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard Atari preprocessing wrapper stack to ``env``.

    :param env: raw gymnasium Atari environment
    :param stack_frames: number of consecutive frames stacked per observation
    :param episodic_life: when True, treat each lost life as an episode end
    :param reward_clipping: unused; kept for signature compatibility
    :return: wrapped environment
    """
    if episodic_life:
        # Simulate a single-life game on top of a multi-life one.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize initial state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)

    # Resize to 84x84 grayscale, move channels first, then stack frames.
    for wrapper_cls in (ptan.common.wrappers.ProcessFrame84,
                        ptan.common.wrappers.ImageToPyTorch):
        env = wrapper_cls(env)
    env = ptan.common.wrappers.FrameStack(env, stack_frames)
    return RewardPenaltyWrapper(env)


def test_net(net, env, count=10, device="cpu"):
    """Play ``count`` evaluation episodes greedily; return mean reward and steps.

    An episode is aborted early after more than 30 consecutive repeated
    no-op (action 0) choices, to keep a stalled policy from looping forever.

    :param net: policy network producing one logit per action
    :param env: evaluation environment
    :param count: number of episodes to play
    :param device: device the network lives on
    :return: (average reward, average steps) over the episodes
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            consecutive_noops = 0
            last_action = -1
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
                logits_v = net(obs_v)

                # Greedy action: index of the largest logit.
                action = logits_v.squeeze(dim=0).data.cpu().argmax().item()

                if action == 0 and last_action == action:  # repeated no-op
                    consecutive_noops += 1
                    if consecutive_noops > 30:
                        break
                else:
                    consecutive_noops = 0
                last_action = action

                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
                if done or trunc:
                    break
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, ppo_crt, states_v, device="cpu", gamma=None, gae_lambda=None):
    """
    By trajectory calculate advantage and 1-step ref value, using GAE
    (generalized advantage estimation — a smoothed version of the advantage).

    :param trajectory: list of 1-element tuples of experience entries, each
        entry exposing ``reward`` and ``done``
    :param ppo_crt: critic network predicting state values
    :param states_v: states tensor matching ``trajectory``
    :param device: device for the returned tensors
    :param gamma: discount factor; defaults to the module-level GAMMA
    :param gae_lambda: GAE smoothing factor; defaults to module-level GAE_LAMBDA
    :return: tuple (advantages tensor, reference values tensor), each one
        element shorter than the trajectory (the last entry has no successor)
    """
    # Resolve defaults lazily so the module constants stay the single source
    # of truth while explicit values remain possible (and testable).
    if gamma is None:
        gamma = GAMMA
    if gae_lambda is None:
        gae_lambda = GAE_LAMBDA
    with torch.no_grad():
        values_v = ppo_crt(states_v)  # predicted state values V(s)
    values = values_v.squeeze().data.cpu().numpy()

    last_gae = 0.0  # running GAE accumulator; carries future advantages backwards
    result_adv = []  # advantages, built in reverse (chronological order restored below)
    result_ref = []  # reference (target) values for the critic
    # Walk the trajectory backwards, pairing each V(s_t) with V(s_{t+1});
    # the final transition is dropped since it has no successor value.
    for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                     reversed(trajectory[:-1])):
        if exp.done:
            # Terminal step: no bootstrap value, and no future advantage
            # carries over past an episode boundary.
            delta = exp.reward - val
            last_gae = delta
        else:
            # TD residual from the Bellman equation...
            delta = exp.reward + gamma * next_val - val
            # ...smoothed with the discounted tail of future residuals
            # (gamma * lambda per step, accumulated via the reverse walk).
            last_gae = delta + gamma * gae_lambda * last_gae
        result_adv.append(last_gae)
        result_ref.append(last_gae + val)

    # Restore chronological order before converting to tensors.
    adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
    ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """
    Convert a list of states into a tensor suitable for the model.

    A single state of shape ``D`` becomes a ``[1, D]`` tensor; a list of N
    states becomes an ``[N, ...]`` tensor stacked along a new first axis.

    :param states: list of numpy arrays with states
    :return: CPU torch tensor owning its own memory
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # Bug fix: np.array(..., copy=False) raises under NumPy >= 2.0
        # whenever a copy is actually required; np.asarray has the intended
        # "avoid a copy when possible" semantics on all versions.
        np_states = np.asarray([np.asarray(s) for s in states])
    # .copy() guarantees the tensor is backed by fresh, writable memory.
    return torch.tensor(np_states.copy())

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' makes
    # --cuda effectively always True; confirm whether CPU-only runs are wanted.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = common.select_device(args=args)

    save_path = os.path.join("saves", "ppo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    env = wrap_dqn(gym.make("ALE/Berzerk-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    test_env = wrap_dqn(gym.make("ALE/Berzerk-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)

    # Create the policy (actor) and value (critic) networks.
    ppo_act = PPOActor(env.observation_space.shape, env.action_space.n).to(device)
    ppo_crt = PPOCrt(env.observation_space.shape).to(device)
    print(ppo_act)
    print(ppo_crt)

    writer = SummaryWriter(comment="-ppo-" + args.name)
    agent = ptan.agent.PolicyAgent(ppo_act, device=device, preprocessor=ppo_states_preprocessor, apply_softmax=True)
    exp_source = ptan.experience.ExperienceSource(env, agent, steps_count=1)

    opt = optim.Adam(list(ppo_act.parameters()) + list(ppo_crt.parameters()), lr=LEARNING_RATE_ACTOR)
    # If the scheduler was introduced mid-training, it must be created after
    # the optimizer is loaded, so that it adjusts the learning rate of the
    # correct optimizer instance.
    scheduler = optim.lr_scheduler.StepLR(opt, step_size=1000, gamma=0.9)


    start_idx = 0
    old_ratio_v_mean = 0
    grad_index = 0
    train_frame_idx = 0
    # Resume from the newest "epoch" checkpoint, if any exists in save_path.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        opt.load_state_dict(checkpoint['opt'])
        ppo_act.load_state_dict(checkpoint['ppo_act'])
        ppo_crt.load_state_dict(checkpoint['ppo_crt'])
        start_idx = checkpoint['start_idx']
        old_ratio_v_mean = checkpoint['old_ratio_v_mean']
        grad_index = checkpoint['grad_index']
        train_frame_idx = checkpoint['train_frame_idx']
        # NOTE(review): the scheduler state is saved but deliberately not
        # restored here; a fresh StepLR is created instead — confirm intent.
        # scheduler.load_state_dict(checkpoint['scheduler'])
        scheduler = optim.lr_scheduler.StepLR(opt, step_size=1000, gamma=0.9)
        print("加载模型成功")
        # Print the current learning rate of the resumed run.
        print("opt_act lr: ", opt.param_groups[0]['lr'])
        print("scheduler last epoch: ", scheduler.last_epoch)
        print("train count: ", train_frame_idx)

    trajectory = [] # note: the experience buffer is called "trajectory" here
    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + start_idx)
                tracker.reward(np.mean(rewards), step_idx + start_idx)

            # Periodically evaluate the greedy policy on the test environment
            # and keep the best-performing weights.
            if step_idx > 0 and step_idx % TEST_ITERS == 0:
                ts = time.time()
                ppo_act.eval()
                rewards, steps = test_net(ppo_act, test_env, count=10, device=device)
                ppo_act.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + start_idx)
                writer.add_scalar("test_steps", steps, step_idx + start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                common.save_best_model(rewards, ppo_act.state_dict(), save_path, f"ppo-best-{train_frame_idx}", keep_best=10)


            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Unpack states and actions from the collected trajectory.
            traj_states = [t[0].state for t in trajectory]
            traj_actions = [t[0].action for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            # Compute advantages and reference (target) values via GAE.
            ppo_crt.eval()
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, ppo_crt, traj_states_v, device=device)
            ppo_crt.train()
            # Action probabilities of the policy BEFORE this update round.
            with torch.no_grad():
                ppo_act.eval()
                old_mu_v = F.softmax(ppo_act(traj_states_v), dim=1)
                ppo_act.train()
            # Log-probabilities of the actually taken actions under the old policy.
            old_logprob_v = torch.log(old_mu_v.gather(1, torch.tensor(traj_actions, dtype=torch.int64).to(device).unsqueeze(-1))).detach()

            # Normalize advantages to zero mean / unit variance for stability.
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / torch.std(traj_adv_v)

            # Drop the last entry from the trajectory: the advantages and
            # reference values were computed without it (it has no successor),
            # so this keeps all tensors aligned one-to-one.
            trajectory = trajectory[:-1]
            old_logprob_v = old_logprob_v[:-1].detach()

            sum_loss_value = 0.0
            sum_loss_policy = 0.0
            count_steps = 1
            is_interrupt = False

            # PPO (proximal policy optimization) epochs over the trajectory.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE].unsqueeze(-1)
                    batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    opt.zero_grad()
                    # Forward pass: action distribution and state-value estimates.
                    logits = ppo_act(states_v)
                    value = ppo_crt(states_v)
                    # NOTE(review): after this softmax, `logits` actually holds
                    # probabilities, not raw logits — the name is misleading.
                    logits = F.softmax(logits, dim=1)
                    indices = actions_v.long().to(device).unsqueeze(-1)

                    # ---------------- logging ---------------- #

                    # gathered_values = logits.gather(1, indices)
                    # min_value = gathered_values.min().item()
                    # max_value = gathered_values.max().item()
                    # zero_count = (gathered_values == 0).sum().item()
                    # near_zero_count = (gathered_values.abs() < 1e-7).sum().item()

                    # writer.add_scalar("gathered_min", min_value, grad_index)
                    # writer.add_scalar("gathered_max", max_value, grad_index)
                    # writer.add_scalar("gathered_zero_count", zero_count, grad_index)
                    # writer.add_scalar("gathered_near_zero_count", near_zero_count, grad_index)
                    # ---------------- -------- ----------------#

                    # Log-probabilities of the taken actions under the current
                    # policy; +1e-7 guards against log(0).
                    logprob_pi_v = torch.log(logits.gather(1, indices) + 1e-7)
                    writer.add_scalar("logprob_pi_v mean", logprob_pi_v.mean().item(), grad_index)
                    writer.add_scalar("logprob_pi_v max", logprob_pi_v.max().item(), grad_index)
                    writer.add_scalar("logprob_pi_v min", logprob_pi_v.min().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v mean", batch_old_logprob_v.mean().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v max", batch_old_logprob_v.max().item(), grad_index)
                    writer.add_scalar("batch_old_logprob_v min", batch_old_logprob_v.min().item(), grad_index)
                    writer.add_scalar("ratio_v_pre mean", (logprob_pi_v - batch_old_logprob_v).mean().item(), grad_index)
                    writer.add_scalar("ratio_v_pre max", (logprob_pi_v - batch_old_logprob_v).max().item(), grad_index)
                    writer.add_scalar("ratio_v_pre min", (logprob_pi_v - batch_old_logprob_v).min().item(), grad_index)

                    # Importance ratio r = pi_new(a|s) / pi_old(a|s).
                    ratio_v = torch.exp(logprob_pi_v - batch_old_logprob_v)
                    # if old_ratio_v_mean != 0 and abs(ratio_v.mean().item() - old_ratio_v_mean) > 100:
                    #     opt_ppo.zero_grad()
                    #     is_interrupt = True
                    #     break
                    old_ratio_v_mean = ratio_v.mean().item()
                    writer.add_scalar("ratio_v mean", ratio_v.mean().item(), grad_index)
                    writer.add_scalar("ratio_v max", ratio_v.max().item(), grad_index)
                    writer.add_scalar("ratio_v min", ratio_v.min().item(), grad_index)
                    writer.add_scalar("batch_adv_v mean", batch_adv_v.mean().item(), grad_index)
                    writer.add_scalar("batch_adv_v min", batch_adv_v.min().item(), grad_index)
                    writer.add_scalar("batch_adv_v max", batch_adv_v.max().item(), grad_index)
                    # Unclipped surrogate objective.
                    surr_obj_v = batch_adv_v * ratio_v
                    writer.add_scalar("surr_obj_v mean", surr_obj_v.mean().item(), grad_index)
                    writer.add_scalar("surr_obj_v min", surr_obj_v.min().item(), grad_index)
                    writer.add_scalar("surr_obj_v max", surr_obj_v.max().item(), grad_index)

                    # Clipped surrogate objective (PPO clipping around 1 +/- eps).
                    clipped_surr_v = batch_adv_v * torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS)
                    writer.add_scalar("clipped_surr_v mean", clipped_surr_v.mean().item(), grad_index)
                    writer.add_scalar("clipped_surr_v min", clipped_surr_v.min().item(), grad_index)
                    writer.add_scalar("clipped_surr_v max", clipped_surr_v.max().item(), grad_index)

                    writer.add_scalar("torch.clamp mean", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).mean().item(), grad_index)
                    writer.add_scalar("torch.clamp min", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).min().item(), grad_index)
                    writer.add_scalar("torch.clamp max", torch.clamp(ratio_v, 1.0 - PPO_EPS, 1.0 + PPO_EPS).max().item(), grad_index)

                    # Policy loss: negative of the pessimistic (min) surrogate;
                    # critic loss against the GAE reference values; entropy
                    # bonus (subtracted) encourages exploration.
                    loss_policy_v = -(torch.min(surr_obj_v, clipped_surr_v).mean())
                    loss_critic_v = F.smooth_l1_loss(batch_ref_v, value.squeeze())
                    entropy_loss = torch.mean(Categorical(logits).entropy())
                    total_loss = loss_policy_v + loss_critic_v - entropy_coef * entropy_loss
                    total_loss.backward()
                    torch.nn.utils.clip_grad_norm_(list(ppo_act.parameters()) + list(ppo_crt.parameters()), CLIP_GRAD)
                    opt.step()

                    # --------------- logging -----------------#
                    grad_max = 0.0
                    grad_means = 0.0
                    grad_count = 0
                    for p in list(ppo_act.parameters()) + list(ppo_crt.parameters()):
                        grad_max = max(grad_max, p.grad.abs().max().item())
                        grad_means += (p.grad ** 2).mean().sqrt().item()
                        grad_count += 1
                    writer.add_scalar("grad_l2", grad_means / grad_count, grad_index)
                    writer.add_scalar("grad_max", grad_max, grad_index)

                    weights_max = 0.0
                    weights_means = 0.0
                    weights_count = 0
                    for p in list(ppo_act.parameters()) + list(ppo_crt.parameters()):
                        weights_max = max(weights_max, p.data.abs().max().item())
                        weights_means += (p.data ** 2).mean().sqrt().item()
                        weights_count += 1
                    writer.add_scalar("weights_l2", weights_means / weights_count, grad_index)
                    writer.add_scalar("weights_max", weights_max, grad_index)
                    # ------------------------------------------ # 

                    # Accumulate losses for the per-update averages below.
                    sum_loss_value += loss_critic_v.item()
                    sum_loss_policy += loss_policy_v.item()
                    count_steps += 1
                    grad_index += 1
                if is_interrupt:
                    is_interrupt = False
                    break

            trajectory.clear()
            train_frame_idx += 1
            # NOTE(review): LR scheduler stepping is disabled — the learning
            # rate stays constant unless this is re-enabled.
            # scheduler.step()
            writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("loss_policy", sum_loss_policy / count_steps, step_idx + start_idx)
            writer.add_scalar("loss_value", sum_loss_value / count_steps, step_idx + start_idx)

            # Persist everything needed to resume training after this update.
            checkpoints = {
                'start_idx': start_idx + step_idx,
                'old_ratio_v_mean': old_ratio_v_mean,
                'grad_index':grad_index,
                'train_frame_idx': train_frame_idx,
                'scheduler': scheduler.state_dict(),
                'ppo_act': ppo_act.state_dict(),
                'ppo_crt': ppo_crt.state_dict(),
                'opt': opt.state_dict()
            }
            common.save_checkpoints(train_frame_idx, checkpoints, save_path, "ppo", keep_last=5)



