#!/usr/bin/env python3
'''
参考链接：https://github.com/wisnunugroho21/reinforcement_learning_phasic_policy_gradient/blob/master/discrete/pytorch/ppg_dis_pong.py#L294
已适配，未验证

训练记录：
在笔记本上训练
20250106：训练分数-11， 测试分数1.8分，继续训练
20250107: 学习率： 0.00025，训练分数-12分，测试分数1.8分（产生一堆新分），继续训练，观察学习率
20250108: 学习率： 0.00022500000000000002，训练分数-10.39分，测试分数7.6分，继续训练，观察学习率
20250109: 学习率： 0.00018225，训练分数-11.43分，测试分数7.6（新分3.4），继续训练，观察学习率
20250110: 学习率： 0.00018225，训练分数-8.0分，测试分数7.6分，继续训练，观察学习率
20250111: 学习率： 0.00016402500000000002，训练分数-9.6分，测试分数5.3分，7.6分，暂停训练，模型应该存在问题
'''
import os
from typing import Any
import torch.nn as nn
import ptan
import time
import gymnasium as gym
import ale_py
import argparse
from tensorboardX import SummaryWriter
from torch.distributions import Categorical
from torch.distributions.kl import kl_divergence

from lib import common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

gym.register_envs(ale_py)
GAMMA = 0.9
GAE_LAMBDA = 1.00  # lambda factor of the advantage estimator; 0.95 is usually a good value

# Raised to 1024: if the trajectory is too long, the old and new policies
# diverge too much and training produces NaNs.
# The reference implementation uses an even shorter length of 128.
TRAJECTORY_SIZE = 1024
LEARNING_RATE = 2.5e-4

PPO_EPS = 0.2
PPO_EPOCHES = 10
PPO_BATCH_SIZE = 64  # minibatch length used for each pass over the trajectory samples

TEST_ITERS = 100000  # run an evaluation game every this many sampling iterations

CLIP_GRAD = 0.5


class ModelPPO(nn.Module):
    """Policy network with an auxiliary value head (PPG actor).

    Maps a stacked-frame observation to action logits plus a state-value
    estimate computed from a shared convolutional trunk.
    """

    def __init__(self, obs_size, act_size):
        """
        :param obs_size: observation shape, channels first (C, H, W)
        :param act_size: number of discrete actions
        """
        super(ModelPPO, self).__init__()

        # Shared convolutional feature extractor.
        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        flat_size = self._get_conv_out(obs_size)
        self.linear = nn.Linear(flat_size, 512)
        self.action_linear = nn.Linear(512, act_size)  # policy head (logits)
        self.critic_linear = nn.Linear(512, 1)         # auxiliary value head

    def _get_conv_out(self, shape):
        """Size of the flattened conv output, determined by a dummy pass."""
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))

    def forward(self, x):
        """Return (action_logits, state_value) for a batch of uint8 images."""
        scaled = x.float() / 255.0
        features = self.conv(scaled)
        features = features.view(features.size(0), -1)
        hidden = F.relu(self.linear(features), inplace=True)
        return self.action_linear(hidden), self.critic_linear(hidden)


class PPOValue(nn.Module):
    """Standalone critic used by the PPG value phase.

    Maps a stacked-frame observation to a single state-value estimate.
    """

    def __init__(self, obs_size):
        """
        :param obs_size: observation shape, channels first (C, H, W)
        """
        super(PPOValue, self).__init__()

        # Convolutional trunk (mirrors the policy network's architecture).
        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        flat_size = self._get_conv_out(obs_size)
        # Two-layer value head on top of the flattened conv features.
        self.critic_linear = nn.Sequential(
            nn.Linear(flat_size, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 1))

    def _get_conv_out(self, shape):
        """Size of the flattened conv output, determined by a dummy pass."""
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))

    def forward(self, x):
        """Return the state-value estimate for a batch of uint8 images."""
        scaled = x.float() / 255.0
        flat = self.conv(scaled).view(scaled.size(0), -1)
        return self.critic_linear(flat)


class AuxMemory(Dataset):
    """Buffer of states replayed during the PPG auxiliary (joint) phase."""

    def __init__(self):
        # States accumulated since the last auxiliary update.
        self.states = []

    def __len__(self):
        return len(self.states)

    def __getitem__(self, idx):
        return self.states[idx]

    def save_all(self, states):
        """Append every element of *states* to the buffer."""
        for state in states:
            self.states.append(state)

    def clear(self):
        """Drop all stored states."""
        del self.states[:]


class Discrete():
    """Bundles Categorical-distribution operations on probability tensors."""

    def sample(self, probs, device):
        """Draw one action per row of *probs*, returned as float on *device*."""
        return Categorical(probs).sample().float().to(device=device)

    def entropy(self, probs, device):
        """Per-row entropy of the categorical distribution."""
        return Categorical(probs).entropy().float().to(device=device)

    def logprob(self, probs, probs_value, device):
        """Log-probability of the actions *probs_value*, shaped (N, 1)."""
        dist = Categorical(probs)
        return dist.log_prob(probs_value).unsqueeze(1).float().to(device=device)

    def kl_divergence(self, probs_p, probs_q, device):
        """KL(p || q) between two categorical distributions, shaped (N, 1)."""
        dist_p = Categorical(probs_p)
        dist_q = Categorical(probs_q)
        # Resolves to the module-level torch kl_divergence function.
        return kl_divergence(dist_p, dist_q).unsqueeze(1).float().to(device=device)


class PolicyFunction():
    """Return/advantage estimators parameterized by discount and GAE lambda."""

    def __init__(self, gamma=0.99, lam=0.95):
        self.gamma = gamma  # discount factor
        self.lam = lam      # GAE smoothing factor

    def monte_carlo_discounted(self, rewards, dones):
        """Per-step discounted return; the running sum resets at terminals."""
        collected = []
        running = 0
        for idx in reversed(range(len(rewards))):
            running = rewards[idx] + (1.0 - dones[idx]) * self.gamma * running
            collected.append(running)
        collected.reverse()
        return torch.stack(collected)

    def temporal_difference(self, reward, next_value, done):
        """One-step TD target r + gamma * V(s'), masked at terminals."""
        return reward + (1 - done) * self.gamma * next_value

    def generalized_advantage_estimation(self, values, rewards, next_values, dones):
        """GAE advantages, accumulated backwards over the batch."""
        deltas = rewards + (1.0 - dones) * self.gamma * next_values - values
        collected = []
        running = 0
        for idx in reversed(range(len(rewards))):
            running = deltas[idx] + (1.0 - dones[idx]) * self.gamma * self.lam * running
            collected.append(running)
        collected.reverse()
        return torch.stack(collected)


class TrulyPPO():
    """Truly-PPO policy loss: KL-based rollback instead of ratio clipping.

    When the KL divergence from the old policy exceeds ``policy_kl_range``
    while the ratio keeps pushing the update further (ratio > 1), the
    objective is penalized by ``policy_params * kl`` to roll the step back.
    """

    def __init__(self, policy_kl_range, policy_params, value_clip, vf_loss_coef, entropy_coef, gamma, lam, device):
        # KL threshold beyond which the rollback penalty kicks in.
        self.policy_kl_range = policy_kl_range
        # Slope of the KL rollback penalty.
        self.policy_params = policy_params
        # Clip range for value updates; None disables value clipping.
        self.value_clip = value_clip
        self.vf_loss_coef = vf_loss_coef
        self.entropy_coef = entropy_coef

        self.distributions = Discrete()
        self.policy_function = PolicyFunction(gamma, lam)
        self.device = device

    # Loss for PPO
    def compute_loss(self, action_probs, old_action_probs, values, old_values, next_values, actions, rewards, dones):
        """Combined loss: policy surrogate + clipped value MSE - entropy bonus."""
        # Don't backpropagate through the old value / old policy.
        old_values = old_values.detach()
        old_action_probs = old_action_probs.detach()

        # Generalized advantage estimates and the resulting value targets.
        advantages = self.policy_function.generalized_advantage_estimation(values, rewards, next_values, dones)
        returns = (advantages + values).detach()
        # Normalize advantages for more stable gradients.
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-6)).detach()

        # Probability ratio pi_theta / pi_theta_old, computed in log space.
        logprobs = self.distributions.logprob(action_probs, actions, self.device)
        old_logprobs = self.distributions.logprob(old_action_probs, actions, self.device).detach()

        # Finding Surrogate Loss
        ratios = (logprobs - old_logprobs).exp()  # ratios = pi_new / pi_old
        kl = self.distributions.kl_divergence(old_action_probs, action_probs, self.device)

        # Truly-PPO rollback: where KL exceeds the trust range and the ratio
        # moves the update further, subtract the KL penalty; otherwise use
        # the plain ratio * advantage objective.
        pg_targets = torch.where(
            (kl >= self.policy_kl_range) & (ratios > 1),
            ratios * advantages - self.policy_params * kl,
            ratios * advantages
        )
        pg_loss = pg_targets.mean()

        # Entropy bonus to keep exploration alive.
        dist_entropy = self.distributions.entropy(action_probs, self.device).mean()

        # Critic loss, optionally with clipped value updates (PPO2 style).
        if self.value_clip is None:
            critic_loss = ((returns - values).pow(2) * 0.5).mean()
        else:
            vpredclipped = old_values + torch.clamp(values - old_values, -self.value_clip,
                                                    self.value_clip)  # keep the new value close to the old one
            vf_losses1 = (returns - values).pow(2) * 0.5  # mean squared error
            vf_losses2 = (returns - vpredclipped).pow(2) * 0.5  # mean squared error against the clipped prediction
            critic_loss = torch.max(vf_losses1, vf_losses2).mean()

        # We maximize the policy objective and the entropy while minimizing
        # the critic loss, hence the signs below.
        loss = (critic_loss * self.vf_loss_coef) - (dist_entropy * self.entropy_coef) - pg_loss
        return loss


class JointAux():
    """Joint loss for the PPG auxiliary phase: value regression + KL anchor."""

    def __init__(self, device):
        self.distributions = Discrete()
        self.device = device

    def compute_loss(self, action_probs, old_action_probs, values, returns):
        """Auxiliary value MSE plus KL(old || new) keeping the policy close."""
        # The old policy is only a fixed anchor, so block its gradients.
        frozen_old_probs = old_action_probs.detach()

        kl_term = self.distributions.kl_divergence(frozen_old_probs, action_probs, device=self.device).mean()
        value_term = (0.5 * (returns - values).pow(2)).mean()

        return value_term + kl_term


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: scales raw rewards down and penalizes life loss.

    NOTE(review): ``frame_penalty`` is stored but never applied anywhere in
    this class — confirm whether a per-frame penalty was intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty          # unused (see class note)
        self.life_loss_penalty = life_loss_penalty  # added when a life is lost
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect losses.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale non-zero rewards down by a factor of ten.
        # NOTE(review): this is floor division — a reward of 5 becomes 0 and
        # -5 becomes -1; presumably true division (reward /= 10) was
        # intended. Confirm before changing.
        if reward != 0:
            reward //= 10

        # Penalize losing a life.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            # Life count increased (e.g. a bonus life): subtracting the
            # negative penalty grants a symmetric bonus.
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Sanity check: games with a FIRE action expose it as action index 1
        # and have at least three actions in total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Pass-through; only reset() is customized.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # After resetting, press a couple of actions to get past the "press
        # FIRE to start" screen; since it is unknown which action continues
        # the game, try them one after another, re-resetting if an attempt
        # accidentally ends the episode. Assumes the start button is among
        # the first 3 actions.
        # NOTE(review): when a done-branch reset fires, the observation
        # returned below is the one captured *before* that reset (stale).
        # This wrapper is currently unused (commented out in wrap_dqn).
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            self.env.reset(seed=seed, options=options)
        return obs, info

import cv2

class ProcessFrame84(gym.ObservationWrapper):
    """Downscale the raw RGB game frame to a single-channel 84x84 image."""

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # New observation space: one 84x84 channel of byte pixel values.
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """Convert one raw observation to the 84x84 grayscale format."""
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        """Grayscale via ITU-R 601 luma weights, then area-resize to 84x84."""
        gray = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        resized = cv2.resize(gray, (84, 84), interpolation=cv2.INTER_AREA)
        shaped = np.reshape(resized, [84, 84, 1])
        return shaped.astype(np.uint8)


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard Atari preprocessing stack to *env*.

    :param env: raw ALE environment
    :param stack_frames: number of consecutive frames stacked per observation
    :param episodic_life: currently unused — the EpisodicLifeEnv wrapper below is disabled
    :param reward_clipping: currently unused in this wrapper chain
    """
    # if episodic_life:
    # Simulate a single-life game for multi-life titles (disabled).
    # env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize the initial state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    # if 'FIRE' in env.unwrapped.get_action_meanings():
    # env = FireResetEnv(env)
    env = ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = ptan.common.wrappers.FrameStack(env, stack_frames)
    env = RewardPenaltyWrapper(env)
    return env


def test_net(net, env, count=10, device="cpu"):
    """Play *count* greedy episodes and return (mean reward, mean steps).

    An episode is also aborted after more than 30 consecutive Noop
    (action 0) choices so a stalled agent cannot loop forever.
    """
    rewards = 0.0
    steps = 0
    for _ in range(count):
        noop_action_count = 0
        pre_action = -1
        obs, _ = env.reset()
        while True:
            obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
            mu_v, _ = net(obs_v)

            # (debug) the softmax action probabilities could be printed here

            # Greedy action: argmax over the policy logits.
            action = mu_v.squeeze(dim=0).data.cpu().argmax().item()

            # Abort if the agent keeps repeating Noop.
            if action == 0 and pre_action == action:  # Noop
                noop_action_count += 1
                if noop_action_count > 30:
                    break
            else:
                noop_action_count = 0
            pre_action = action
            obs, reward, done, trunc, _ = env.step(action)
            # env.render()
            done = done or trunc
            rewards += reward
            steps += 1
            if done:
                break
    return rewards / count, steps / count


def calc_adv_ref(trajectory, net_ppo, states_v, device="cpu"):
    """Compute GAE advantages and bootstrapped value targets for a trajectory.

    :param trajectory: list of sampled experience steps (each with .reward/.done)
    :param net_ppo: network whose second output head is the state value
    :param states_v: states tensor matching the trajectory order
    :param device: device for the returned tensors
    :return: (advantages, reference values), each len(trajectory) - 1 long

    NOTE: uses the module-level GAMMA and GAE_LAMBDA constants.
    """
    with torch.no_grad():
        _, values_v = net_ppo(states_v)  # predicted state values
    values = values_v.squeeze().data.cpu().numpy()

    # Walk the trajectory backwards, pairing each value with its successor
    # ((-2, -1), (-3, -2), ...) and the matching experience step; the last
    # transition is dropped because it has no successor value to bootstrap.
    adv_backwards = []
    ref_backwards = []
    last_gae = 0.0  # smoothed advantage carried from later steps
    for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                     reversed(trajectory[:-1])):
        if exp.done:
            # Terminal step: no bootstrap and no carried-over advantage.
            last_gae = exp.reward - val
        else:
            # One-step TD error (Bellman) plus the discounted advantage
            # flowing back from future steps — the GAE recursion.
            delta = exp.reward + GAMMA * next_val - val
            last_gae = delta + GAMMA * GAE_LAMBDA * last_gae
        adv_backwards.append(last_gae)
        ref_backwards.append(last_gae + val)

    # Restore chronological order: advantages and reference (target) values.
    adv_v = torch.FloatTensor(adv_backwards[::-1]).to(device)
    ref_v = torch.FloatTensor(ref_backwards[::-1]).to(device)
    return adv_v, ref_v


def ppo_states_preprocessor(states):
    """Convert a list of environment states into a batched torch tensor.

    :param states: list of array-like states (numpy arrays or LazyFrames)
    :return: torch tensor with a leading batch dimension

    A single state becomes a [1, *D] tensor; multiple states are stacked
    into an [N, *D] tensor.
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # np.asarray + np.stack avoids the NumPy >= 2.0 ValueError raised by
        # np.array(..., copy=False) when a copy turns out to be required
        # (e.g. for LazyFrames inputs).
        np_states = np.stack([np.asarray(s) for s in states])
    # .copy() because expand_dims returns a view of the caller's array.
    return torch.tensor(np_states.copy())


def select_device(args):
    """Pick the compute device: CUDA first, then Apple MPS, else CPU.

    Both accelerator paths require ``args.cuda`` to be set.
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): action='store_true' combined with default=True makes this
    # flag always True — passing --cuda changes nothing, and a CPU run cannot
    # be requested from the command line. default=False was probably intended.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "ppg-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # Training and evaluation environments share the same wrapper stack.
    env = wrap_dqn(gym.make("ALE/DemonAttack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0),
                   episodic_life=False)
    test_env = wrap_dqn(gym.make("ALE/DemonAttack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0),
                        episodic_life=False)

    # Policy network (with auxiliary value head) and its frozen "old policy" copy.
    net_ppo = ModelPPO(env.observation_space.shape, env.action_space.n).to(device)
    print(net_ppo)
    net_ppo_target = ptan.agent.TargetNet(net_ppo)
    # Separate critic network and its frozen copy (PPG value phase).
    ppo_value = PPOValue(env.observation_space.shape).to(device)
    print(ppo_value)
    ppo_value_target = ptan.agent.TargetNet(ppo_value)

    # State buffer and loss for the auxiliary (joint) phase.
    aux_memory = AuxMemory()
    aux_joint = JointAux(device=device)

    writer = SummaryWriter(comment="-ppg-" + args.name)
    # The agent samples actions from the softmax over the policy logits.
    agent = ptan.agent.PolicyAgent(lambda x: net_ppo(x)[0], device=device, preprocessor=ppo_states_preprocessor,
                                     apply_softmax=True)
    exp_source = ptan.experience.ExperienceSourceNextStates(env, agent, steps_count=1)

    opt_ppo = optim.Adam(net_ppo.parameters(), lr=LEARNING_RATE)
    opt_value = optim.Adam(ppo_value.parameters(), lr=LEARNING_RATE)
    # Decay the policy LR by 10% every 2500 trajectory updates.
    scheduler = optim.lr_scheduler.StepLR(opt_ppo, step_size=2500, gamma=0.9)
    policy_loss = TrulyPPO(policy_kl_range=0.0008, policy_params=20, value_clip=2.0, entropy_coef=0.05,
                           vf_loss_coef=1.0, gamma=0.99, lam=0.95, device=device)

    start_idx = 0
    old_ratio_v_mean = 0
    grad_index = 0
    train_frame_idx = 0
    # Resume from the newest "epoch" checkpoint if any exist.
    # NOTE(review): only net_ppo/opt_ppo/scheduler are restored — ppo_value,
    # opt_value and both target nets restart from scratch on every resume,
    # which may explain the stalled scores noted in the file header. Confirm.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        opt_ppo.load_state_dict(checkpoint['opt_ppo'])
        net_ppo.load_state_dict(checkpoint['net_ppo'])
        start_idx = checkpoint['start_idx']
        old_ratio_v_mean = checkpoint['old_ratio_v_mean']
        grad_index = checkpoint['grad_index']
        train_frame_idx = checkpoint['train_frame_idx']
        scheduler.load_state_dict(checkpoint['scheduler'])
        # Override the scheduler step size loaded from the checkpoint.
        scheduler.step_size = 2500
        print("加载模型成功")
        print("学习率：", scheduler.get_lr()[0])
        print("train_frame_idx: ", train_frame_idx)

    trajectory = []  # experience buffer; holds one trajectory's worth of steps
    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            # Log finished-episode statistics.
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + start_idx)
                tracker.reward(np.mean(rewards), step_idx + start_idx)

            # Periodic greedy evaluation.
            if step_idx > 0 and step_idx % TEST_ITERS == 0:
                ts = time.time()
                rewards, steps = test_net(net_ppo, test_env, count=10, device=device)
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + start_idx)
                writer.add_scalar("test_steps", steps, step_idx + start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                # NOTE(review): called on every test, not only on improvement;
                # presumably common.save_best_model ranks internally — verify.
                common.save_best_model(rewards, net_ppo.state_dict(), save_path, f"ppg-best-{step_idx + start_idx}",
                                       keep_best=10)

            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Unpack the collected trajectory; with steps_count=1 each entry
            # is a 1-tuple wrapping a single experience step.
            traj_states = [t[0].state for t in trajectory]
            if isinstance(traj_states[0], np.ndarray):
                # If states are already numpy arrays, stack them
                traj_states = np.stack(traj_states)
            else:
                # If states are LazyFrames, convert to numpy arrays first
                traj_states = np.stack([np.array(state) for state in traj_states])

            traj_actions = [t[0].action for t in trajectory]
            traj_next_states = [t[0].next_state for t in trajectory]
            if isinstance(traj_next_states[0], np.ndarray):
                # If states are already numpy arrays, stack them
                traj_next_states = np.stack(traj_next_states)
            else:
                # If states are LazyFrames, convert to numpy arrays first
                traj_next_states = np.stack([np.array(state) for state in traj_next_states])
            traj_rewards = [t[0].reward for t in trajectory]
            traj_dones = [t[0].done for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_next_states_v = torch.FloatTensor(np.array(traj_next_states)).to(device)
            traj_rewards_v = torch.FloatTensor(np.array(traj_rewards)).to(device)
            traj_dones_v = torch.FloatTensor(np.array(traj_dones)).to(device)
            # print("frame_idx: ", train_frame_idx)

            # PPO policy phase: several epochs of minibatch updates over the
            # trajectory using the Truly-PPO loss.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    next_states_v = traj_next_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    rewards_v = traj_rewards_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    dones_v = traj_dones_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    pred_actions_probs = F.softmax(net_ppo(states_v)[0], dim=1)
                    pred_values = ppo_value(states_v)
                    pred_actions_probs_target = F.softmax(net_ppo_target.target_model(states_v)[0], dim=1)
                    pred_values_target = ppo_value_target.target_model(states_v)
                    pred_next_values = ppo_value(next_states_v)

                    # print("train epoch: ", epoch, " batch_ofs: ", batch_ofs)
                    loss = policy_loss.compute_loss(pred_actions_probs, pred_actions_probs_target, pred_values,
                                                    pred_values_target, pred_next_values, actions_v, rewards_v, dones_v)

                    opt_ppo.zero_grad()
                    opt_value.zero_grad()

                    loss.backward()

                    opt_ppo.step()
                    opt_value.step()

            # Keep the trajectory states for the auxiliary phase, then reset.
            aux_memory.save_all(traj_states_v)
            trajectory.clear()
            # NOTE(review): ptan's alpha_sync keeps `alpha` of the old target
            # weights, so alpha=1e-9 is effectively a full copy of the online
            # network into the target — presumably intended as a hard sync.
            net_ppo_target.alpha_sync(1e-9)
            ppo_value_target.alpha_sync(1e-9)
            train_frame_idx += 1

            # Auxiliary (joint) phase: every 4 trajectory updates, distill the
            # critic's values into the policy's auxiliary head while a KL term
            # anchors the policy to its pre-phase behavior.
            if train_frame_idx % 4 == 0:
                dataloader = DataLoader(aux_memory, PPO_BATCH_SIZE, shuffle=False)

                # Optimize policy for K epochs:
                for _ in range(PPO_EPOCHES):
                    for states in dataloader:
                        pred_values = ppo_value(states).detach()
                        pred_actions_logits, pred_values_logits = net_ppo(states)
                        pred_actions_probs = F.softmax(pred_actions_logits, dim=1)
                        pred_old_actions_probs = F.softmax(net_ppo_target.target_model(states)[0], dim=1)

                        joint_loss = aux_joint.compute_loss(pred_actions_probs, pred_old_actions_probs,
                                                           pred_values_logits, pred_values)
                        opt_ppo.zero_grad()
                        joint_loss.backward()
                        opt_ppo.step()

                # Clear the memory
                aux_memory.clear()
                # Copy new weights into old policy:
                net_ppo_target.alpha_sync(1e-9)

            scheduler.step()

            # Persist resume state after every trajectory update.
            # NOTE(review): ppo_value/opt_value are not saved — see resume note above.
            checkpoints = {
                'net_ppo': net_ppo.state_dict(),
                'opt_ppo': opt_ppo.state_dict(),
                'start_idx': start_idx + step_idx,
                'old_ratio_v_mean': old_ratio_v_mean,
                'grad_index': grad_index,
                'train_frame_idx': train_frame_idx,
                'scheduler': scheduler.state_dict()
            }
            common.save_checkpoints(train_frame_idx, checkpoints, save_path, "ppg", keep_last=5)