#!/usr/bin/env python3
'''
已适配，参考链接：
1. https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_atari_lstm.py
2. https://docs.cleanrl.dev/rl-algorithms/ppo-trxl

训练记录：
在cloudstudio上训练
20241221: 训练报错，在macbook上无问题在cloudstudio上报错
20241223: 重新训练，训练报错如下：
98854: done 16 episodes, mean reward -198.812, speed 56.62 f/s
Traceback (most recent call last):
  File "/workspace/my_-nqd/learning/atari-crazyclimber/train_ppo_lstm.py", line 400, in <module>
    rewards, steps = test_net(net_ppo, test_env, count=10, device=device)
  File "/workspace/my_-nqd/learning/atari-crazyclimber/train_ppo_lstm.py", line 233, in test_net
    mu_v, _ = net(obs_v)
  File "/root/miniforge3/envs/pytorch-gym/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
TypeError: ModelPPO.forward() missing 1 required positional argument: 'lstm_state'、

20241224：训练出现nan值
26998: done 1 episodes, mean reward 0.000, speed 74.36 f/s
27355: done 358 episodes, mean reward 0.000, speed 356.64 f/s
27703: done 706 episodes, mean reward 0.000, speed 347.71 f/s
28038: done 1041 episodes, mean reward 0.000, speed 334.65 f/s
28381: done 1384 episodes, mean reward 0.000, speed 342.09 f/s
Traceback (most recent call last):
  File "/workspace/my_-nqd/learning/atari-crazyclimber/train_ppo_lstm.py", line 478, in <module>
    _, newlogprob, entropy, newvalue, _ = net_ppo(states_v, initial_lstm_state, actions_v)
  File "/root/miniforge3/envs/pytorch-gym/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/workspace/my_-nqd/learning/atari-crazyclimber/train_ppo_lstm.py", line 182, in forward
    probs = Categorical(logits=logits)
  File "/root/miniforge3/envs/pytorch-gym/lib/python3.10/site-packages/torch/distributions/categorical.py", line 66, in __init__
    super().__init__(batch_shape, validate_args=validate_args)
  File "/root/miniforge3/envs/pytorch-gym/lib/python3.10/site-packages/torch/distributions/distribution.py", line 62, in __init__
    raise ValueError(
ValueError: Expected parameter logits (Tensor of shape (64, 4)) of distribution Categorical(logits: torch.Size([64, 4])) to satisfy the constraint IndependentConstraint(Real(), 1), but found invalid values:
tensor([[nan, nan, nan, nan],
        [nan, nan, nan, nan],
        [nan, nan, nan, nan],
'''
import os
import math
from typing import Any
import torch.nn as nn
import ptan
import time
import gymnasium as gym
import ale_py
import argparse
from tensorboardX import SummaryWriter
from torch.distributions import Categorical

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.utils as nn_utils
from collections import deque

# Register the ALE Atari environments with gymnasium.
gym.register_envs(ale_py)
GAMMA = 0.9  # discount factor; NOTE(review): 0.99 is the more common Atari choice
GAE_LAMBDA = 1.00 # lambda for the generalized advantage estimator; 0.95 is a commonly good value

TRAJECTORY_SIZE = 2049  # transitions collected before each PPO update (one extra for bootstrapping)
LEARNING_RATE_ACTOR = 5e-4

PPO_EPS = 0.2  # NOTE(review): appears unused in this file — CLIP_COEF below is the active clip range
PPO_EPOCHES = 10 # TODO: number of PPO optimization passes over each trajectory
PPO_BATCH_SIZE = 64 # minibatch length sliced from the trajectory per optimization step

TEST_ITERS = 100000 # run an evaluation (test_net) every this many sampled steps

CLIP_GRAD = 0.5   # gradient-norm clipping threshold
CLIP_COEF = 0.1   # PPO probability-ratio clip range
CLIP_VLOSS = True # clip the value loss around sampled-time estimates (cleanrl style)
ENT_COEF = 0.01   # entropy bonus weight
VF_COEF = 0.5     # value-loss weight
TARGET_KL = None  # early-stop threshold on approximate KL (disabled when None)


class DQNLstmAgent(ptan.agent.BaseAgent):
    """ptan agent driving a recurrent (LSTM) actor-critic model.

    Keeps the LSTM (h, c) state across environment steps and caches the
    critic value and action log-prob of the most recent step so the training
    loop can record them alongside each transition.
    """

    def __init__(self, dqn_model, num_envs, action_selector=None, device="cpu", preprocessor=ptan.agent.default_states_preprocessor):
        """
        :param dqn_model: recurrent actor-critic network being trained
        :param num_envs: number of parallel environments (sizes the LSTM state)
        :param action_selector: action selector (kept for API compatibility;
            the model samples actions itself, so this is not used internally)
        :param device: torch device for states and LSTM tensors
        :param preprocessor: callable converting raw states into model input
        """
        # Default created here, not in the signature, to avoid sharing one
        # selector instance across every agent (mutable-default pitfall).
        if action_selector is None:
            action_selector = ptan.actions.ProbabilityActionSelector()
        self.dqn_model = dqn_model
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device
        self.num_envs = num_envs
        # (h, c), each of shape (num_layers, num_envs, hidden_size).
        self.next_lstm_state = (
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
        )
        self.cur_value = None    # critic value of the last observed state
        self.cur_logprob = None  # log-prob of the last chosen action

    def initial_state(self):
        """
        Should create initial empty state for the agent. It will be called for the start of the episode
        :return: Anything agent want to remember
        """
        # Same contract as clone_next_lstm_state — reuse it instead of
        # duplicating the clone logic.
        return self.clone_next_lstm_state()

    def clone_next_lstm_state(self):
        """Return a copy of the (h, c) state so callers cannot mutate ours."""
        return (
            self.next_lstm_state[0].clone(),
            self.next_lstm_state[1].clone(),
        )

    def current_value(self):
        """Critic value recorded at the most recent __call__."""
        return self.cur_value

    def current_logprob(self):
        """Action log-prob recorded at the most recent __call__."""
        return self.cur_logprob

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        # Preprocess states when a preprocessor is configured.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # Model returns (action, logprob, entropy, value, lstm_state); keep
        # the new LSTM state for the next step and cache value/logprob.
        action, self.cur_logprob, _, self.cur_value, self.next_lstm_state = self.dqn_model(states, self.next_lstm_state)
        return action, agent_states

    def save_state_dict(self, checkpoints):
        # Persist the LSTM state so training resumes without a cold hidden state.
        checkpoints['next_lstm_state'] = self.next_lstm_state

    def load_state_dict(self, checkpoints):
        self.next_lstm_state = checkpoints['next_lstm_state']


class ModelPPO(nn.Module):
    """Convolutional actor-critic with an LSTM core for PPO on Atari frames.

    forward() returns (action, log_prob, entropy, value, new_lstm_state).
    """

    def __init__(self, obs_size, act_size):
        """
        :param obs_size: observation shape (C, H, W)
        :param act_size: number of discrete actions
        """
        super(ModelPPO, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        conv_out_size = self._get_conv_out(obs_size)
        self.lstm = nn.LSTM(conv_out_size, 128)
        self.action_linear = nn.Linear(128, act_size)  # policy head (logits)
        self.critic_linear = nn.Linear(128, 1)         # value head

    def _get_conv_out(self, shape):
        # Probe the conv stack with a dummy input to learn its flattened size.
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def get_states(self, x, lstm_state):
        """Run conv features through the LSTM one time step at a time.

        x is a flat batch of frames; it is reshaped to (seq_len, batch, feat)
        where batch comes from the supplied LSTM state.
        NOTE(review): unlike cleanrl's reference, episode boundaries ("done")
        do not reset the hidden state here — confirm this is intended.
        """
        hidden = self.conv(x / 255.0)  # scale uint8 pixels into [0, 1]
        batch_size = lstm_state[0].shape[1]
        hidden = hidden.reshape((-1, batch_size, self.lstm.input_size))

        new_hidden = []
        for h in hidden:
            h, lstm_state = self.lstm(
                h.unsqueeze(0),
                (
                    lstm_state[0],
                    lstm_state[1],
                ),
            )
            new_hidden += [h]
        new_hidden = torch.flatten(torch.cat(new_hidden), 0, 1)
        return new_hidden, lstm_state

    def get_value(self, x, lstm_state):
        """Critic value only (used for bootstrapping the last state)."""
        hidden, _ = self.get_states(x, lstm_state)
        return self.critic_linear(hidden)

    def forward(self, x, lstm_state, action=None):
        """Return (action, log_prob, entropy, value, lstm_state).

        When no action is supplied: SAMPLE from the policy in training mode
        (policy-gradient methods need stochastic rollouts), take the argmax
        in eval mode. BUG FIX: the original did the opposite — deterministic
        argmax during training removed all exploration, which collapses the
        policy (the NaN logits recorded in the module docstring).
        """
        hidden, lstm_state = self.get_states(x, lstm_state)
        logits = self.action_linear(hidden)
        probs = Categorical(logits=logits)
        if action is None:
            if self.training:
                action = probs.sample()
            else:
                action = torch.argmax(logits, dim=-1)
        return action, probs.log_prob(action), probs.entropy(), self.critic_linear(hidden), lstm_state


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: scales raw rewards and penalizes life loss.

    NOTE(review): `frame_penalty` is stored but never applied anywhere in
    this class — either apply it per step or remove the parameter.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty  # currently unused (see class note)
        self.life_loss_penalty = life_loss_penalty  # negative value added on life loss
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Scale non-zero rewards down by 100. NOTE(review): this is FLOOR
        # division, so negative rewards round toward -inf and positive
        # rewards below 100 become 0 — confirm that is intended.
        if reward != 0:
            reward //= 100
        
        # Penalize losing a life; the elif grants a symmetric bonus when a
        # life is gained (life_loss_penalty is negative, so `-=` adds reward).
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives
        
        
        return obs, reward, done, truncated, info


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the Atari preprocessing stack used by this training script.

    Order: episodic-life -> fire-on-reset (if supported) -> 84x84 grayscale
    -> CHW tensor layout -> reward shaping.
    NOTE(review): `stack_frames` and `reward_clipping` are accepted for API
    compatibility but not used by this pipeline.
    """
    wrappers = ptan.common.wrappers
    if episodic_life:
        # Treat each life as its own episode to densify terminal signals.
        env = wrappers.EpisodicLifeEnv(env)
    # wrappers.NoopResetEnv(env, noop_max=30) could be inserted here to
    # randomize start states; it is intentionally disabled.
    if 'FIRE' in env.unwrapped.get_action_meanings():
        # Some games require pressing FIRE to actually start.
        env = wrappers.FireResetEnv(env)
    env = wrappers.ProcessFrame84(env)
    env = wrappers.ImageToPyTorch(env)
    return RewardPenaltyWrapper(env)

@torch.no_grad()
def test_net(net, env, count=10, device="cpu"):
    """Play `count` evaluation episodes and return (mean reward, mean steps).

    The net is switched to eval mode for the duration of the evaluation and
    back to train mode afterwards. An episode stuck emitting more than 30
    consecutive no-op actions is aborted early.
    """
    total_reward = 0.0
    total_steps = 0
    net.eval()
    for _ in range(count):
        repeated_noops = 0
        last_action = -1
        obs, _ = env.reset()
        # Fresh zeroed LSTM state for each evaluation episode (batch of 1).
        lstm_state = (
            torch.zeros(net.lstm.num_layers, 1, net.lstm.hidden_size).to(device),
            torch.zeros(net.lstm.num_layers, 1, net.lstm.hidden_size).to(device),
        )
        while True:
            obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
            action_v, _, _, _, lstm_state = net(obs_v, lstm_state)
            action = action_v.cpu().item()

            # Bail out of an episode stuck on repeated no-ops.
            if action == 0 and action == last_action:
                repeated_noops += 1
                if repeated_noops > 30:
                    break
            else:
                repeated_noops = 0
            last_action = action

            obs, reward, done, truncated, _ = env.step(action)
            total_reward += reward
            total_steps += 1
            if done or truncated:
                break
    net.train()
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, net_ppo, lstm_state, states_v, values_v, device="cpu"):
    """
    Compute GAE advantages and reference (target) values for a trajectory.

    :param trajectory: list of sampled (experience,) tuples
    :param net_ppo: actor-critic network; only its critic head is used here
    :param lstm_state: LSTM (h, c) state used to bootstrap the final value
    :param states_v: tensor of trajectory states
    :param values_v: critic values predicted during sampling, shape (T,)
    :param device: device for the returned tensors
    :return: (advantages, reference values) — float tensors of length T-1
    """
    with torch.no_grad():
        # Bootstrap value of the final state of the trajectory.
        last_values_v = net_ppo.get_value(states_v[-1].unsqueeze(0), lstm_state)
    # BUG FIX: `ndarray + [x]` broadcast-ADDS x to every element (numpy
    # treats the list as an array operand) instead of appending it. Convert
    # to a Python list first so `+` concatenates the bootstrap value.
    values = values_v.squeeze().data.cpu().numpy().tolist() + [last_values_v.item()]
    # Generalized advantage estimator: a smoothed version of the advantage.
    last_gae = 0.0  # running GAE accumulator carrying future advantages backwards
    result_adv = []  # advantage per step
    result_ref = []  # reference (target) value per step
    # Walk the trajectory backwards, pairing each value with its successor:
    # (val, next_val) = (values[t], values[t+1]) for t = T-2 .. 0, aligned
    # with the experiences in reverse order.
    for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                     reversed(trajectory[:-1])):
        if exp.done:
            # Terminal step: no bootstrap and no accumulated future advantage.
            delta = exp.reward - val
            last_gae = delta
        else:
            # 1-step TD error against the critic's estimate (Bellman backup).
            delta = exp.reward + GAMMA * next_val - val
            # Exponentially-weighted accumulation of future TD errors:
            # each step folds in the discounted advantage of its successors.
            last_gae = delta + GAMMA * GAE_LAMBDA * last_gae
        result_adv.append(last_gae)
        result_ref.append(last_gae + val)

    # Reverse back into chronological order before building tensors.
    adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
    ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """
    Convert a list of numpy states into a batched torch tensor.

    A single state becomes shape (1, *state.shape); multiple states are
    stacked along a new leading batch axis.

    :param states: list of numpy arrays with states
    :return: torch tensor with a leading batch dimension
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # np.array(..., copy=False) raises on NumPy >= 2 whenever a copy is
        # unavoidable; np.asarray + np.stack is the portable equivalent.
        np_states = np.stack([np.asarray(s) for s in states])
    return torch.tensor(np_states.copy())


def select_device(args):
    """Pick the torch device: CUDA if requested and present, then MPS, else CPU."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): store_true with default=True means CUDA can never be
    # disabled from the command line.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "ppo-lstm-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    env = wrap_dqn(gym.make("ALE/Blackjack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=True)
    test_env = wrap_dqn(gym.make("ALE/Blackjack-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=True)

    # Actor-critic network shared by the sampling agent and the optimizer.
    net_ppo = ModelPPO(env.observation_space.shape, env.action_space.n).to(device)
    print(net_ppo)

    writer = SummaryWriter(comment="-ppo-lstm-" + args.name)
    agent = DQNLstmAgent(net_ppo, num_envs=1, device=device, preprocessor=ppo_states_preprocessor)
    exp_source = ptan.experience.ExperienceSourceNextStates(env, agent, steps_count=1)

    opt_ppo = optim.Adam(net_ppo.parameters(), lr=LEARNING_RATE_ACTOR)
    scheduler = optim.lr_scheduler.StepLR(opt_ppo, step_size=2000, gamma=0.9)

    start_idx = 0
    old_ratio_v_mean = 0
    grad_index = 0
    train_frame_idx = 0
    # Resume from the newest "epoch" checkpoint when one exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        opt_ppo.load_state_dict(checkpoint['opt_ppo'])
        net_ppo.load_state_dict(checkpoint['net_ppo'])
        start_idx = checkpoint['start_idx']
        old_ratio_v_mean = checkpoint['old_ratio_v_mean']
        grad_index = checkpoint['grad_index']
        train_frame_idx = checkpoint['train_frame_idx']
        scheduler.load_state_dict(checkpoint['scheduler'])
        print("加载模型成功")
        # Report the restored training state.
        print("Learning Rate:", opt_ppo.param_groups[0]['lr'])
        print("train_frame_idx:", train_frame_idx)
        print("scheduler epoch:", scheduler.last_epoch)
        # Slow down LR decay after resuming.
        scheduler.step_size = 10000


    trajectory = []  # experience buffer holding one trajectory
    trajectory_values = []  # critic values recorded during sampling
    trajectory_logprobs = []  # action log-probs recorded during sampling
    best_reward = None
    initial_lstm_state = agent.initial_state()
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + start_idx)
                tracker.reward(np.mean(rewards), step_idx + start_idx)

            # Periodic evaluation on the dedicated test environment.
            if step_idx > 0 and step_idx % TEST_ITERS == 0:
                ts = time.time()
                rewards, steps = test_net(net_ppo, test_env, count=10, device=device)
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + start_idx)
                writer.add_scalar("test_steps", steps, step_idx + start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                common.save_best_model(rewards, net_ppo.state_dict(), save_path, f"ppo-best-{train_frame_idx}", keep_best=10)


            trajectory.append(exp)
            trajectory_values.append(agent.current_value().cpu())
            trajectory_logprobs.append(agent.current_logprob().cpu())
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # LSTM state at the end of the trajectory — used to bootstrap
            # the value of the final state in calc_adv_ref.
            buffer_lstm = agent.clone_next_lstm_state()
            traj_states = [t[0].state for t in trajectory]
            traj_actions = [t[0].action.cpu() for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_values_v = torch.cat(trajectory_values).squeeze(1).to(device)
            traj_logprobs_v = torch.cat(trajectory_logprobs).to(device)
            # GAE advantages and value targets for the collected trajectory.
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, net_ppo, buffer_lstm, traj_states_v, traj_values_v, device=device)
            # Log-probs under the sampling ("old") policy, fixed for all epochs.
            old_logprob_v = traj_logprobs_v

            # Normalize advantages for training stability.
            # BUG FIX: add an epsilon so a zero-variance batch (e.g. the
            # all-zero-reward episodes in the docstring log) does not divide
            # by zero and poison the policy gradients with NaNs.
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / (torch.std(traj_adv_v) + 1e-8)

            # Drop the last entry: adv/ref were computed without it, so the
            # remaining tensors line up one-to-one with trajectory[:-1].
            trajectory = trajectory[:-1]
            old_logprob_v = old_logprob_v[:-1].detach()

            sum_loss = 0.0
            count_steps = 1
            is_interrupt = False

            clipfracs = []
            # PPO optimization: several epochs of minibatch updates over the
            # same trajectory (proximal policy optimization).
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE].unsqueeze(-1)
                    batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_values_v = traj_values_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    opt_ppo.zero_grad()
                    # Re-evaluate the current policy on the stored states.
                    _, newlogprob, entropy, newvalue, _ = net_ppo(states_v, initial_lstm_state, actions_v)
                    logratio = newlogprob - batch_old_logprob_v
                    ratio = logratio.exp()

                    with torch.no_grad():
                        old_approx_kl = (-logratio).mean()
                        approx_kl = ((ratio - 1) - logratio).mean()
                        clipfracs += [((ratio - 1.0).abs() > CLIP_COEF).float().mean().item()]

                    # Clipped-surrogate policy loss (pessimistic max of the
                    # unclipped and clipped objectives).
                    pg_loss1 = -batch_adv_v * ratio
                    pg_loss2 = -batch_adv_v * torch.clamp(ratio, 1 - CLIP_COEF, 1 + CLIP_COEF)
                    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                    newvalue = newvalue.view(-1)
                    if CLIP_VLOSS:
                        # Clip value updates around the sampled-time estimates.
                        v_loss_unclipped = (newvalue - batch_ref_v) ** 2
                        v_clipped = batch_values_v + torch.clamp(
                            newvalue - batch_values_v,
                            -CLIP_COEF,
                            CLIP_COEF
                        )

                        v_loss_clipped = (v_clipped - batch_ref_v) ** 2
                        v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                        v_loss = 0.5 * torch.mean(v_loss_max)
                    else:
                        v_loss = 0.5 * torch.mean((newvalue - batch_ref_v) ** 2)

                    entropy_loss = entropy.mean()
                    # BUG FIX: the value loss must be WEIGHTED by VF_COEF
                    # (`v_loss * VF_COEF`), not have the constant ADDED to the
                    # loss (`+ VF_COEF`), matching cleanrl's ppo_atari_lstm.
                    loss = pg_loss - ENT_COEF * entropy_loss + v_loss * VF_COEF

                    loss.backward()
                    nn.utils.clip_grad_norm_(net_ppo.parameters(), CLIP_GRAD)
                    opt_ppo.step()
                    # Track total loss for the average-loss metric below.
                    sum_loss += loss.item()
                    count_steps += 1
                    grad_index += 1
                # Early stop the epochs once the policy drifted too far.
                if TARGET_KL is not None and approx_kl > TARGET_KL:
                    break


            trajectory.clear()
            trajectory_logprobs.clear()
            trajectory_values.clear()
            train_frame_idx += 1
            scheduler.step()
            writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("sum_loss", sum_loss / count_steps, step_idx + start_idx)

            checkpoints = {
                'net_ppo': net_ppo.state_dict(),
                'opt_ppo': opt_ppo.state_dict(),
                'start_idx': start_idx + step_idx,
                'old_ratio_v_mean': old_ratio_v_mean,
                'grad_index': grad_index,
                'train_frame_idx': train_frame_idx,
                'scheduler': scheduler.state_dict()
            }
            common.save_checkpoints(train_frame_idx, checkpoints, save_path, "ppo", keep_last=3)



