#!/usr/bin/env python3
'''
Not fully adapted yet. It runs, but NaN values appear once the second training
round starts (after the first completes) — how to fix? Reference:
https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_rnd_envpool.py#L322
Compare against https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/rpo_continuous_action.py#L108
to determine which parts are essential.
TODO: refactor/encapsulate and optimize this code.

Training log:
'''
import os
import math
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from collections import deque
import ale_py
from gym.wrappers.normalize import RunningMeanStd
from torch.distributions import Categorical

gym.register_envs(ale_py)

GAMMA = 0.999  # discount factor for return/advantage computation
INIT_GAMMA = 0.99  # NOTE(review): defined but never used in this file
GAE_LAMBDA = 0.95 # lambda factor of the generalized advantage estimator; 0.95 works well in practice

TRAJECTORY_SIZE = 2049 # rollout length: number of consecutive transitions collected before each PPO update
LEARNING_RATE = 1e-4

PPO_EPS = 0.2  # NOTE(review): declared PPO clip epsilon, but the training loop uses CLIP_GRAD as the clip range instead
PPO_EPOCHES = 10 # number of PPO optimization passes over each collected trajectory
PPO_BATCH_SIZE = 64 # minibatch size used when iterating over the trajectory

TEST_ITERS = 10 # run an evaluation episode batch once every this many training updates

CLIP_GRAD = 0.1  # used below as both the PPO ratio clip range and the value-loss clip range

ANNEAL_LR = True  # if True, linearly decay the learning rate to 0 over NUM_UPDATES

TOTAL_TIMESTEPS: int = 2000000000

NUM_UPDATES = int(TOTAL_TIMESTEPS / (TRAJECTORY_SIZE))  # total number of PPO updates, used by the LR annealing schedule

MAX_GRAD_NORM = 0.5  # global gradient-norm clipping threshold

TARGET_KL = None  # optional KL threshold for early-stopping the PPO epochs (disabled when None)


class StackFrameWrapper(gym.Wrapper):
    """Stack the last `n_frames` observations along axis 0.

    On reset the initial observation is replicated `n_frames` times; each
    step appends the newest observation and drops the oldest.
    NOTE(review): assumes channel-first (C, H, W) observations, since the
    stacked output is concatenated on axis 0 — confirm against the wrapped env.
    """

    def __init__(self, env, n_frames=4):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.frames = deque([], maxlen=n_frames)

        # Fix: reset()/step() concatenate frames along axis 0, so the space
        # bounds must also be repeated along axis 0. The original repeated
        # along axis 2, which mismatched the observations actually returned.
        low = np.repeat(self.observation_space.low, n_frames, axis=0)
        high = np.repeat(self.observation_space.high, n_frames, axis=0)
        self.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.observation_space.dtype)

        self.obs = []  # NOTE(review): unused; kept for backward compatibility

    def reset(self, **kwargs):
        """Reset the env and pre-fill the frame buffer with the first observation."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), info

    def step(self, action):
        """Step the env, push the new frame, and return the stacked observation."""
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), reward, terminated, truncated, info



class TransposeObservation(gym.ObservationWrapper):
    """Reorder image observations from HWC to CHW layout (for PyTorch convs)."""

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return observation.transpose(2, 0, 1)


def test_net(net, env, count=10, device="cpu"):
    """Play `count` greedy (argmax) evaluation episodes.

    :param net: callable mapping a batched observation tensor to action scores
    :param env: environment to evaluate in
    :param count: number of episodes to play
    :param device: device for the observation tensor
    :return: tuple (mean episode reward, mean episode length)
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            episode_over = False
            while not episode_over:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                scores_v = net(obs_v)
                # Greedy action selection over the score vector.
                action = scores_v.squeeze(dim=0).cpu().argmax().item()
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
                episode_over = done or trunc
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, net_crt, states_v, device="cpu", gamma=None, gae_lambda=None):
    """
    Compute GAE advantages and value-function reference targets for a trajectory.

    :param trajectory: list of 1-tuples, each wrapping an experience object
        exposing ``.reward`` (float) and ``.done`` (bool)
    :param net_crt: critic network / callable mapping states to value estimates
    :param states_v: states tensor, aligned one-to-one with ``trajectory``
    :param device: device for the returned tensors
    :param gamma: discount factor; defaults to the module-level GAMMA
    :param gae_lambda: GAE smoothing factor; defaults to module-level GAE_LAMBDA
    :return: tuple (adv_v, ref_v) of FloatTensors with len(trajectory)-1 entries
    """
    # Resolve defaults lazily so the module constants stay the single source
    # of truth while the factors remain overridable per call.
    if gamma is None:
        gamma = GAMMA
    if gae_lambda is None:
        gae_lambda = GAE_LAMBDA
    with torch.no_grad():
        values_v = net_crt(states_v)  # predicted state values V(s)
    values = values_v.squeeze().data.cpu().numpy()
    # Generalized advantage estimator: exponentially-smoothed advantages.
    last_gae = 0.0  # running GAE accumulator (carries future advantages backwards)
    result_adv = []  # per-step advantage estimates
    result_ref = []  # per-step value targets (advantage + baseline)
    # Walk the trajectory backwards so every step folds in the discounted
    # advantage of all later steps; pairs are (V(s_t), V(s_{t+1}), exp_t).
    for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                     reversed(trajectory[:-1])):
        if exp.done:
            # Terminal step: no bootstrap from the next state; reset the GAE chain.
            delta = exp.reward - val
            last_gae = delta
        else:
            # 1-step TD error via the Bellman equation...
            delta = exp.reward + gamma * next_val - val
            # ...then smooth with the discounted accumulator: each step's
            # advantage includes later advantages scaled by gamma * lambda.
            last_gae = delta + gamma * gae_lambda * last_gae
        result_adv.append(last_gae)
        result_ref.append(last_gae + val)

    # Reverse back into chronological order before building tensors.
    adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
    ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """
    Convert a list of states into a batched torch tensor.

    :param states: list of numpy arrays (or array-likes) with states
    :return: torch.Tensor of shape [1, *state_shape] for a single state,
             or [N, *state_shape] for a batch of N states
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # np.asarray instead of np.array(..., copy=False): under NumPy 2.0,
        # copy=False raises ValueError whenever a copy is actually required,
        # while asarray still avoids a copy when possible.
        np_states = np.asarray([np.asarray(s) for s in states])
    # torch.tensor always copies its input, so no explicit .copy() is needed.
    return torch.tensor(np_states)


class RewardPenaltyWrapper(gym.Wrapper):
    """Scale rewards and apply penalties/bonuses on life-count changes.

    Every step's reward is divided by 50. Losing a life adds
    ``life_loss_penalty`` (negative by default); gaining a life subtracts it,
    turning the penalty into a bonus.
    NOTE(review): ``frame_penalty`` is stored but never applied anywhere.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super().__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect changes.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        reward /= 50  # scale the raw game reward down

        lives_now = info.get('lives', self.previous_lives)
        if lives_now < self.previous_lives:
            # Lost a life: apply the (negative) penalty.
            reward += self.life_loss_penalty
            self.previous_lives = lives_now
        elif lives_now > self.previous_lives:
            # Gained a life: mirror the penalty as a bonus.
            reward -= self.life_loss_penalty
            self.previous_lives = lives_now

        return obs, reward, done, truncated, info


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    # Standard Atari preprocessing chain (ptan wrappers) plus the local
    # reward-shaping wrapper.
    # NOTE(review): reward_clipping is currently unused — the clipping
    # wrapper at the bottom is commented out.
    if episodic_life:
        # Treat a multi-life game as a sequence of single-life episodes
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomized number of no-op actions at reset to diversify initial states
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
    # Frame-skip wrapper (disabled: the env is already created with frameskip=4)
    # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)

    # if 'FIRE' in env.unwrapped.get_action_meanings():
    #     env = ptan.common.wrappers.FireResetEnv(env)
    env = ptan.common.wrappers.ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = ptan.common.wrappers.FrameStack(env, stack_frames)
    env = RewardPenaltyWrapper(env)
    # if reward_clipping:
        # env = ptan.common.wrappers.ClippedRewardsWrapper(env)
    return env

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with store_true means this flag is
    # effectively always on; passing --cuda cannot change the result.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    # Directory for checkpoints / best models of this run.
    save_path = os.path.join("saves", "ppo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # Separate training and evaluation environments (deterministic Atari Boxing).
    env = wrap_dqn(gym.make('ALE/Boxing-v5', frameskip=4, repeat_action_probability=0.0))
    test_env = wrap_dqn(gym.make('ALE/Boxing-v5', frameskip=4, repeat_action_probability=0.0))

    # RND predictor/target network pair producing the curiosity bonus.
    rnd_model = model.RNDModel(env.observation_space.shape).to(device)
    # Actor-critic network; per its use below it exposes policy logits plus
    # separate extrinsic and intrinsic value heads.
    rnd_actor = model.RNDActor(env.observation_space.shape, env.action_space.n).to(device)
    print(rnd_model)
    print(rnd_actor)

    writer = SummaryWriter(comment="-ppo-rnd-" + args.name)
    exp_source = common.RNDExperienceSource(env, rnd_actor, rnd_model, steps_count=1, device=device)

    # Single optimizer over both networks so one step updates actor and RND predictor.
    combined_params = list(rnd_model.parameters()) + list(rnd_actor.parameters())
    opt = optim.Adam(combined_params, lr=LEARNING_RATE, eps=1e-5)
    if ANNEAL_LR:
        def lr_lambda(epoch):
            # Linear decay of the learning rate to 0 over NUM_UPDATES updates.
            return 1.0 - (epoch / NUM_UPDATES)

        scheduler = optim.lr_scheduler.LambdaLR(opt, lr_lambda=lr_lambda)
    else:
        scheduler = optim.lr_scheduler.StepLR(opt, step_size=10000, gamma=0.9)
    # Running discounted sum used to estimate the curiosity-reward scale.
    discounted_reward = common.RewardForwardFilter(gamma=0.99)
    reward_rms = RunningMeanStd()
    obs_rms = RunningMeanStd(shape=(1, 1, 84, 84))

    frame_idx = 0
    train_count = 0
    # Resume from the most recent "epoch" checkpoint, if any exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint files are expected to look like <prefix>_epoch_<n>.<ext>;
        # sort numerically by <n> so the last element is the newest.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))

        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            rnd_model.load_state_dict(checkpoint['rnd_model'])
            rnd_actor.load_state_dict(checkpoint['rnd_actor'])
            opt.load_state_dict(checkpoint['opt'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']
            # (message: "model loaded successfully")
            print("加载模型成功")
            print(f"learning rate: {scheduler.get_last_lr()[0]}")

    trajectory = [] # rollout buffer: one full trajectory of transitions per update
    best_reward = None
    grad_index = 0  # NOTE(review): never used below
    num_update = TOTAL_TIMESTEPS // TRAJECTORY_SIZE  # NOTE(review): shadows NUM_UPDATES, never used

    logger = common.setup_logger(save_path)
    def get_track_info(info):
        # Forward RewardTracker progress lines into the run's log file.
        logger.info(info)

    with ptan.common.utils.RewardTracker(writer, info_callback=get_track_info) as tracker:
        for step_idx, exp in enumerate(exp_source):
            # Log finished-episode stats, if any episodes ended since last check.
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + frame_idx)
                tracker.reward(np.mean(rewards), step_idx + frame_idx)

            # Accumulate transitions until a full trajectory is collected.
            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # if ANNEAL_LR:
            #     frac = 1.0 - (train_count - 1.0) / NUM_UPDATES
            #     lrnow = frac * args.learning_rate
            #     opt.param_groups[0]["lr"] = lrnow

            # Unpack the trajectory into per-field lists/arrays.
            traj_states = [t[0].state for t in trajectory]
            if isinstance(traj_states[0], np.ndarray):
                # If states are already numpy arrays, stack them
                traj_states = np.stack(traj_states)
            else:
                # If states are LazyFrames, convert to numpy arrays first
                traj_states = np.stack([np.array(state) for state in traj_states])

            traj_actions = [t[0].action for t in trajectory]
            traj_next_state = [t[0].next_state for t in trajectory]
            traj_rewards = [t[0].reward for t in trajectory]
            traj_done = [t[0].done for t in trajectory]
            traj_ext_values = [t[0].ext_value for t in trajectory]
            traj_int_value = [t[0].int_value for t in trajectory]
            traj_curiosity_reward = [t[0].curiosity_reward for t in trajectory]
            traj_logprobs = [t[0].logprob for t in trajectory]

            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_next_state_v = torch.FloatTensor(np.array(traj_next_state)).to(device)
            traj_rewards_v = torch.FloatTensor(np.array(traj_rewards)).to(device)
            traj_done_v = torch.FloatTensor(np.array(traj_done)).to(device)
            traj_ext_values_v = torch.FloatTensor(np.array(traj_ext_values)).to(device)
            traj_int_value_v = torch.FloatTensor(np.array(traj_int_value)).to(device)
            traj_logprobs_v = torch.FloatTensor(np.array(traj_logprobs)).to(device)
            # NOTE(review): "tray_" looks like a typo for "traj_" — kept as-is.
            tray_curiosity_reward_v = torch.FloatTensor(np.array(traj_curiosity_reward)).to(device)

            # Update the running std estimate of the discounted curiosity reward.
            curiosity_reward_per_env = np.array(
                [discounted_reward.update(reward_per_step) for reward_per_step in tray_curiosity_reward_v.cpu().data.numpy().T])
            mean, std, count = (
                np.mean(curiosity_reward_per_env),
                np.std(curiosity_reward_per_env),
                len(curiosity_reward_per_env)
            )
            reward_rms.update_from_moments(mean, std**2, count)
            # NOTE(review): this rebinds the Python list to a normalized numpy
            # array, but the result is never used again — the GAE below reads
            # the UN-normalized tray_curiosity_reward_v. The intrinsic rewards
            # therefore never get normalized, which may be related to the NaN
            # problem mentioned in the module docstring.
            traj_curiosity_reward /= np.sqrt(reward_rms.var)

            # GAE over the trajectory for both value streams (extrinsic game
            # reward and intrinsic curiosity reward), as in cleanrl's RND PPO.
            with torch.no_grad():
                ext_advantages = torch.zeros_like(traj_rewards_v, device=device)
                int_advantages = torch.zeros_like(tray_curiosity_reward_v, device=device)
                ext_lastgaelam = 0
                int_lastgaelam = 0

                for t in reversed(range(TRAJECTORY_SIZE)):
                    if t == TRAJECTORY_SIZE - 1:
                        # Bootstrap from the value of the final observed state.
                        next_value_ext, next_value_int = rnd_actor.get_values(traj_states_v[-1].unsqueeze(0))
                        next_value_ext, next_value_int = next_value_ext.reshape(1, -1), next_value_int.reshape(1, -1)
                        ext_nextnonterminal = 1.0 - traj_done_v[-1]
                        int_nextnonterminal = 1.0
                        ext_nextvalues = next_value_ext
                        int_nextvalues = next_value_int
                    else:
                        ext_nextnonterminal = 1.0 - traj_done_v[t + 1]
                        # Intrinsic stream is treated as non-episodic (never terminal).
                        int_nextnonterminal = 1.0
                        ext_nextvalues = traj_ext_values_v[t + 1]
                        int_nextvalues = traj_int_value_v[t + 1]
                    ext_delta = traj_rewards_v[t] + GAMMA * ext_nextvalues * ext_nextnonterminal - traj_ext_values_v[t]
                    int_delta = tray_curiosity_reward_v[t] + GAMMA * int_nextvalues * int_nextnonterminal - \
                                traj_int_value_v[t]
                    ext_advantages[t] = ext_lastgaelam = (
                            ext_delta + GAMMA * GAE_LAMBDA * ext_nextnonterminal * ext_lastgaelam
                    )
                    int_advantages[t] = int_lastgaelam = (
                            int_delta + GAMMA * GAE_LAMBDA * int_nextnonterminal * int_lastgaelam
                    )
                ext_returns = ext_advantages + traj_ext_values_v
                int_returns = int_advantages + traj_int_value_v

            b_ext_returns = ext_returns.view(-1)
            b_int_returns = int_returns.view(-1)
            b_ext_advantages = ext_advantages.view(-1)
            b_int_advantages = int_advantages.view(-1)

            # Combined advantage: intrinsic weight 1.0, extrinsic weight 2.0.
            b_advantages = b_int_advantages * 1.0 + b_ext_advantages * 2.0
            # Update obs running stats on channel 3 only — presumably the
            # newest of the 4 stacked frames; TODO confirm.
            obs_rms.update(traj_states_v[:, 3, :, :].reshape(-1, 1, 84, 84).cpu().numpy())

            # Whitened + clipped observations for the RND networks.
            # NOTE(review): computed but never used below — rnd_model is fed
            # the raw next_state_v minibatches instead; verify which input the
            # RND networks are supposed to see.
            rnd_next_obs = (
                (
                    (traj_states_v - torch.from_numpy(obs_rms.mean).to(device)) / torch.sqrt(torch.from_numpy(obs_rms.var).to(device))
                ).clip(-5, 5)
            ).float()

            clipfracs = []
            # PPO optimization epochs (proximal policy optimization) over
            # sequential (not shuffled) minibatches of the trajectory.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    # NOTE(review): noisy per-minibatch debug print.
                    print("batch_ofs: ", batch_ofs)
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    next_state_v = traj_next_state_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    logprobs_v = traj_logprobs_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    # RND forward loss: predictor tries to match the frozen target.
                    predict_next_state_feature, target_next_state_feature = rnd_model(next_state_v)
                    forward_loss = F.mse_loss(predict_next_state_feature, target_next_state_feature.detach(), reduction='none').mean(-1)

                    # Train the predictor on a random ~25% subset of the batch
                    # (the RND "update proportion" trick).
                    mask = torch.rand(len(forward_loss), device=device)
                    mask = (mask < 0.25).type(torch.FloatTensor).to(device)
                    forward_loss = (forward_loss * mask).sum() / torch.max(
                        mask.sum(), torch.tensor([1], device=device, dtype=torch.float32)
                    )

                    logits, new_ext_values, new_int_values = rnd_actor(states_v)
                    if torch.isnan(logits).any():
                        print("NaN detected in logits")
                        # More diagnostics (e.g. the states_v values) could be printed here.
                    probs = Categorical(logits=logits)
                    newlogprob, entropy = probs.log_prob(actions_v), probs.entropy()

                    logratio = newlogprob - logprobs_v
                    ratio = logratio.exp()

                    with torch.no_grad():
                        # Diagnostics: KL estimates and fraction of clipped ratios.
                        old_approx_kl = (-logratio).mean()
                        approx_kl = ((ratio - 1) - logratio).mean()
                        clipfracs += [((ratio - 1.0).abs() > 0.1).float().mean().item()]
                    mb_advantages = b_advantages[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    if True:
                        # Per-minibatch advantage normalization.
                        mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
                    # Clipped PPO policy objective (clip range CLIP_GRAD, not PPO_EPS).
                    pg_loss1 = -mb_advantages * ratio
                    pg_loss2 = -mb_advantages * torch.clamp(ratio, 1.0 - CLIP_GRAD, 1.0 + CLIP_GRAD)
                    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                    new_ext_values, new_int_values = new_ext_values.view(-1), new_int_values.view(-1)
                    if True:
                        # Clipped value loss for the extrinsic head.
                        ext_v_loss_unclipped = (new_ext_values - b_ext_returns[batch_ofs:batch_ofs + PPO_BATCH_SIZE]) ** 2
                        ext_v_clipped = traj_ext_values_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE] + torch.clamp(
                            new_ext_values - traj_ext_values_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE],
                            -CLIP_GRAD, CLIP_GRAD)
                        ext_v_loss_clipped = (ext_v_clipped - b_ext_returns[batch_ofs:batch_ofs + PPO_BATCH_SIZE]) ** 2
                        ext_v_loss_max = torch.max(ext_v_loss_unclipped, ext_v_loss_clipped)
                        ext_v_loss = 0.5 * torch.mean(ext_v_loss_max)
                    else:
                        ext_v_loss = 0.5 * ((new_ext_values - b_ext_returns[batch_ofs:batch_ofs + PPO_BATCH_SIZE]) ** 2).mean()

                    # Unclipped value loss for the intrinsic head.
                    int_v_loss = 0.5 * ((new_int_values - b_int_returns[batch_ofs:batch_ofs + PPO_BATCH_SIZE]) ** 2).mean()
                    v_loss = ext_v_loss + int_v_loss
                    entropy_loss = entropy.mean()
                    # Total loss: policy + entropy bonus + value losses + RND predictor loss.
                    loss = pg_loss - 0.001 * entropy_loss + v_loss * 0.5 + forward_loss

                    opt.zero_grad()
                    loss.backward()
                    if MAX_GRAD_NORM:
                        nn.utils.clip_grad_norm_(combined_params, MAX_GRAD_NORM)
                    opt.step()

                if TARGET_KL is not None:
                    if approx_kl > TARGET_KL:
                        # Early-stop the remaining PPO epochs once the new policy
                        # has drifted too far from the old one.
                        break

            scheduler.step()
            trajectory.clear()
            train_count += 1

            # Persist a resumable checkpoint after every update.
            checkpoint = {
                "rnd_model": rnd_model.state_dict(),
                "rnd_actor": rnd_actor.state_dict(),
                "opt": opt.state_dict(),
                "frame_idx": step_idx + frame_idx,
                "train_count": train_count,
                "scheduler": scheduler.state_dict(),
            }
            common.save_checkpoints(train_count, checkpoint, save_path, "ppo-rnd")

            # Periodic greedy evaluation using only the policy-logits head.
            if train_count % TEST_ITERS == 0:
                ts = time.time()
                rnd_actor.eval()
                rewards, steps = test_net(lambda x: rnd_actor(x)[0], test_env, count=5, device=device)
                rnd_actor.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + frame_idx)
                writer.add_scalar("test_steps", steps, step_idx + frame_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        # NOTE(review): name/fname are computed but never used —
                        # saving is delegated to common.save_best_model below.
                        name = "best_%+.3f_%d.dat" % (rewards, step_idx + frame_idx)
                        fname = os.path.join(save_path, name)
                    best_reward = rewards
                    common.save_best_model(rewards, rnd_actor.state_dict(), save_path, 'ppo-rnd')



