#!/usr/bin/env python3
'''
完成适配，参考链接：https://github.com/alirezakazemipour/PPO-RND/tree/main

训练记录：
20250203:验证失败，模型有问题
20250204:重新调整模型，在2号机上验证，训练分数138.680，测试分数500分（目前基本都是400分以上），play模型，基本可以算是训练通过，应该是训练时间短等其他超参数的原因，所以训练分数不高
'''
import os
import math
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from collections import deque
import ale_py
from torch.distributions import Categorical
from gym.wrappers.normalize import RunningMeanStd

# Make ALE (Atari) environments discoverable by gymnasium.
gym.register_envs(ale_py)

GAMMA = 0.999
EXT_ADV_COEFF = 2  # weight of the extrinsic advantage in the combined advantage
INT_ADV_COEFF = 1  # weight of the intrinsic (RND) advantage
INIT_GAMMA = 0.99  # discount used when accumulating intrinsic returns for reward normalization
INT_GAMMA = 0.99  # discount for the intrinsic-value GAE
EXT_GAMMA = 0.999  # discount for the extrinsic-value GAE
GAE_LAMBDA = 0.95 # lambda factor of the generalized advantage estimator; 0.95 is a commonly good value

TRAJECTORY_SIZE = 2048 # length of one rollout, i.e. the contiguous sampling buffer (the game is sampled continuously)
LEARNING_RATE = 1e-4

PPO_EPS = 0.2
PPO_EPOCHES = 4 # number of PPO optimization passes over each collected trajectory
PPO_BATCH_SIZE = 64 # mini-batch length used for each optimization step over the trajectory

TEST_ITERS = 10 # run an evaluation game every this many training iterations

CLIP_GRAD = 0.1
CLIP_RANGE = 0.1

ANNEAL_LR = True

TOTAL_TIMESTEPS: int = 2000000000

NUM_UPDATES = int(TOTAL_TIMESTEPS / (TRAJECTORY_SIZE))

MAX_GRAD_NORM = 0.5  # NOTE(review): defined but never applied — clip_grad_norm_ below only measures the norm

TARGET_KL = None
# NOTE(review): the reference implementation uses 32 / n_workers; with a single
# worker this evaluates to 32, which makes the RND loss mask keep every sample.
PREDICTOR_PROPORTION = 32 / 1
ENT_COEF = 0.001
PRE_NORMALIZATION_STEPS = 50


class StackFrameWrapper(gym.Wrapper):
    """
    Stack the last ``n_frames`` observations into one observation by
    concatenating them along axis 0.

    On reset the first frame is replicated ``n_frames`` times so the stacked
    shape is constant from the very first step.
    """

    def __init__(self, env, n_frames=4):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.frames = deque([], maxlen=n_frames)

        # Bug fix: observations are concatenated along axis 0 (see reset/step),
        # but the bounds were previously repeated along axis 2, so the declared
        # observation_space did not match the observations actually returned.
        # Tiling the bounds along axis 0 mirrors the concatenation exactly.
        low = np.concatenate([self.observation_space.low] * n_frames, axis=0)
        high = np.concatenate([self.observation_space.high] * n_frames, axis=0)
        self.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.observation_space.dtype)

    def reset(self, **kwargs):
        """Reset the env and fill the frame buffer with copies of the first obs."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), info

    def step(self, action):
        """Step the env, push the new frame, and return the stacked observation."""
        # NOTE(review): axis-0 concatenation assumes channel-first or 1-D
        # observations — confirm the wrapper ordering at the call site.
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), reward, terminated, truncated, info



class TransposeObservation(gym.ObservationWrapper):
    """Reorder image observations from (H, W, C) into channel-first (C, H, W)."""

    def __init__(self, env=None):
        super(TransposeObservation, self).__init__(env)

    def observation(self, observation):
        # PyTorch convolution layers expect the channel axis first.
        channel_first = np.transpose(observation, (2, 0, 1))
        return channel_first


def test_net(net, env, count=10, device="cpu"):
    """
    Play ``count`` greedy episodes with ``net`` and return average results.

    An episode is aborted early if the same action is chosen more than 100
    times in a row (the repeat counter deliberately persists across episodes,
    matching the original behavior).

    :param net: callable mapping an observation batch to per-action scores
    :param env: gymnasium environment used for evaluation
    :param count: number of evaluation episodes
    :param device: torch device for inference
    :return: tuple (mean episode reward, mean episode length)
    """
    total_reward = 0.0
    total_steps = 0
    repeat_count = 0
    last_action = None
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                scores_v = net(obs_v)
                action = scores_v.squeeze(dim=0).data.cpu().argmax().item()
                if last_action == action:
                    repeat_count += 1
                    # Assume the policy is stuck; abandon the episode.
                    if repeat_count > 100:
                        break
                else:
                    repeat_count = 0
                    last_action = action
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
                if done or trunc:
                    break
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, net_crt, states_v, device="cpu"):
    """
    Compute GAE advantages and bootstrapped reference values for a trajectory.

    The trajectory is walked backwards so each step's advantage accumulates the
    (discounted, lambda-smoothed) advantages of all later steps; episode ends
    reset the accumulator since there is no future to bootstrap from.

    :param trajectory: list of 1-tuples of experience objects (fields .reward, .done)
    :param net_crt: critic network producing state-value estimates
    :param states_v: tensor of states matching the trajectory order
    :param device: device for the returned tensors
    :return: (advantages tensor, reference-values tensor), each len(trajectory)-1
    """
    with torch.no_grad():
        values = net_crt(states_v).squeeze().data.cpu().numpy()

    gae = 0.0
    advantages = []
    references = []
    # Pair each value with its successor, iterating from the end of the rollout.
    steps = zip(reversed(values[:-1]), reversed(values[1:]), reversed(trajectory[:-1]))
    for value, next_value, (exp,) in steps:
        if exp.done:
            # Terminal step: no bootstrapping, drop the accumulated advantage.
            gae = exp.reward - value
        else:
            # One-step TD error, then the standard GAE recursion.
            td_error = exp.reward + GAMMA * next_value - value
            gae = td_error + GAMMA * GAE_LAMBDA * gae
        advantages.append(gae)
        references.append(gae + value)

    # Restore chronological order before converting to tensors.
    advantages.reverse()
    references.reverse()
    adv_v = torch.FloatTensor(advantages).to(device)
    ref_v = torch.FloatTensor(references).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """
    Convert a list of states into a batched tensor suitable for the model.

    A single state becomes a [1, D] tensor; multiple states become an
    [N, ...] tensor stacked along a new leading axis.

    :param states: list of numpy arrays (or array-likes) with states
    :return: torch tensor holding a copy of the batched states
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # Bug fix: np.array(..., copy=False) raises ValueError under NumPy 2.0
        # whenever a copy is required — and stacking a list of arrays always
        # requires one.  np.asarray copies only when necessary and keeps the
        # pre-2.0 behavior.
        np_states = np.asarray([np.asarray(s) for s in states])
    return torch.tensor(np_states.copy())


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """
    Placeholder for the usual DQN wrapper stack.

    Currently a no-op: all keyword options are ignored and ``env`` is returned
    unchanged.
    """
    return env


def discounted_reward(predictor_model, target_model, next_states, obs_rms, device="cpu"):
    """
    Compute the RND intrinsic reward for a batch of next states.

    The per-sample squared error between the trainable predictor and the frozen
    target network serves as the curiosity (intrinsic) reward.

    :param predictor_model: trainable RND predictor network
    :param target_model: frozen RND target network
    :param next_states: batch of next observations (numpy array or list of arrays)
    :param obs_rms: running mean/std tracker with .mean and .var attributes
    :param device: torch device for the forward passes
    :return: numpy array with one intrinsic reward per state
    """
    # Bug fix: standardize by the running std, i.e. sqrt(var) == var ** 0.5.
    # The original divided by sqrt(var ** 0.5) == var ** 0.25 — the square root
    # applied twice — which under-normalizes and is inconsistent with the
    # obs normalization in the training loop.
    next_states = np.clip((next_states - obs_rms.mean) / np.sqrt(obs_rms.var), -5, 5, dtype='float32')
    next_states_v = torch.from_numpy(next_states).to(device)
    predictor_encoded_features = predictor_model(next_states_v)
    target_encoded_features = target_model(next_states_v)

    # Mean squared embedding error per sample (reduce over the feature axis).
    int_reward = (predictor_encoded_features - target_encoded_features).pow(2).mean(1)
    return int_reward.cpu().data.numpy()


def get_gae(rewards, values, next_values, dones, gamma):
    """
    Compute GAE-based returns for each worker's trajectory.

    :param rewards: per-worker reward sequences, shape [n_workers, T]
    :param values: per-worker value estimates, shape [n_workers, T]
    :param next_values: per-worker bootstrap values; only the last element of
        each worker's sequence is used to extend the value series
    :param dones: per-worker terminal flags (1 at episode ends)
    :param gamma: discount factor
    :return: flat numpy array of returns, concatenated over workers
    """
    # Generalized from the original: worker count and trajectory length are
    # taken from the inputs instead of being hard-coded to 1 and
    # TRAJECTORY_SIZE, so the function works for any rollout size.
    returns = []
    for worker in range(len(rewards)):
        # Extend the value series with the bootstrap value for step T.
        extended = np.append(values[worker], next_values[worker][-1])
        gae = 0
        worker_returns = []
        for step in reversed(range(len(rewards[worker]))):
            # Terminal steps mask out all bootstrapped future terms.
            mask = 1 - dones[worker][step]
            delta = rewards[worker][step] + gamma * extended[step + 1] * mask - extended[step]
            gae = delta + gamma * GAE_LAMBDA * mask * gae
            worker_returns.append(gae + extended[step])
        # Built backwards; restore chronological order (O(T) instead of the
        # original O(T^2) insert(0, ...) pattern).
        worker_returns.reverse()
        returns.append(worker_returns)

    return np.concatenate(returns)


def compute_pg_loss(ratio, adv):
    """
    Clipped PPO surrogate policy loss (negated, so minimizing it maximizes
    the clipped objective).

    :param ratio: new/old policy probability ratio per sample
    :param adv: advantage estimates per sample
    :return: scalar loss tensor
    """
    surrogate = adv * ratio
    clipped_surrogate = adv * ratio.clamp(1 - CLIP_RANGE, 1 + CLIP_RANGE)
    return -torch.min(surrogate, clipped_surrogate).mean()


def calculate_rnd_loss(next_states, target_model, predictor_model, device="cpu"):
    """
    RND distillation loss: squared error between the predictor's and the frozen
    target's embeddings, averaged over a randomly kept subset of samples.

    NOTE(review): PREDICTOR_PROPORTION is 32 in this file, so the
    ``rand < PREDICTOR_PROPORTION`` mask keeps every sample; fractions below 1
    would actually drop samples as in the reference implementation.

    :param next_states: tensor batch of (next) observations
    :param target_model: frozen RND target network
    :param predictor_model: trainable RND predictor network
    :param device: device for the random mask
    :return: scalar loss tensor
    """
    target_feats = target_model(next_states)
    predictor_feats = predictor_model(next_states)
    per_sample = (predictor_feats - target_feats).pow(2).mean(-1)
    # Randomly select the proportion of samples that contribute to the loss.
    keep = torch.rand(per_sample.size(), device=device) < PREDICTOR_PROPORTION
    # Guard the denominator so an empty mask cannot divide by zero.
    denom = torch.max(keep.sum(), torch.Tensor([1]).to(device=device))
    return (per_sample * keep).sum() / denom


def clip_grad_norm_(parameters, norm_type: float = 2.0, device='cpu'):
    """
    Compute the total gradient norm of ``parameters``.

    NOTE(review): despite the trailing underscore in the name, this function
    does NOT clip gradients in place — it only measures the norm (so the
    module-level MAX_GRAD_NORM is never actually applied by callers).

    :param parameters: tensor or iterable of tensors with populated ``.grad``
    :param norm_type: p-norm degree; ``float('inf')`` gives the max-abs norm
    :param device: device on which the reduction is performed
    :return: scalar tensor with the total norm (0. when no parameter has a grad)
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    if norm_type == float('inf'):
        # Bug fix: torch.max() does not accept a generator, so the original
        # raised TypeError whenever the inf-norm branch was taken.  Stack the
        # per-parameter maxima and reduce them instead.
        total_norm = torch.max(torch.stack([p.grad.detach().abs().max().to(device=device) for p in parameters]))
    else:
        total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device=device) for p in parameters]), norm_type)

    return total_norm


def select_device(args):
    """
    Choose the torch device from parsed CLI args and hardware availability.

    Prefers CUDA, then Apple MPS, when ``args.cuda`` is truthy; otherwise
    (or when neither accelerator is available) falls back to CPU.
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True together with action='store_true' means the
    # flag is always truthy — there is no command-line way to force CPU.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default='cartpole', help="Name of the run")
    args = parser.parse_args()
    device = select_device(args=args)

    save_path = os.path.join("saves", "ppo-rnd-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # wrap_dqn is currently a passthrough (see its definition above).
    env = wrap_dqn(gym.make('CartPole-v1'))
    test_env = wrap_dqn(gym.make('CartPole-v1'))

    # Build the actor-critic network and the RND predictor/target pair.
    rnd_model = model.RNDModel(env.observation_space.shape[0], env.action_space.n).to(device)
    rnd_predictor_model = model.PredictorModel(env.observation_space.shape[0]).to(device)    
    target_rnd_model = model.TargetModel(env.observation_space.shape[0]).to(device)
    # The RND target network stays frozen; only the predictor is trained.
    for param in target_rnd_model.parameters():
        param.requires_grad = False
    
    print(rnd_model)
    print(rnd_predictor_model)

    writer = SummaryWriter(comment="-ppo-rnd-" + args.name)
    agent = model.RNDAgent(rnd_model, device=device)
    exp_source = ptan.experience.ExperienceSourceRAW(env, agent, steps_count=1)

    # One optimizer drives both the policy/value net and the RND predictor.
    combined_params = list(rnd_model.parameters()) + list(rnd_predictor_model.parameters())
    opt = optim.Adam(combined_params, lr=LEARNING_RATE, eps=1e-5)
    scheduler = optim.lr_scheduler.StepLR(opt, step_size=10000, gamma=0.9)
    reward_rms = RunningMeanStd(shape=(1,))
    obs_rms = RunningMeanStd(shape=(4,))

    frame_idx = 0
    train_count = 0
    # Resume from the newest checkpoint if one exists in save_path.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint files embed an epoch number; sort by it to find the latest.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))

        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            rnd_model.load_state_dict(checkpoint['rnd_model'])
            rnd_predictor_model.load_state_dict(checkpoint['rnd_predictor_model'])
            target_rnd_model.load_state_dict(checkpoint['target_rnd_model'])
            # Re-freeze the target network after loading its weights.
            for param in target_rnd_model.parameters():
                param.requires_grad = False
            opt.load_state_dict(checkpoint['opt'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            obs_rms.mean = checkpoint['obs_rms_mean']
            obs_rms.var = checkpoint['obs_rms_var']
            obs_rms.count = checkpoint['obs_rms_count']
            reward_rms.mean = checkpoint['int_reward_nms_mean']
            reward_rms.var = checkpoint['int_reward_nms_var']
            reward_rms.count = checkpoint['int_reward_nms_count']
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']

            print("加载模型成功")
            print(f"learning rate: {scheduler.get_last_lr()[0]}")

    # TODO: pre-normalization warm-up of the running statistics, following the
    # reference implementation (its "PreNormalization" region).

    trajectory = [] # note: the sample buffer is called "trajectory" here
    best_reward = None
    grad_index = 0
    num_update = TOTAL_TIMESTEPS // TRAJECTORY_SIZE
    total_normalization_steps =  TRAJECTORY_SIZE * PRE_NORMALIZATION_STEPS

    logger = common.setup_logger(save_path)
    def get_track_info(info):
        logger.info(info)

    with ptan.common.utils.RewardTracker(writer, info_callback=get_track_info) as tracker:

        # Warm-up phase: seed obs_rms with rollout statistics before training.
        if frame_idx < total_normalization_steps:
            print("进行预热")
            exps = []
            for step_idx, exp in enumerate(exp_source):
                exps.append(exp)
                if step_idx >= total_normalization_steps:
                    break

                if len(exps) % (1 * TRAJECTORY_SIZE) == 0:
                    # t[0][4] appears to be the next-state field of the
                    # experience tuple — TODO confirm against ExperienceSourceRAW.
                    obs_rms.update(np.stack([t[0][4] for t in exps]))
                    exps = []

        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + frame_idx)
                tracker.reward(np.mean(rewards), step_idx + frame_idx)

            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue


            # Unpack the trajectory; each entry seems to be
            # (state, action, reward, done, next_state, agent_extras) where
            # agent_extras holds (_, int_value, ext_value, log_prob) — TODO
            # confirm against model.RNDAgent.
            traj_states = [t[0][0] for t in trajectory]
            traj_actions = [t[0][1] for t in trajectory]
            traj_next_state = [t[0][4] for t in trajectory]
            traj_rewards = [t[0][2] for t in trajectory]
            traj_done = [t[0][3] for t in trajectory]
            traj_int_values = [t[0][5][1] for t in trajectory]
            traj_ext_values = [t[0][5][2] for t in trajectory]
            traj_log_probs = [t[0][5][3] for t in trajectory]

            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_next_state_v = torch.FloatTensor(np.array(traj_next_state)).to(device)
            traj_rewards_v = torch.FloatTensor(np.array(traj_rewards)).to(device)
            traj_done_v = torch.FloatTensor(np.array(traj_done)).to(device)
            traj_ext_values_v = torch.FloatTensor(traj_ext_values).to(device)
            traj_int_values_v = torch.FloatTensor(traj_int_values).to(device)
            # NOTE(review): duplicate of the traj_ext_values_v assignment above.
            traj_ext_values_v = torch.FloatTensor(traj_ext_values).to(device)
            traj_log_probs_v = torch.FloatTensor(traj_log_probs).to(device)

            # Intrinsic rewards come from the RND prediction error.
            total_int_rewards = discounted_reward(rnd_predictor_model, target_rnd_model, traj_next_state, obs_rms, device=device)
            # Accumulate discounted intrinsic returns (single worker) to feed
            # the running statistics used for intrinsic-reward normalization.
            intrinsic_return = [[] for _ in range(1)]
            for worker in range(1):
                rewems = 0
                for step in reversed(range(TRAJECTORY_SIZE)):
                    rewems = rewems * INIT_GAMMA + total_int_rewards[step]
                    intrinsic_return[0].insert(0, rewems)
            reward_rms.update(np.ravel(intrinsic_return).reshape(-1, 1))
            # Scale intrinsic rewards by the running std of the returns.
            total_int_rewards = total_int_rewards  / reward_rms.var ** 0.5

            with torch.no_grad():
                next_dists, next_int_values, next_ext_values, next_action_prob = rnd_model(traj_next_state_v)
                next_actions = next_dists.sample()
                next_log_probs = next_dists.log_prob(next_actions)

            
            # Intrinsic GAE is computed as non-episodic (dones forced to zero);
            # extrinsic GAE respects episode boundaries.
            int_rets = get_gae(np.expand_dims(total_int_rewards, axis=0), traj_int_values_v.unsqueeze(0).cpu(), next_int_values.unsqueeze(0).cpu(), np.zeros_like(traj_done_v.unsqueeze(0).cpu()), gamma=INT_GAMMA)
            ext_rets = get_gae(traj_rewards_v.unsqueeze(0).cpu(), traj_ext_values_v.unsqueeze(0).cpu(), next_ext_values.unsqueeze(0).cpu(), traj_done_v.unsqueeze(0).cpu(), gamma=EXT_GAMMA)

            ext_values = traj_ext_values_v.cpu().numpy()
            ext_advs = ext_rets - ext_values

            int_values = traj_int_values_v.cpu().numpy()
            int_advs = int_rets - int_values

            # Combine extrinsic and intrinsic advantages, then standardize.
            advs = ext_advs * EXT_ADV_COEFF + int_advs * INT_ADV_COEFF
            advs = (advs - advs.mean()) / (advs.std() + 1e-8)
            obs_rms.update(np.array(traj_next_state))
            # NOTE(review): total_next_obs is computed but never used below.
            total_next_obs = ((traj_next_state - obs_rms.mean) / (obs_rms.var ** 0.5)).clip(-5, 5)

            # PPO optimization (proximal policy optimization) over the trajectory.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(trajectory), PPO_BATCH_SIZE):
                    # TODO: the reference implementation samples mini-batches at
                    # random instead of sweeping the trajectory in order.
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    next_state_v = traj_next_state_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    logprobs_v = traj_log_probs_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_int_rets = torch.FloatTensor(int_rets[batch_ofs:batch_ofs + PPO_BATCH_SIZE]).to(device)
                    batch_ext_rets = torch.FloatTensor(ext_rets[batch_ofs:batch_ofs + PPO_BATCH_SIZE]).to(device)
                    batch_advs = torch.FloatTensor(advs[batch_ofs:batch_ofs + PPO_BATCH_SIZE]).to(device)
                    batch_log_probs = traj_log_probs[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    traj_next_states = traj_next_state_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    dist, int_value, ext_value, _ = rnd_model(states_v)
                    entropy = dist.entropy().mean()
                    new_log_probs = dist.log_prob(actions_v)
                    # Probability ratio between the new and the old policy.
                    ratio = (new_log_probs - logprobs_v).exp()
                    pg_loss = compute_pg_loss(ratio, batch_advs)

                    int_value_loss = F.mse_loss(int_value, batch_int_rets)
                    ext_value_loss = F.mse_loss(ext_value, batch_ext_rets)

                    critic_loss = 0.5 * (int_value_loss + ext_value_loss)
                    # NOTE(review): the RND loss consumes raw next states here,
                    # while discounted_reward normalizes them with obs_rms —
                    # confirm this asymmetry against the reference repo.
                    rnd_loss = calculate_rnd_loss(next_state_v, target_rnd_model, rnd_predictor_model, device=device)
                    total_loss = critic_loss + pg_loss - ENT_COEF * entropy + rnd_loss
                    opt.zero_grad()
                    total_loss.backward()
                    # NOTE(review): clip_grad_norm_ only *measures* the norm;
                    # no gradient clipping is actually applied before opt.step().
                    clip_grad_norm_(combined_params)
                    opt.step()

            scheduler.step()
            trajectory.clear()
            train_count += 1

            # Persist everything needed to resume: weights, optimizer,
            # scheduler, running statistics, and progress counters.
            checkpoint = {
                'rnd_model': rnd_model.state_dict(),
                'rnd_predictor_model': rnd_predictor_model.state_dict(),
                'target_rnd_model': target_rnd_model.state_dict(),
                'opt': opt.state_dict(),
                'scheduler': scheduler.state_dict(),
                'obs_rms_mean': obs_rms.mean,
                'obs_rms_var': obs_rms.var,
                'obs_rms_count': obs_rms.count,
                'int_reward_nms_mean': reward_rms.mean,
                'int_reward_nms_var': reward_rms.var,
                'int_reward_nms_count': reward_rms.count,
                'frame_idx': frame_idx + step_idx,
                'train_count': train_count
            }
            common.save_checkpoints(train_count, checkpoint, save_path, "ppo-rnd")

            if train_count % TEST_ITERS == 0:
                ts = time.time()
                rnd_model.eval()
                # rnd_model returns a 4-tuple; the test policy acts greedily on
                # its last element (presumably the action probabilities —
                # TODO confirm against model.RNDModel).
                rewards, steps = test_net(lambda x: rnd_model(x)[-1], test_env, count=5, device=device)
                rnd_model.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + frame_idx)
                writer.add_scalar("test_steps", steps, step_idx + frame_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        # NOTE(review): name/fname are computed but never used;
                        # saving is delegated to common.save_best_model below.
                        name = "best_%+.3f_%d.dat" % (rewards, step_idx + frame_idx)
                        fname = os.path.join(save_path, name)
                    best_reward = rewards
                common.save_best_model(rewards, rnd_model.state_dict(), save_path, 'ppo-rnd-best')



