#!/usr/bin/env python3
'''
Not yet adapted or verified. Reference:
https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/rpo_continuous_action.py#L108


Training notes:
Differs substantially from the original reference code.
'''
import os
import math
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F


# Hyper-parameters; the remaining values can be cross-checked against CleanRL.
ENV_ID = "Walker2d-v5"
GAMMA = 0.99
GAE_LAMBDA = 0.95 # lambda factor of the generalized advantage estimator; 0.95 works well in practice

TRAJECTORY_SIZE = 2049 # length of the contiguous sample trajectory  # TODO: consider reducing to 512
LEARNING_RATE = 5e-4

PPO_EPS = 0.2 # TODO: consider disabling clipping or widening the range (e.g. 0.5) if explained_variance stays below 0.3 long-term
PPO_EPOCHES = 10 # TODO: number of PPO optimization epochs per trajectory — clarify its effect
PPO_BATCH_SIZE = 64 # mini-batch length used for each trajectory-sample update  # TODO: consider increasing to 128

TEST_ITERS = 10 # run one evaluation game every this many training iterations

CLIP_VLOSS = True
VF_COEF = 0.5
ENT_COEF = 0.0
MAX_GRAD_NORM = 0.5
TARGET_KL = None # TODO: presumably used to stop the PPO epochs early when the new/old policies diverge too much — confirm


def test_net(net, env, count=10, device="cpu"):
    """Run `count` evaluation episodes and return (mean reward, mean steps).

    The policy is used deterministically: the network's first output (taken
    as the action mean) is clipped to [-1, 1] and applied directly.
    """
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        state, _ = env.reset()
        episode_over = False
        while not episode_over:
            state_v = ptan.agent.float32_preprocessor([state]).to(device)
            mean_v = net(state_v)[0]
            act = np.clip(mean_v.squeeze(dim=0).data.cpu().numpy(), -1, 1)
            state, rew, terminated, truncated, _ = env.step(act)
            episode_over = terminated or truncated
            total_reward += rew
            total_steps += 1
    return total_reward / count, total_steps / count


def calc_logprob(mu_v, logstd_v, actions_v):
    """Gaussian log-density of `actions_v` under N(mu_v, exp(logstd_v)).

    NOTE: `torch.exp(logstd_v)` is used directly as the variance term; it is
    clamped from below only in the quadratic part to avoid division blow-up.
    """
    var_v = torch.exp(logstd_v)
    quad = -(actions_v - mu_v).pow(2) / (2 * var_v.clamp(min=1e-3))
    norm = -torch.log(torch.sqrt(2 * math.pi * var_v))
    return quad + norm


def calc_adv_ref(trajectory, net_rpo, states_v, device="cpu"):
    """Compute GAE advantages and bootstrapped value targets for a trajectory.

    :param trajectory: list of sampled entries; each item's first element must
        expose ``reward`` and ``done``
    :param net_rpo: network providing ``get_value(states)`` (the critic head)
    :param states_v: states tensor matching ``trajectory``
    :param device: torch device for the result tensors
    :return: tuple ``(advantages, reference_values)``, each of
        length ``len(trajectory) - 1``
    """
    with torch.no_grad():
        # Critic estimate V(s) for every state in the trajectory.
        state_values = net_rpo.get_value(states_v)
        n = len(trajectory) - 1
        advantages = torch.zeros(n, device=device)
        refs = torch.zeros(n, device=device)
        running_gae = 0.0
        # Walk the trajectory backwards, accumulating the smoothed
        # (lambda-discounted) advantage; a terminal step zeroes the bootstrap.
        for idx in reversed(range(n)):
            step = trajectory[idx][0]
            mask = 1.0 - float(step.done)
            td_error = step.reward + GAMMA * state_values[idx + 1] * mask - state_values[idx]
            running_gae = td_error + GAMMA * GAE_LAMBDA * mask * running_gae
            advantages[idx] = running_gae
            refs[idx] = running_gae + state_values[idx]
        return advantages, refs

# TODO: confirm whether this vectorized-env factory should actually be adopted
def make_env(env_id, idx, capture_video, run_name, gamma):
    """Return a thunk that builds a fully wrapped Gymnasium environment.

    Only worker 0 records video when `capture_video` is set. Observations are
    flattened, normalized and clipped to [-10, 10]; rewards are normalized
    (with discount `gamma`) and clipped to [-10, 10].
    """
    def thunk():
        if capture_video and idx == 0:
            env = gym.make(env_id, render_mode="rgb_array")
            env = gym.wrappers.RecordVideo(env, f"videos/{run_name}")
        else:
            env = gym.make(env_id)
        # deal with dm_control's Dict observation space
        env = gym.wrappers.FlattenObservation(env)
        env = gym.wrappers.RecordEpisodeStatistics(env)
        env = gym.wrappers.ClipAction(env)
        env = gym.wrappers.NormalizeObservation(env)
        env = gym.wrappers.TransformObservation(env, lambda obs: np.clip(obs, -10, 10))
        env = gym.wrappers.NormalizeReward(env, gamma=gamma)
        env = gym.wrappers.TransformReward(env, lambda reward: np.clip(reward, -10, 10))
        return env

    return thunk


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # BooleanOptionalAction keeps "--cuda" working (and adds "--no-cuda");
    # the previous default=True + action='store_true' made the flag a no-op.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    parser.add_argument("-e", "--env", default=ENV_ID, help="Environment id, default=" + ENV_ID)
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")

    save_path = os.path.join("saves", "rpo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    env = gym.make(args.env)
    # TODO: experiment with the vectorized, fully wrapped environment instead:
    # env = gym.vector.SyncVectorEnv(
    #     [make_env(args.env, idx, False, args.name, GAMMA) for idx in range(1)]
    # )
    test_env = gym.make(args.env)

    # Policy/value network (actor and critic live in one module).
    net_rpo = model.RPOModel(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
    print(net_rpo)

    writer = SummaryWriter(comment="-rpo_" + args.name)
    agent = model.AgentRPO(net_rpo, device=device)
    exp_source = common.RPOExperienceSource(env, agent, steps_count=1)

    opt_rpo = optim.Adam(net_rpo.parameters(), lr=LEARNING_RATE, eps=1e-5)
    # NOTE(review): this scheduler is checkpointed but never stepped in the
    # training loop, so the LR never actually decays — confirm intent.
    scheduler = optim.lr_scheduler.StepLR(opt_rpo, step_size=10000, gamma=0.9)

    frame_idx = 0
    train_count = 0
    # Resume from the newest checkpoint if any exist.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Assumes checkpoint names contain "epoch" and carry the step index as
        # the third '_'-separated field — verify against common.save_checkpoints.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        net_rpo.load_state_dict(checkpoint['net_rpo'])
        opt_rpo.load_state_dict(checkpoint['opt_rpo'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        frame_idx = checkpoint['frame_idx']
        train_count = checkpoint['train_count']
        print("加载模型成功")

    logger = common.setup_logger(save_path)
    trajectory = []  # buffer holding one contiguous sampling trajectory
    best_reward = None
    start_time = time.time()
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + frame_idx)
                tracker.reward(np.mean(rewards), step_idx + frame_idx)

            frame_idx += 1

            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Pack the collected trajectory into tensors.
            traj_states = [t[0].state for t in trajectory]
            traj_actions = [t[0].action for t in trajectory]
            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_logprobs = torch.FloatTensor(np.array([t[0].log_prob for t in trajectory])).to(device)
            # GAE advantages and bootstrapped value targets (length N - 1).
            net_rpo.eval()
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, net_rpo, traj_states_v, device=device)
            with torch.no_grad():
                # Flatten so value clipping below subtracts (B,) from (B,)
                # instead of broadcasting a possible (B,1) shape.
                old_values = net_rpo.get_value(traj_states_v).view(-1)

            # calc_adv_ref yields len(trajectory) - 1 entries, so drop the last
            # state/action/logprob/value to keep every tensor the same length;
            # previously the final mini-batch had mismatched shapes and the
            # explained-variance arrays could not broadcast.
            traj_states_v = traj_states_v[:-1]
            traj_actions_v = traj_actions_v[:-1]
            traj_logprobs = traj_logprobs[:-1]
            old_values = old_values[:-1]

            net_rpo.train()
            # Normalize advantages to zero mean / unit variance.
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / (torch.std(traj_adv_v) + 1e-8)

            clipfracs = []
            # PPO optimization epochs over the collected trajectory.
            for epoch in range(PPO_EPOCHES):
                for batch_ofs in range(0, len(traj_states_v), PPO_BATCH_SIZE):
                    states_v = traj_states_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    actions_v = traj_actions_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE].unsqueeze(-1)
                    batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_logprob_v = traj_logprobs[batch_ofs:batch_ofs + PPO_BATCH_SIZE]
                    batch_old_values = old_values[batch_ofs:batch_ofs + PPO_BATCH_SIZE]

                    _, newlogprob, entropy, newvalue = net_rpo(states_v, actions_v)
                    logratio = newlogprob - batch_old_logprob_v
                    ratio = torch.exp(logratio)

                    with torch.no_grad():
                        # Diagnostics: KL estimates and clipped-ratio fraction
                        # (use PPO_EPS instead of the hard-coded 0.2 so the
                        # metric tracks the actual clipping range).
                        old_approx_kl = (-logratio).mean()
                        approx_kl = ((ratio - 1) - logratio).mean()
                        clipfracs += [((ratio - 1.0).abs() > PPO_EPS).float().mean().item()]

                    # Clipped surrogate policy objective.
                    pg_loss1 = -batch_adv_v * ratio
                    pg_loss2 = -batch_adv_v * torch.clamp(ratio, 1.0 - PPO_EPS, 1.0 + PPO_EPS)
                    pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                    newvalue = newvalue.view(-1)
                    if CLIP_VLOSS:
                        # Clipped value loss, as in CleanRL's PPO.
                        v_loss_unclipped = (newvalue - batch_ref_v) ** 2
                        v_clipped = batch_old_values + torch.clamp(newvalue - batch_old_values, -PPO_EPS, PPO_EPS)
                        v_loss_clipped = (v_clipped - batch_ref_v) ** 2
                        v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                        v_loss = 0.5 * v_loss_max.mean()
                    else:
                        v_loss = 0.5 * ((newvalue - batch_ref_v) ** 2).mean()

                    entropy_loss = entropy.mean()
                    loss = pg_loss - ENT_COEF * entropy_loss + v_loss * VF_COEF

                    opt_rpo.zero_grad()
                    loss.backward()
                    nn.utils.clip_grad_norm_(net_rpo.parameters(), MAX_GRAD_NORM)
                    opt_rpo.step()

                # Optional early stop when the new policy drifts too far.
                if TARGET_KL is not None:
                    if approx_kl > TARGET_KL:
                        break

            train_count += 1
            trajectory.clear()
            y_pred, y_true = old_values.cpu().numpy(), traj_ref_v.cpu().numpy()
            var_y = np.var(y_true)
            explained_var = np.nan if var_y == 0 else 1 - np.var(y_true - y_pred) / var_y

            # TRY NOT TO MODIFY: record rewards for plotting purposes
            writer.add_scalar("charts/learning_rate", opt_rpo.param_groups[0]["lr"], step_idx + frame_idx)
            writer.add_scalar("losses/value_loss", v_loss.item(), step_idx + frame_idx)
            writer.add_scalar("losses/policy_loss", pg_loss.item(), step_idx + frame_idx)
            writer.add_scalar("losses/entropy", entropy_loss.item(), step_idx + frame_idx)
            writer.add_scalar("losses/old_approx_kl", old_approx_kl.item(), step_idx + frame_idx)
            writer.add_scalar("losses/approx_kl", approx_kl.item(), step_idx + frame_idx)
            writer.add_scalar("losses/clipfrac", np.mean(clipfracs), step_idx + frame_idx)
            writer.add_scalar("losses/explained_variance", explained_var, step_idx + frame_idx)
            # Parenthesized: the old expression divided only frame_idx by the
            # elapsed time (precedence bug), producing a meaningless SPS value.
            sps = int((step_idx + frame_idx) / (time.time() - start_time))
            print("SPS:", sps)
            writer.add_scalar("charts/SPS", sps, step_idx + frame_idx)

            if train_count % TEST_ITERS == 0:
                ts = time.time()
                net_rpo.eval()
                rewards, steps = test_net(lambda x: net_rpo(x)[0], test_env, device=device)
                net_rpo.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx)
                writer.add_scalar("test_steps", steps, step_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                    common.save_best_model(train_count, net_rpo.state_dict(), save_path, "rpo-", keep_best=10)
                checkpoint = {
                    "net_rpo": net_rpo.state_dict(),
                    "opt_rpo": opt_rpo.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "train_count": train_count,
                    "frame_idx": frame_idx + step_idx,
                }
                common.save_checkpoints(frame_idx, checkpoint, save_path, "rpo-")
