#!/usr/bin/env python3
'''
待验证，参考链接：https://github.com/acyclics/MPO/blob/master/main.py
封装优化代码,注意参考链接的代码是线性空间，其写的部分代码耦合了线性空间
todo 后续适配参考链接的多环境并行代码

训练记录：
在2号机上训练
20250203:训练分数-8.61分，测试分数2分，继续训练
20250204: ACT学习率： [0.0003]，CRT学习率： [0.0003],训练分数-8.5分，测试分数2分，，继续训练一天
20250206：根据建议对代码进行了调整
20250324: 正在调整mpo算法的代码，完成调整后移植到pendulum环境中验证
'''
import os
import math
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from collections import deque
import ale_py
from torch.distributions import Categorical
from scipy.optimize import minimize


# Make the ALE/* environment ids (Atari) visible to gymnasium.
gym.register_envs(ale_py)

NUM_ENVS = 50            # number of parallel envs (not used yet; see the multi-env TODO in __main__)
GAMMA = 0.999            # discount factor used by calc_adv_ref
EXT_ADV_COEFF = 2        # extrinsic-advantage weight (RND-style split; not referenced in this file)
INT_ADV_COEFF = 1        # intrinsic-advantage weight (not referenced in this file)
INIT_GAMMA = 0.99
INT_GAMMA = 0.99         # discount for intrinsic rewards
EXT_GAMMA = 0.999        # discount for extrinsic rewards
GAE_LAMBDA = 0.95 # lambda factor of the advantage estimator; 0.95 is a good default

BUFFER_SIZE = 50000 # TODO purpose: appears to be the sampled trajectory length (contiguous sampling cache; the game is continuous)
LEARNING_RATE = 3e-4
TRAJECTORY_SIZE = 128    # number of transitions collected before each update

PPO_EPS = 0.2
PPO_EPOCHES = 10 # TODO purpose: number of PPO iterations per batch
BATCH_SIZE = 128 # batch length used for each pass over the trajectory samples

TEST_ITERS = 10 # run a test game once every TEST_ITERS sampling iterations

CLIP_GRAD = 0.5
CLIP_RANGE = 0.1

ANNEAL_LR = True

TOTAL_TIMESTEPS: int = 2000000000

MAX_GRAD_NORM = 0.5

TARGET_KL = None
PREDICTOR_PROPORTION = 32 / 1
ENT_COEF = 0.001
PRE_NORMALIZATION_STEPS = 50
ENV_ID = 'ALE/HumanCannonball-v5'
γ = 0.99       # discount used inside train_critic's return targets
ε = 0.1        # E-step dual constraint (used in the `dual` objective in __main__)
ε_kl = 0.01    # M-step KL trust-region bound
α = 1.0        # step size for the KL Lagrange-multiplier update
LAGRANGE_IT = 5  # number of M-step (policy) iterations per update


class StackFrameWrapper(gym.Wrapper):
    """Keep the last ``n_frames`` observations and emit them concatenated along axis 0.

    On reset the stack is seeded with ``n_frames`` copies of the first
    observation, so the emitted shape is constant from the very first step.
    """

    def __init__(self, env, n_frames=4):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.frames = deque([], maxlen=n_frames)

        # BUG FIX: the bounds were previously repeated along axis=2 while
        # reset()/step() concatenate frames along axis=0, so the declared
        # observation_space did not match the observations actually emitted.
        # Repeat along axis=0 to agree with the concatenation below
        # (observations themselves are unchanged).
        low = np.repeat(self.observation_space.low, n_frames, axis=0)
        high = np.repeat(self.observation_space.high, n_frames, axis=0)
        self.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.observation_space.dtype)

    def reset(self, **kwargs):
        """Reset the env and seed the stack with n_frames copies of the first observation."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.n_frames):
            self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), info

    def step(self, action):
        """Step the env, push the newest frame, and return the stacked observation."""
        obs, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return np.concatenate(list(self.frames), axis=0), reward, terminated, truncated, info



class TransposeObservation(gym.ObservationWrapper):
    """Reorder image observations from HWC to CHW (the layout PyTorch expects)."""

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return np.transpose(observation, (2, 0, 1))


def test_net(net, env, count=10, device="cpu"):
    """Play ``count`` greedy episodes and return (mean reward, mean steps).

    An episode is abandoned early if the greedy policy repeats the same
    action more than 100 times in a row (anti-stall guard); its partial
    reward and step counts are still included in the averages.
    """
    total_reward = 0.0
    total_steps = 0
    repeat_count = 0
    last_action = None
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            done = False
            while not done:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                action = net(obs_v).squeeze(dim=0).data.cpu().argmax().item()
                if last_action == action:
                    repeat_count += 1
                    if repeat_count > 100:
                        break  # policy is stuck on one action; abandon the episode
                else:
                    repeat_count = 0
                    last_action = action
                obs, reward, terminated, truncated, _ = env.step(action)
                # env.render()
                done = terminated or truncated
                total_reward += reward
                total_steps += 1
    return total_reward / count, total_steps / count


def calc_adv_ref(trajectory, net_crt, states_v, device="cpu"):
    """Compute GAE advantages and reference (target) values for a trajectory.

    :param trajectory: list of 1-tuples, each wrapping an experience with
        ``.reward`` and ``.done`` attributes
    :param net_crt: critic network mapping states to value estimates
    :param states_v: tensor of states aligned with ``trajectory``
    :param device: device to place the resulting tensors on
    :return: (advantages tensor, reference values tensor), each of length
        one less than the value sequence (the last state only bootstraps)
    """
    with torch.no_grad():
        values_v = net_crt(states_v)  # predicted state values
    values = values_v.squeeze().data.cpu().numpy()

    advantages = []
    references = []
    gae = 0.0
    # Walk the trajectory back-to-front, pairing each value with its successor.
    # Working backwards lets the smoothed advantage accumulate future terms.
    for val, next_val, (exp,) in zip(values[:-1][::-1], values[1:][::-1],
                                     trajectory[:-1][::-1]):
        if exp.done:
            # Terminal transition: no bootstrap, and the running GAE resets here.
            gae = exp.reward - val
        else:
            # One-step TD error (Bellman residual) ...
            td_error = exp.reward + GAMMA * next_val - val
            # ... smoothed with the discounted running GAE term, so each step's
            # advantage folds in the (already processed) future advantages.
            gae = td_error + GAMMA * GAE_LAMBDA * gae
        advantages.append(gae)
        references.append(gae + val)

    # The lists were built back-to-front; flip them before tensor conversion.
    adv_v = torch.FloatTensor(advantages[::-1]).to(device)
    ref_v = torch.FloatTensor(references[::-1]).to(device)
    return adv_v, ref_v

def ppo_states_preprocessor(states):
    """Convert a list of per-environment state arrays into a batched tensor.

    A single state becomes shape ``[1, *state_shape]``; multiple states are
    stacked into ``[N, *state_shape]``.

    :param states: list of numpy arrays holding states
    :return: torch tensor batching the states
    """
    if len(states) == 1:
        np_states = np.expand_dims(states[0], 0)
    else:
        # BUG FIX: np.array(..., copy=False) raises ValueError under NumPy 2.x
        # whenever a copy is unavoidable (stacking a list always copies).
        # np.asarray keeps the old "copy only if needed" semantics.
        np_states = np.asarray(states)
    # .copy() guarantees a contiguous, positively-strided buffer for torch.
    return torch.tensor(np_states.copy())


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Apply the standard DQN-style Atari preprocessing stack to ``env``.

    The wrapper order matters: life handling and no-op resets must happen
    before frame processing and stacking.

    NOTE(review): ``reward_clipping`` is accepted but never used — confirm
    whether clipping was meant to be applied (RewardPenaltyWrapper is added
    unconditionally instead).
    """
    if episodic_life:
        # Present a multi-life game as single-life episodes.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomized no-op actions on reset to diversify starting states.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
    # Frame-skip wrapper (disabled here; frameskip is presumably handled by
    # the env itself — see wrap_dqn_sync's gym.make(frameskip=4)).
    # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = ptan.common.wrappers.FireResetEnv(env)
    env = common.ProcessFrame84(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = common.FrameStack(env, stack_frames)
    env = common.RewardPenaltyWrapper(env)
    return env


def wrap_dqn_sync(ENV_ID, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Return a zero-argument factory (thunk) that builds a wrapped Atari env.

    The thunk form matches what vectorized-env constructors expect.

    NOTE(review): the ``ENV_ID`` parameter shadows the module-level constant
    of the same name, and ``reward_clipping`` is accepted but never used —
    confirm both are intentional.
    """
    def thunk():
        env = gym.make(ENV_ID, frameskip=4, repeat_action_probability=0.0)
        if episodic_life:
            # Present a multi-life game as single-life episodes.
            env = ptan.common.wrappers.EpisodicLifeEnv(env)
        # Randomized no-op actions on reset to diversify starting states.
        env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)
        # Frame-skip wrapper (disabled; gym.make above already uses frameskip=4).
        # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)

        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = ptan.common.wrappers.FireResetEnv(env)
        env = common.ProcessFrame84(env)
        env = ptan.common.wrappers.ImageToPyTorch(env)
        env = common.FrameStack(env, stack_frames)
        env = common.RewardPenaltyWrapper(env)
        return env
    return thunk


def select_device(args):
    """Pick the torch device: CUDA, then MPS, when ``args.cuda`` is set; else CPU."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


def categorical_kl(p1, p2):
    """Mean KL divergence KL(p1 || p2) over batches of categorical distributions.

    Both probability tensors are clamped below at 1e-4 so the log stays finite.
    """
    safe_p1 = torch.clamp_min(p1, 0.0001)
    safe_p2 = torch.clamp_min(p2, 0.0001)
    per_sample_kl = (safe_p1 * (safe_p1 / safe_p2).log()).sum(dim=-1)
    return per_sample_kl.mean()


def train_critic(states_v, actions_v, policies_v, rewards_v, dones_v, obs_shape, opt_crt, mpo_act_model, target_mpo_act_model, mpo_crt_model):
    """Run one critic (Q-network) gradient step on a sampled trajectory.

    :param states_v: (T, ...) states; the last entry is reserved for bootstrapping
    :param actions_v: (T,) actions taken
    :param policies_v: (T, action_size) behavior-policy probabilities
    :param rewards_v: (T,) rewards
    :param dones_v: (T,) done flags (1.0 when the episode ended at that step)
    :param obs_shape: observation shape (currently unused)
    :param opt_crt: optimizer over ``mpo_crt_model``'s parameters
    :param mpo_act_model: current actor network
    :param target_mpo_act_model: target actor network
    :param mpo_crt_model: critic network being trained
    :return: detached scalar critic loss
    """
    state_last_v = states_v[-1]  # final state, used only for the bootstrap value
    states_batch_v = states_v[:-1]
    actions_batch_v = actions_v[:-1]
    policies_batch_v = policies_v[:-1]
    rewards_batch_v = rewards_v[:-1]

    action_size = policies_batch_v.shape[-1]
    nsteps = states_batch_v.shape[0]
    n_envs = 1  # single environment for now (see the multi-env TODO)

    opt_crt.zero_grad()
    with torch.no_grad():
        policies, a_log_prob, _ = mpo_act_model.evaluate_action(states_batch_v, actions_batch_v.view(-1, 1))
        target_policies, _, _ = target_mpo_act_model.evaluate_action(states_batch_v, actions_batch_v.view(-1, 1))

    qval = mpo_crt_model(states_batch_v)
    # State value under the current policy (NOTE(review): not used by the loss below).
    val = (qval * policies).sum(1, keepdim=True)

    old_policies = policies_batch_v.view(-1, action_size)
    policies = policies.view(-1, action_size)
    target_policies = target_policies.view(-1, action_size)

    val = val.view(-1, 1)
    qval = qval.view(-1, action_size)
    a_log_prob = a_log_prob.view(-1, 1)
    actions = actions_batch_v.view(-1, 1)

    q_i = qval.gather(1, actions.long())
    # Importance ratios (Retrace/ACER leftovers; NOTE(review): also unused by the loss).
    rho = policies / (old_policies + 1e-10)
    rho_i = rho.gather(1, actions.long())

    with torch.no_grad():
        next_qval = mpo_crt_model(state_last_v.unsqueeze(0)).detach()
        next_policies = mpo_act_model.get_action_prob(state_last_v.unsqueeze(0)).detach()
        next_val = (next_qval * next_policies).sum(1, keepdim=True)

    q_retraces = rewards_batch_v.new(nsteps + 1, n_envs, 1).zero_()
    # NOTE(review): this bootstrap entry is dropped by q_retraces[:-1] below and the
    # loop never reads it, so next_val currently has no effect — confirm intended.
    q_retraces[-1] = next_val

    # Reward scaling (standardize to zero mean / unit variance).
    rewards_batch_v = (rewards_batch_v - rewards_batch_v.mean()) / (rewards_batch_v.std() + 1e-8)

    # Discounted-return targets: q_ret[step] = r[step] + sum_{t>step} γ^(t-step) r[t] (1 - done[t]).
    for step in reversed(range(nsteps)):
        # BUG FIX: rewards_batch_v[step] is a 0-dim view; the previous in-place
        # `q_ret += ...` silently mutated rewards_batch_v during the reversed loop,
        # corrupting the rewards read by later (smaller-step) iterations.
        q_ret = rewards_batch_v[step].clone()
        for t in range(step + 1, nsteps):
            q_ret += (γ ** (t - step)) * rewards_batch_v[t] * (1 - dones_v[t])
        q_retraces[step] = q_ret

    q_retraces = q_retraces[:-1]
    q_retraces = q_retraces.view(-1, 1)

    # 0.5 * MSE between the taken-action Q estimates and the return targets.
    q_loss = (q_i - q_retraces.detach()).pow(2).mean() * 0.5
    q_loss.backward()
    # BUG FIX: clip the gradients of the critic passed as a parameter; this line
    # previously referenced the module-level global `mpo_crtitic_model`, which
    # only worked by accident when the file ran as a script.
    torch.nn.utils.clip_grad_norm_(mpo_crt_model.parameters(), 5.0)
    opt_crt.step()

    return q_loss.detach()



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' means the flag
    # is effectively always on; passing --cuda cannot turn it off.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default='humancannonball', help="Name of the run")
    args = parser.parse_args()
    device = select_device(args=args)

    save_path = os.path.join("saves", "mpo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # TODO: add multi-process environment code later; one reference implementation
    # wraps the env directly in a multiprocessing vector wrapper.
    env = wrap_dqn_sync(ENV_ID=ENV_ID, episodic_life=True)()
    test_env = wrap_dqn_sync(ENV_ID=ENV_ID, episodic_life=True)()

    # Build the actor (action-prediction) and critic networks, each paired with a
    # Polyak-averaged target copy (synced via alpha_sync after every update).
    mpo_act_model = model.ActorModel(env.observation_space.shape, env.action_space.n).to(device)
    target_mpo_act_model = ptan.agent.TargetNet(mpo_act_model)
    mpo_crtitic_model = model.CriticModel(env.observation_space.shape, env.action_space.n).to(device)
    target_mpo_crtitic_model = ptan.agent.TargetNet(mpo_crtitic_model)

    print(mpo_act_model)
    print(mpo_crtitic_model)

    writer = SummaryWriter(comment="-mpo-" + args.name)
    # The agent acts with the TARGET actor; the online actor is trained below.
    agent = model.MPOAgent(target_mpo_act_model.target_model, device=device)
    exp_source = ptan.experience.ExperienceSourceRAW(env, agent, steps_count=1)

    opt_act = optim.Adam(mpo_act_model.parameters(), lr=LEARNING_RATE, eps=1e-5)
    scheduler_act = optim.lr_scheduler.StepLR(opt_act, step_size=50000, gamma=0.9)
    opt_crt = optim.Adam(mpo_crtitic_model.parameters(), lr=LEARNING_RATE, eps=1e-5)
    scheduler_crt = optim.lr_scheduler.StepLR(opt_crt, step_size=50000, gamma=0.9)

    # η: temperature of the E-step dual; η_kl: Lagrange multiplier of the M-step KL bound.
    η = np.random.rand()
    η_kl = 0.0

    # NOTE(review): frame_idx is restored from checkpoints but never incremented
    # during training — confirm whether step_idx was meant to be folded into it.
    frame_idx = 0
    train_count = 0
    trajectory = []
    mean_q_loss = 0.0
    mean_policy = 0.0
    best_reward = None

    # Resume from the newest checkpoint if one exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint filenames carry the epoch number in the third '_'-separated
        # field; sort numerically so [-1] is the latest.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))

        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            mpo_act_model.load_state_dict(checkpoint['mpo_act_model'])
            mpo_crtitic_model.load_state_dict(checkpoint['mpo_crtitic_model'])
            target_mpo_act_model.target_model.load_state_dict(checkpoint['target_mpo_act_model'])
            target_mpo_crtitic_model.target_model.load_state_dict(checkpoint['target_mpo_crtitic_model'])
            opt_act.load_state_dict(checkpoint['opt_act'])
            opt_crt.load_state_dict(checkpoint['opt_crt'])
            scheduler_act.load_state_dict(checkpoint['scheduler_act'])
            scheduler_crt.load_state_dict(checkpoint['scheduler_crt'])
            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']
            η = checkpoint['η']
            η_kl = checkpoint['η_kl']

            print("加载模型成功")
            # Print the current learning rates.
            print("ACT学习率：", scheduler_act.get_lr())
            print("CRT学习率：", scheduler_crt.get_lr())

    logger = common.setup_logger(save_path)
    def get_track_info(info):
        logger.info(info)

    with ptan.common.utils.RewardTracker(writer, info_callback=get_track_info) as tracker:
        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), frame_idx + step_idx)
                tracker.reward(np.mean(rewards), frame_idx + step_idx)

            # Accumulate transitions until a full trajectory is collected.
            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            # Each exp is a 1-tuple wrapping (state, action, reward, done,
            # next_state, (policy, ...)) — presumably the ExperienceSourceRAW
            # layout; verify against the ptan/model code.
            traj_states = [t[0][0] for t in trajectory]
            traj_actions = [t[0][1] for t in trajectory]
            traj_next_state = [t[0][4] for t in trajectory]
            traj_rewards = [t[0][2] for t in trajectory]
            traj_done = [t[0][3] for t in trajectory]
            traj_policies = [t[0][5][0] for t in trajectory]

            traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
            traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
            traj_next_state_v = torch.FloatTensor(np.array(traj_next_state)).to(device)
            traj_rewards_v = torch.FloatTensor(np.array(traj_rewards)).to(device)
            traj_done_v = torch.FloatTensor(np.array(traj_done)).to(device)
            traj_policies_v = torch.stack(traj_policies).to(device)


            # Train the critic (Q-network).
            q_loss = train_critic(traj_states_v, traj_actions_v, traj_policies_v, traj_rewards_v, traj_done_v, env.observation_space.shape[0], opt_crt, mpo_act_model, target_mpo_act_model.target_model, mpo_crtitic_model)
            mean_q_loss += q_loss

            # E-step: evaluate the target actor and target critic on the batch.
            with torch.no_grad():
                actions = torch.arange(env.action_space.n)[..., None].to(device)
                b_p = target_mpo_act_model.target_model(traj_states_v)
                b = Categorical(probs=b_p)
                b_prob = b.expand((env.action_space.n, (TRAJECTORY_SIZE * 1))).log_prob(actions).exp()
                target_q = target_mpo_crtitic_model.target_model(traj_states_v)
                target_q = target_q.transpose(0, 1)
                b_prob_np = b_prob.cpu().numpy()
                target_q_np = target_q.cpu().numpy()

            def dual(η):
                # Dual of the non-parametric E-step, minimized over the
                # temperature η; max_q is subtracted for numerical stability
                # (log-sum-exp trick).
                max_q = np.max(target_q_np, 0)
                return η * ε + np.mean(max_q) + η * np.mean(np.log(np.sum(b_prob_np * np.exp((target_q_np - max_q) / η), axis=0)))

            bounds = [(1e-6, None)]
            res = minimize(dual, np.array([η]), method='SLSQP', bounds=bounds)
            η = res.x[0]

            # Non-parametric variational distribution q(a|s) from the optimal temperature.
            qij = torch.softmax(target_q / η, dim=0)

            # M-step: fit the parametric policy to qij under a KL trust region,
            # updating the Lagrange multiplier η_kl on the way.
            for _ in range(LAGRANGE_IT):
                π_p = mpo_act_model(traj_states_v)
                π = Categorical(probs=π_p)
                loss_p = torch.mean(qij * π.expand((env.action_space.n, (TRAJECTORY_SIZE * 1))).log_prob(actions))

                kl = categorical_kl(p1=π_p, p2=b_p)
                # Gradient ascent on the multiplier; clamp at zero (KKT condition).
                η_kl -= α * (ε_kl - kl).detach().item()

                if η_kl < 0:
                    η_kl = 0

                opt_act.zero_grad()
                loss_policy = -(loss_p + η_kl * (ε_kl - kl))
                loss_policy.backward()
                torch.nn.utils.clip_grad_norm_(mpo_act_model.parameters(), 5.0)
                opt_act.step()
                mean_policy += loss_policy.item()


            # Soft-update the target networks toward the online networks.
            target_mpo_act_model.alpha_sync(alpha=0.99)
            target_mpo_crtitic_model.alpha_sync(alpha=0.99)
            train_count += 1
            trajectory.clear()

            # NOTE(review): a checkpoint is written after EVERY update — heavy disk
            # traffic; consider saving every TEST_ITERS updates instead.
            checkpoint = {
                'mpo_act_model': mpo_act_model.state_dict(),
                'mpo_crtitic_model': mpo_crtitic_model.state_dict(),
                'target_mpo_act_model': target_mpo_act_model.target_model.state_dict(),
                'target_mpo_crtitic_model': target_mpo_crtitic_model.target_model.state_dict(),
                'opt_act': opt_act.state_dict(),
                'opt_crt': opt_crt.state_dict(),
                'scheduler_act': scheduler_act.state_dict(),
                'scheduler_crt': scheduler_crt.state_dict(),
                'frame_idx': frame_idx,
                'train_count': train_count,
                'η': η,
                'η_kl': η_kl,
            }

            common.save_checkpoints(train_count, checkpoint, save_path, "mpo-")

            # NOTE(review): the `True or` forces a test after every single update,
            # making TEST_ITERS dead — presumably leftover debugging; confirm.
            if True or train_count % TEST_ITERS == 0:
                ts = time.time()
                mpo_act_model.eval()
                rewards, steps = test_net(mpo_act_model, test_env, count=5, device=device)
                mpo_act_model.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + frame_idx)
                writer.add_scalar("test_steps", steps, step_idx + frame_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                        # NOTE(review): fname is built but never used; the actual
                        # save goes through common.save_best_model below — confirm.
                        name = "best_%+.3f_%d.dat" % (rewards, step_idx + frame_idx)
                        fname = os.path.join(save_path, name)
                    best_reward = rewards
                    common.save_best_model(rewards, mpo_act_model.state_dict(), save_path, 'mpo-best-')