"""
@author: Viet Nguyen <nhviet1009@gmail.com>

参考链接：https://github.com/vietnh1009/Super-mario-bros-PPO-pytorch/blob/master/src/env.py#L136

已适配，需要安装nes-py和gym-super-mario-bros
在主力机上训练
训练记录：
20241227:Episode: 1957. Total loss: 3.423954963684082，测试显示依旧无法过第一个怪物
20241228:Episode: 2015. Total loss: 3.3431591987609863,测试显示一直无法过第一个怪物，停止训练找到原因
"""

import os

os.environ['OMP_NUM_THREADS'] = '1'
import argparse
import torch
from lib.env import MultipleEnvironments
from lib.model import PPO
from lib.process import eval
import torch.multiprocessing as _mp
from torch.distributions import Categorical
import torch.nn.functional as F
import lib.common as common
import numpy as np
import shutil
import torch.multiprocessing as mp


def get_args():
    """Parse command-line hyperparameters for PPO Super Mario Bros training.

    Returns:
        argparse.Namespace: parsed arguments; all defaults are valid,
        correctly-typed values.
    """
    parser = argparse.ArgumentParser(
        """Implementation of model described in the paper: Proximal Policy Optimization Algorithms for Super Mario Bros""")
    parser.add_argument("--world", type=int, default=1)
    parser.add_argument("--stage", type=int, default=1)
    parser.add_argument("--action_type", type=str, default="simple")
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
    parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
    parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
    parser.add_argument('--epsilon', type=float, default=0.2, help='parameter for Clipped Surrogate Objective')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--num_epochs', type=int, default=10)
    parser.add_argument("--num_local_steps", type=int, default=512)
    # NOTE: argparse applies `type` only to user-supplied values, never to the
    # default — `default=5e6` would silently stay a float, so use an int literal.
    parser.add_argument("--num_global_steps", type=int, default=5_000_000)
    parser.add_argument("--num_processes", type=int, default=8)
    parser.add_argument("--save_interval", type=int, default=50, help="Number of steps between savings")
    parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
    parser.add_argument("--log_path", type=str, default="tensorboard/ppo_super_mario_bros")
    parser.add_argument("--saved_path", type=str, default="trained_models")
    parser.add_argument('--cuda', action='store_true', default=False, help='enables CUDA training')
    args = parser.parse_args()
    return args


def select_device(args):
    """Pick the training device based on the --cuda flag and available hardware.

    Preference order when ``--cuda`` is set: CUDA, then Apple MPS, then CPU.
    Without ``--cuda`` the CPU is always used.

    Args:
        args: parsed namespace; only ``args.cuda`` is read.

    Returns:
        torch.device: the selected device.
    """
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    # Older / CPU-only torch builds may lack the mps backend attribute entirely,
    # so probe it defensively before calling is_available().
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend is not None and mps_backend.is_available():
        return torch.device("mps")
    return torch.device("cpu")


def train(opt):
    """Run the PPO training loop for Super Mario Bros.

    Spawns ``opt.num_processes`` worker environments, collects
    ``opt.num_local_steps`` transitions per iteration, computes GAE returns,
    and optimizes the clipped PPO objective for ``opt.num_epochs`` epochs over
    ``opt.batch_size`` minibatches. A separate evaluation process runs
    concurrently on the shared model. Checkpoints are saved once per episode.

    Args:
        opt: parsed command-line arguments from :func:`get_args`.
    """
    save_path = os.path.join("saves", "third-ppo-super-mario")
    os.makedirs(save_path, exist_ok=True)

    # Seed for reproducibility (GPU generator when CUDA is present).
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    # Start every run with a fresh tensorboard log directory.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    # Dedicated "spawn" context; named `ctx` to avoid shadowing the
    # module-level `mp` (torch.multiprocessing) import.
    ctx = _mp.get_context("spawn")
    envs = MultipleEnvironments(opt.world, opt.stage, opt.action_type, opt.num_processes)
    model = PPO(envs.num_states, envs.num_actions)
    if torch.cuda.is_available():
        model.cuda()
    model.share_memory()
    # Concurrent evaluation process that shares the model weights.
    process = ctx.Process(target=eval, args=(opt, model, envs.num_states, envs.num_actions))
    process.start()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # Reset every worker and stack their first observations into one batch.
    for agent_conn in envs.agent_conns:
        agent_conn.send(("reset", None))
    curr_states = [states[0] for states in [agent_conn.recv() for agent_conn in envs.agent_conns]]
    curr_states = torch.from_numpy(np.concatenate(curr_states, 0))
    if torch.cuda.is_available():
        curr_states = curr_states.cuda()

    # Resume from the newest checkpoint if one exists.
    curr_episode = 0
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]),
                                map_location="cuda" if torch.cuda.is_available() else "cpu",
                                weights_only=False)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        curr_episode = checkpoint['epoch']
        print("加载模型成功")
        print("Learning Rate:", optimizer.param_groups[0]['lr'])

    while True:
        curr_episode += 1
        old_log_policies = []
        actions = []
        values = []
        states = []
        rewards = []
        dones = []

        # ---- Rollout: collect num_local_steps transitions from all workers ----
        for _ in range(opt.num_local_steps):
            states.append(curr_states)
            logits, value = model(curr_states)
            values.append(value.squeeze())
            policy = F.softmax(logits, dim=1)
            old_m = Categorical(policy)
            action = old_m.sample()
            actions.append(action)
            old_log_policies.append(old_m.log_prob(action))
            acts = action.cpu() if torch.cuda.is_available() else action
            for agent_conn, act in zip(envs.agent_conns, acts):
                agent_conn.send(("step", act))

            state, reward, done, trunc, info = zip(*[agent_conn.recv() for agent_conn in envs.agent_conns])
            # BUG FIX: `done or trunc` on two tuples just returns the first
            # (non-empty) tuple, silently discarding every truncation flag;
            # merge the terminated/truncated flags element-wise instead.
            done = tuple(d or t for d, t in zip(done, trunc))
            state = torch.from_numpy(np.concatenate(state, 0))
            # torch.cuda.FloatTensor is deprecated: build on CPU, then move.
            reward = torch.FloatTensor(reward)
            done = torch.FloatTensor(done)
            if torch.cuda.is_available():
                state = state.cuda()
                reward = reward.cuda()
                done = done.cuda()
            rewards.append(reward)
            dones.append(done)
            curr_states = state

        # ---- Generalized Advantage Estimation (walk the rollout backwards) ----
        _, next_value = model(curr_states)
        next_value = next_value.squeeze()
        old_log_policies = torch.cat(old_log_policies).detach()
        actions = torch.cat(actions)
        values = torch.cat(values).detach()
        states = torch.cat(states)
        gae = 0
        R = []
        for value, reward, done in list(zip(values, rewards, dones))[::-1]:
            gae = gae * opt.gamma * opt.tau
            gae = gae + reward + opt.gamma * next_value.detach() * (1 - done) - value.detach()
            next_value = value
            R.append(gae + value)
        R = R[::-1]
        R = torch.cat(R).detach()
        advantages = R - values

        # ---- PPO optimization over shuffled minibatches ----
        batch_total = opt.num_local_steps * opt.num_processes
        for _ in range(opt.num_epochs):
            indice = torch.randperm(batch_total)
            for j in range(opt.batch_size):
                batch_indices = indice[int(j * (batch_total / opt.batch_size)):
                                       int((j + 1) * (batch_total / opt.batch_size))]
                logits, value = model(states[batch_indices])
                new_policy = F.softmax(logits, dim=1)
                new_m = Categorical(new_policy)
                new_log_policy = new_m.log_prob(actions[batch_indices])
                ratio = torch.exp(new_log_policy - old_log_policies[batch_indices])
                # Clipped surrogate objective from the PPO paper.
                actor_loss = -torch.mean(torch.min(
                    ratio * advantages[batch_indices],
                    torch.clamp(ratio, 1.0 - opt.epsilon, 1.0 + opt.epsilon) * advantages[batch_indices]))
                critic_loss = F.smooth_l1_loss(R[batch_indices], value.squeeze())
                entropy_loss = torch.mean(new_m.entropy())
                total_loss = actor_loss + critic_loss - opt.beta * entropy_loss
                optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
                optimizer.step()

        # BUG FIX: checkpointing used to run inside the innermost minibatch
        # loop, rewriting the same per-episode file hundreds of times per
        # episode; save exactly once per episode instead.
        checkpoint = {
            'epoch': curr_episode,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        common.save_checkpoints(curr_episode, checkpoint, save_path, "third-ppo-super-mario", keep_last=5)
        print("Episode: {}. Total loss: {}".format(curr_episode, total_loss))


if __name__ == "__main__":
    # Worker environments and the eval process require the "spawn" start method.
    mp.set_start_method('spawn')
    train(get_args())


'''
训练无法进步、越不过第一个怪物可能由以下原因导致：

1. 动作空间不合适  
   - 如果您的动作空间定义过于简单 (e.g., 只有左右移动，没有跳跃)，或动作不够丰富，可能导致无法学会跳过怪物。  
   - 建议检查 action_type 是否包含跳跃动作。

2. 回报设计不足  
   - 如果回报 (reward) 只根据通关或死亡来计算，可能过于稀疏，导致模型在早期无法获得正确的策略信号。  
   - 建议添加中间奖励，例如前进距离、踩到怪物、或到达新的区域。

3. 超参数设置不当  
   - 学习率过高或过低会影响收敛质量。可以尝试减小或增大学习率 (--lr)、GAE 参数 (--tau)、或熵系数 (--beta)。  
   - 训练批量大小 (--batch_size)、更新次数 (--num_epochs) 也会影响收敛。

4. 环境回合过短  
   - 如果环境回合限制太短 (e.g., max_actions 很小或因为死亡过早导致训练数据少)，模型无法得到足够的探索。  
   - 可尝试增大 max_actions 或在环境中加大生命值等。

5. 多进程问题  
   - 多进程间数据汇总或同步不当，可能导致学习效果不稳定。确保每个进程的数据都能正确汇总到全局。

6. 随机种子与观测预处理  
   - 检查是否正确预处理输入图像：是否转换为 float、是否裁剪/缩放；输入错误可能导致模型无法识别关键信息。  
   - 也可以尝试不同的随机种子，排除偶然性。

可尝试以下思路排查和改善：
• 确认动作空间包含跳跃、加速等必要动作。  
• 为部分关键行为（如消灭怪物、到达关卡中点等）提供小额奖励。  
• 调整学习率、熵系数、打折因子等超参数。  
• 打印或记录智能体做出的动作，查看是否有真正尝试跳跃或撞怪。  
• 加大训练时间或增大单次训练回合的步数。  

通过逐项排查，可找出无法越过怪物的主要原因并改进策略学习。
'''