#!/usr/bin/env python3
'''
Adaptation finished, but validation fails -- the model has problems.
Modified from https://github.com/ayghri/TRPO-ATARI/blob/master/trpo.py
Other TRPO reference implementations: https://github.com/chainer/chainerrl and https://github.com/ikostrikov/pytorch-trpo/blob/master/main.py

Training log:
Trained on machine #2.
20250201: trained for one day; the test score hovers around 15-16 and the
training score never breaks 30 -- the model/setup is wrong and needs rework.
'''
import os
import math
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, trpo, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from typing import Any
import ale_py


# Make the ALE (Atari) environments known to gymnasium.
gym.register_envs(ale_py)
ENV_ID = "CartPole-v1"
GAMMA = 0.99       # discount factor
GAE_LAMBDA = 0.95  # smoothing factor for Generalized Advantage Estimation
NUM_ENVS = 16      # number of parallel environments in the vectorized env

TRAJECTORY_SIZE = 2049 # trust-region policy optimization can afford a long trajectory buffer
LEARNING_RATE_CRITIC = 1e-3

# NOTE(review): these two are not referenced in this file -- presumably
# consumed by lib.trpo; confirm there.
TRPO_MAX_KL = 0.01
TRPO_DAMPING = 0.1

TEST_ITERS = 100   # evaluate the policy every TEST_ITERS training iterations

class FireResetEnv(gym.Wrapper):
    """Press FIRE after reset so games that wait on a splash screen start.

    Some Atari games do nothing until the FIRE action (and sometimes a
    second action) is taken after a reset; this wrapper performs those
    presses so every episode begins in a playable state.
    """

    def __init__(self, env=None):
        super(FireResetEnv, self).__init__(env)
        # Games that need this treatment expose FIRE as action 1 and
        # have at least three actions in total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Normal steps pass straight through to the wrapped env.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset the env, then press actions 1 and 2 to start the game.

        If a press happens to terminate the episode, reset again and keep
        the fresh observation.  FIX: the original returned the stale
        post-done observation from step(2) in that case, and discarded the
        observation of the intermediate resets.
        """
        obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: rescales the raw game score and replaces the
    reward with a fixed penalty/bonus when a life is lost/gained.

    NOTE(review): ``frame_penalty`` is stored but never applied anywhere in
    this class -- presumably a per-frame living penalty was planned; confirm
    before relying on it.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty          # unused (see class note)
        self.life_loss_penalty = life_loss_penalty  # reward substituted on life loss
        self.previous_lives = 0                     # lives observed on the previous step

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        if reward != 0:
            reward /= 1000.0  # scale down the raw game score
            if reward == 0:
                # NOTE(review): effectively dead code -- a nonzero float
                # divided by 1000 is not 0 (barring underflow).  Was the
                # intent to clamp tiny rewards to 1?  Confirm.
                reward = 1

        # Life lost: substitute the penalty.  Life gained: substitute the
        # symmetric bonus (life_loss_penalty is negative, so -penalty > 0).
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward = self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward = -self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info


def wrap_dqn(ENV_ID):
    """Return a zero-argument factory that builds a fresh environment.

    Note: despite the name, no DQN-style wrappers are applied here -- the
    factory simply calls ``gym.make``.  The upper-case parameter name is
    kept because callers pass it by keyword (``wrap_dqn(ENV_ID=...)``).
    """
    return lambda: gym.make(ENV_ID)


def test_net(net, env, count=10, device="cpu"):
    """Play ``count`` greedy episodes and return (mean reward, mean steps).

    Actions are chosen deterministically as the argmax of the policy
    distribution; gradients are disabled for the whole evaluation.
    """
    total_reward = 0.0
    total_steps = 0
    with torch.no_grad():
        for _ in range(count):
            obs, _ = env.reset()
            done = trunc = False
            while not (done or trunc):
                obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
                probs_v = F.softmax(net(obs_v), dim=1)
                action = torch.argmax(probs_v, dim=1).item()
                obs, reward, done, trunc, _ = env.step(action)
                total_reward += reward
                total_steps += 1
    return total_reward / count, total_steps / count


def calc_logprob(logstd_v, actions_v):
    """Return the log-probability of each taken action under the policy.

    ``logstd_v`` actually holds the discrete-action logits (the name is a
    leftover from a continuous-action version of this code); ``actions_v``
    holds the taken action indices (possibly as floats).
    """
    # -cross_entropy(logits, target) == log_softmax(logits)[target],
    # computed row-wise without reduction.
    return -F.cross_entropy(logstd_v, actions_v.long(), reduction='none')


def calc_adv_ref(trajectory, net_crt, states_v, device="cpu"):
    """Compute GAE advantages and critic reference values.

    The critic evaluates V(s) for every state, then the trajectory is
    walked backwards accumulating the generalized advantage estimator (a
    smoothed advantage) and the bootstrapped value targets.  The final
    transition is dropped because it has no successor value, so both
    returned tensors have length len(trajectory) - 1.
    """
    values = net_crt(states_v).squeeze().data.cpu().numpy()
    advantages = []
    refs = []
    gae = 0.0
    last = min(len(values), len(trajectory)) - 1
    for idx in range(last - 1, -1, -1):
        (exp,) = trajectory[idx]
        if exp.done:
            # Episode boundary: no bootstrapping across it.
            gae = exp.reward - values[idx]
        else:
            delta = exp.reward + GAMMA * values[idx + 1] - values[idx]
            gae = delta + GAMMA * GAE_LAMBDA * gae
        advantages.append(gae)
        refs.append(gae + values[idx])
    adv_v = torch.FloatTensor(list(reversed(advantages))).to(device)
    ref_v = torch.FloatTensor(list(reversed(refs))).to(device)
    return adv_v, ref_v


def select_device(args):
    """Pick the torch device for this run.

    When ``args.cuda`` is set, prefer CUDA, then Apple MPS; otherwise
    (or when neither backend is available) fall back to the CPU.
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): with default=True and action='store_true' this flag is
    # always True, so passing --cuda changes nothing; kept as-is so existing
    # invocations keep selecting the accelerator.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="battlezone", help="Name of the run")
    args = parser.parse_args()
    device = select_device(args)

    save_path = os.path.join("saves", "trpo-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # One vectorized env for experience collection, one plain env for tests.
    env = gym.vector.SyncVectorEnv([wrap_dqn(ENV_ID=ENV_ID) for _ in range(NUM_ENVS)])
    test_env = wrap_dqn(ENV_ID=ENV_ID)()

    # Actor network: maps observations to action logits.
    net_act = model.TropActor(test_env.observation_space.shape[0], test_env.action_space.n).to(device)
    # Critic network: maps observations to state values.
    net_crt = model.TrpoCritic(test_env.observation_space.shape[0]).to(device)
    print(net_act)
    print(net_crt)

    writer = SummaryWriter(comment="-trpo_" + args.name)
    agent = ptan.agent.PolicyAgent(net_act, apply_softmax=True, device=device)
    exp_source = ptan.experience.ExperienceSource(env, agent, steps_count=1, vectorized=True)

    # Only the critic uses a plain optimizer; the actor is updated by
    # trpo.trpo_step with a constrained natural-gradient step.
    opt_crt = optim.Adam(net_crt.parameters(), lr=LEARNING_RATE_CRITIC)

    start_idx = 0
    train_count = 0
    # Resume from the newest "epoch" checkpoint, if one exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        # FIX: guard against a save dir that holds only best-model files
        # (no "epoch" checkpoints) -- checkpoints[-1] would raise IndexError.
        if checkpoints:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            net_act.load_state_dict(checkpoint['net_act'])
            net_crt.load_state_dict(checkpoint['net_crt'])
            opt_crt.load_state_dict(checkpoint['opt_crt'])
            start_idx = checkpoint['start_idx']
            train_count = checkpoint['train_count']
            print("加载模型成功")
            print("学习率：", opt_crt.param_groups[0]['lr'])
            print("train_count:", train_count)

    # Experience trajectory; unlike replay-buffer methods the transitions
    # must stay in their original consecutive order (GAE needs it).
    trajectory = []
    best_reward = None
    with ptan.common.utils.RewardTracker(writer) as tracker:
        for step_idx, exp in enumerate(exp_source):
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                writer.add_scalar("episode_steps", np.mean(steps), step_idx + start_idx)
                tracker.reward(np.mean(rewards), step_idx + start_idx)

            trajectory.append(exp)
            if len(trajectory) < TRAJECTORY_SIZE:
                continue

            traj_states = np.array([t[0].state for t in trajectory])
            traj_actions = np.array([t[0].action for t in trajectory])
            traj_states_v = torch.FloatTensor(traj_states).to(device)
            traj_actions_v = torch.FloatTensor(traj_actions).to(device)
            # Reference logits of the pre-update policy, used by get_kl.
            with torch.no_grad():
                traj_logits0_v = net_act(traj_states_v[:-1])
            traj_adv_v, traj_ref_v = calc_adv_ref(trajectory, net_crt, traj_states_v, device=device)
            # Log-probabilities of the actions actually taken, under the
            # pre-update policy (detached below).
            logits_v = net_act(traj_states_v)
            old_logprob_v = calc_logprob(logits_v, traj_actions_v)

            # Normalize advantages to zero mean / unit variance.
            traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / torch.std(traj_adv_v)

            # Drop the last entry: adv and ref were computed without it.
            trajectory = trajectory[:-1]
            old_logprob_v = old_logprob_v[:-1].detach()
            traj_states_v = traj_states_v[:-1]
            traj_actions_v = traj_actions_v[:-1]
            sum_loss_value = 0.0
            sum_loss_policy = 0.0
            count_steps = 0

            # Critic step: regress V(s) onto the GAE-based reference values.
            for _ in range(3):
                opt_crt.zero_grad()
                value_v = net_crt(traj_states_v)
                loss_value_v = F.mse_loss(value_v.squeeze(-1), traj_ref_v)
                loss_value_v.backward()
                opt_crt.step()

            # Actor step
            def get_loss():
                '''
                Surrogate policy loss for the TRPO step.

                Importance-sampling form: a positive advantage pushes the
                probability of the taken action up relative to the old
                policy, a negative one pushes it down; the sign is flipped
                because trpo_step minimizes the loss.
                '''
                logits_v = net_act(traj_states_v)
                logprob_v = calc_logprob(logits_v, traj_actions_v)
                # FIX: the original computed
                #   -traj_adv_v.unsqueeze(-1) * torch.exp(logprob_v - old_logprob_v)
                # i.e. an [N, 1] tensor times an [N] tensor, which
                # broadcasts to an [N, N] matrix and averages spurious
                # cross terms (advantage i times ratio j) -- a likely
                # cause of the stalled training noted in the header.
                # For discrete actions both tensors are 1-D, so the
                # product must be element-wise.
                action_loss_v = -traj_adv_v * torch.exp(logprob_v - old_logprob_v)
                return action_loss_v.mean()

            def get_kl():
                # Per-state KL(pi_new || pi_old), summed over actions,
                # against the logits snapshotted before the update.
                logits_v = net_act(traj_states_v)
                probs_v = F.softmax(logits_v, dim=-1)
                log_prob_v = F.log_softmax(logits_v, dim=-1)

                log_prob0_v = F.log_softmax(traj_logits0_v, dim=-1)

                kl_v = probs_v * (log_prob_v - log_prob0_v)
                return kl_v.sum(1, keepdim=True)

            trpo.trpo_step(net_act, get_loss, get_kl)

            trajectory.clear()
            writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + start_idx)
            writer.add_scalar("loss_value", loss_value_v.item(), step_idx + start_idx)

            train_count += 1
            if train_count % TEST_ITERS == 0:
                ts = time.time()
                net_act.eval()
                rewards, steps = test_net(net_act, test_env, device=device)
                net_act.train()
                print("Test done in %.2f sec, reward %.3f, steps %d" % (
                    time.time() - ts, rewards, steps))
                writer.add_scalar("test_reward", rewards, step_idx + start_idx)
                writer.add_scalar("test_steps", steps, step_idx + start_idx)
                if best_reward is None or best_reward < rewards:
                    if best_reward is not None:
                        print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                    best_reward = rewards
                common.save_best_model(rewards, net_act.state_dict(), save_path, "trpo-best", keep_best=10)
                checkpoint = {
                    "net_act": net_act.state_dict(),
                    "net_crt": net_crt.state_dict(),
                    "opt_crt": opt_crt.state_dict(),
                    "start_idx": start_idx + step_idx,
                    "train_count": train_count,
                }
                common.save_checkpoints(train_count, checkpoint, save_path, "trpo", keep_last=5)
