# Implementation based on https://github.com/lutery/Hybrid-Action-PPO.git (TODO)
# Target environment: HardMove-v0
'''
Adaptation pending: this script has not yet been fully ported to the
hybrid-action environment.
'''
from email import policy
import gym  # 使用gym而不是gymnasium
import gym_hybrid  # 导入gym_hybrid以注册Sliding-v0环境
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import copy
import os
import pickle
from copy import deepcopy
from tensorboardX import SummaryWriter
import threading

import lib.common_hppo as common
import lib.model_hppo as model

import ale_py
from gym.wrappers.normalize import RunningMeanStd

class Trainer:
    """PPO trainer for the hybrid-action "HardMove-v0" environment.

    Collects transitions through a ptan experience source, computes GAE
    advantages with a separate critic network, and runs clipped-PPO updates
    on the actor.  Checkpoints (networks, optimizers, schedulers and
    progress counters) are written under ``saves/ppo-<name>`` and restored
    automatically on restart.
    """

    def __init__(self, params, device):
        """
        :param params: dict of hyper-parameters (typically ``vars(args)``
            merged from the YAML config); missing keys fall back to defaults
        :param device: torch device used for all networks and tensors
        """
        self.params = params
        self.device = device
        self.num_timesteps = 0
        self._total_timesteps = 0
        self.n_steps = params.get('n_steps', 1025)
        self.gae_lambda = params.get('gae_lambda', 0.95)
        self.gamma = params.get('gamma', 0.99)

        # Fix: the attributes below were referenced throughout the class but
        # never initialized, raising AttributeError on a fresh run
        # (e.g. self.name in save_path, self.learning_rate_* in build_model,
        # self.frame_idx / self.train_count when no checkpoint exists yet).
        # Config-provided values still take precedence via params.get().
        self.name = params.get('name', 'hardmove')
        self.learning_rate_actor = params.get('learning_rate_actor', 1e-4)
        self.learning_rate_critic = params.get('learning_rate_critic', 1e-3)
        self.trajectory_size = params.get('trajectory_size', 2049)
        self.ppo_epoches = params.get('ppo_epoches', 10)
        self.ppo_batch_size = params.get('ppo_batch_size', 64)
        self.ppo_eps = params.get('ppo_eps', 0.2)
        self.clip_grad = params.get('clip_grad', 0.5)
        self.eval_freq = params.get('eval_freq', 100)
        self.save_freq = params.get('save_freq', 100)
        # Progress counters / rollout storage; load_trainer() overwrites the
        # counters when a checkpoint is found.
        self.frame_idx = 0
        self.train_count = 0
        self.best_reward = None
        self.trajectory = []

        self.save_path = os.path.join("saves", "ppo-" + self.name)
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-ppo_" + self.name)

        self.build_env()
        self.build_model()
        self.build_buffer()
        self.load_trainer()


    def build_buffer(self):
        """Build the PPO agent, experience source and rollout buffer."""
        # NOTE(review): PolicyAgent with apply_softmax assumes a purely
        # discrete action head -- confirm this matches the hybrid-action
        # actor before training (TODO).
        self.agent = ptan.agent.PolicyAgent(self.net_act, device=self.device, apply_softmax=True)
        self.exp_source = ptan.experience.ExperienceSource(self.env, self.agent, steps_count=1)
        # NOTE(review): exp_buffer is constructed here, but train() currently
        # consumes self.trajectory instead -- confirm which path is intended.
        self.exp_buffer = common.HYRolloutBuffer(
            buffer_size=self.n_steps,
            observation_space=self.env.observation_space,
            action_space=self.env.action_space,
            device=self.device,
            gae_lambda=self.gae_lambda,
            gamma=self.gamma,
            n_envs=self.n_env
        )


    def build_env(self):
        """Create the training and evaluation environments."""
        self.n_env = 1
        self.env = common.wrap_dqn("HardMove-v0")
        self.test_env = common.wrap_dqn("HardMove-v0")
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape
        # Assumes action_space.shape is (discrete_dims, continuous_dims) for
        # this hybrid environment -- TODO confirm against gym_hybrid.
        self.action_disc = self.action_shape[0]
        self.action_con = self.action_shape[1]


    def build_model(self):
        """Create actor / critic networks, optimizers and LR schedulers."""
        # NOTE(review): action_space.n conflicts with build_env(), which
        # treats the action space as hybrid (shape-based) -- confirm the
        # expected constructor argument of PPOModelActor.
        self.net_act = model.PPOModelActor(self.env.observation_space.shape, self.env.action_space.n).to(self.device)
        # Critic: maps a state to a scalar value estimate.
        self.net_crt = model.PPOModelCritic(self.env.observation_space.shape).to(self.device)
        print(self.net_act)
        print(self.net_crt)

        self.opt_act = optim.Adam(self.net_act.parameters(), lr=self.learning_rate_actor)
        self.scheduler_act = optim.lr_scheduler.StepLR(self.opt_act, step_size=2000, gamma=0.9)
        self.opt_crt = optim.Adam(self.net_crt.parameters(), lr=self.learning_rate_critic)
        self.scheduler_crt = optim.lr_scheduler.StepLR(self.opt_crt, step_size=1500, gamma=0.9)


    def load_trainer(self):
        """Restore the most recent checkpoint from save_path, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Pick the checkpoint with the highest epoch number embedded in
            # its filename ("..._<epoch>.<ext>").
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                # weights_only=False is required because the checkpoint
                # contains optimizer/scheduler state; only load trusted files.
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
                self.net_act.load_state_dict(checkpoint['net_act'])
                self.net_crt.load_state_dict(checkpoint['net_crt'])
                self.opt_act.load_state_dict(checkpoint['opt_act'])
                self.opt_crt.load_state_dict(checkpoint['opt_crt'])
                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']
                self.scheduler_act.load_state_dict(checkpoint['scheduler_act'])
                self.scheduler_crt.load_state_dict(checkpoint['scheduler_crt'])
                print("Model loaded successfully")
                # Report the restored learning rates and progress.
                print(f"Actor learning rate: {self.scheduler_act.get_last_lr()[0]}")
                print(f"Critic learning rate: {self.scheduler_crt.get_last_lr()[0]}")
                print(f"train_count: {self.train_count}")


    def save_trainer(self, step_idx):
        """Persist networks, optimizers, schedulers and progress counters.

        :param step_idx: current experience-source step, added to the
            restored frame_idx so the counter keeps growing across restarts
        """
        checkpoint = {
            "net_act": self.net_act.state_dict(),
            "net_crt": self.net_crt.state_dict(),
            "opt_act": self.opt_act.state_dict(),
            "opt_crt": self.opt_crt.state_dict(),
            "frame_idx": step_idx + self.frame_idx,
            "train_count": self.train_count,
            "scheduler_act": self.scheduler_act.state_dict(),
            "scheduler_crt": self.scheduler_crt.state_dict(),
        }
        common.save_checkpoints(self.train_count, checkpoint, self.save_path, "ppo", keep_last=5)


    @staticmethod
    def calc_adv_ref(trajectory, net_crt, states_v, device="cpu", gamma=0.99, gae_lambda=0.95):
        """
        Compute GAE advantages and reference (target) values for a trajectory.

        :param trajectory: list of 1-tuples of experience entries, each entry
            exposing ``.reward`` and ``.done``
        :param net_crt: critic network mapping states to value estimates
        :param states_v: state tensor aligned with ``trajectory``
        :param device: device the returned tensors are placed on
        :param gamma: discount factor
        :param gae_lambda: GAE smoothing factor
        :return: tuple ``(adv_v, ref_v)`` of advantage and reference-value
            tensors, each of length ``len(trajectory) - 1``
        """
        with torch.no_grad():
            values_v = net_crt(states_v)  # predicted state values V(s)
        values = values_v.squeeze().data.cpu().numpy()
        # Generalized Advantage Estimation: iterate the trajectory in reverse
        # so each step's advantage accumulates the discounted advantage of
        # all later steps.
        last_gae = 0.0  # running GAE accumulator
        result_adv = []  # advantages, built in reverse order
        result_ref = []  # critic targets (advantage + V(s))
        # Pair each V(s_t) with V(s_{t+1}) and the matching experience, all
        # walked back-to-front; the final transition is dropped because it
        # has no successor value.
        for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                        reversed(trajectory[:-1])):
            if exp.done:
                # Episode terminated: no bootstrap from the next state and
                # the accumulated GAE chain is reset.
                delta = exp.reward - val
                last_gae = delta
            else:
                # TD residual from the Bellman equation, then the smoothed
                # GAE recursion: delta_t + gamma * lambda * last_gae.
                delta = exp.reward + gamma * next_val - val
                last_gae = delta + gamma * gae_lambda * last_gae
            result_adv.append(last_gae)
            result_ref.append(last_gae + val)

        # Restore chronological order before converting to tensors.
        adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
        ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
        return adv_v, ref_v


    def train(self):
        """Main loop: collect experience, run PPO updates, periodically
        evaluate and checkpoint."""
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                for step_idx, exp in enumerate(self.exp_source):
                    self.frame_idx += 1
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        # Log finished-episode statistics.
                        rewards, steps = zip(*rewards_steps)
                        tb_tracker.track("episode_steps", steps[0], self.frame_idx)
                        tracker.reward(rewards[0], self.frame_idx)

                    self.trajectory.append(exp)
                    if len(self.trajectory) < self.trajectory_size:
                        continue

                    self.__train_trainer(self.trajectory, step_idx=step_idx)
                    self.trajectory.clear()

                    if self.train_count % self.eval_freq == 0:
                        self.__test_trainer()

                    if self.train_count % self.save_freq == 0:
                        self.save_trainer(step_idx=step_idx)


    def __train_trainer(self, trajectory, step_idx):
        """Run one round of PPO optimization over a collected trajectory.

        :param trajectory: list of 1-tuples of experience entries
        :param step_idx: experience-source step index, used for logging
        :raises ValueError: when NaN/inf values appear in network outputs
        """
        traj_states = [t[0].state for t in trajectory]
        traj_actions = [t[0].action for t in trajectory]
        traj_states_v = torch.FloatTensor(np.array(traj_states)).to(self.device)
        traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(self.device)
        # GAE advantages and critic targets.
        traj_adv_v, traj_ref_v = Trainer.calc_adv_ref(trajectory, self.net_crt, traj_states_v, device=self.device, gamma=self.gamma, gae_lambda=self.gae_lambda)
        # Action probabilities under the pre-update ("old") policy.
        with torch.no_grad():
            mu_v = F.softmax(self.net_act(traj_states_v), dim=1)
        # Log-probability of the actions actually taken, frozen as the PPO
        # importance-sampling denominator.
        old_logprob_v = torch.log(mu_v.gather(1, torch.tensor(traj_actions, dtype=torch.int64).to(self.device).unsqueeze(-1))).detach()

        # Normalize advantages for training stability.
        traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / (torch.std(traj_adv_v) + 1e-8)

        # Drop the last entry: calc_adv_ref produces len-1 advantages and
        # reference values, so everything must stay aligned.
        trajectory = trajectory[:-1]
        old_logprob_v = old_logprob_v[:-1].detach()

        sum_loss_value = 0.0
        sum_loss_policy = 0.0
        count_steps = 1  # starts at 1 to avoid division by zero in the logs
        old_ratio_v_mean = 0
        is_interrupt = False

        # PPO epochs over minibatches of the trajectory.
        for epoch in range(self.ppo_epoches):
            for batch_ofs in range(0, len(trajectory), self.ppo_batch_size):
                states_v = traj_states_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                actions_v = traj_actions_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + self.ppo_batch_size].unsqueeze(-1)
                batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + self.ppo_batch_size]

                # Critic training: MSE between predicted values and the GAE
                # reference values.
                self.opt_crt.zero_grad()
                value_v = self.net_crt(states_v)
                if torch.isnan(value_v).any() or torch.isinf(value_v).any():
                    print(f"Warning: NaN or inf detected in value_v at step {step_idx + self.frame_idx}")
                    raise ValueError("NaN or inf detected in value_v")
                loss_value_v = F.mse_loss(value_v.squeeze(-1), batch_ref_v)
                loss_value_v.backward()
                # Fix: nn_utils was never imported; use the nn.utils module
                # that is already in scope via `import torch.nn as nn`.
                nn.utils.clip_grad_norm_(self.net_crt.parameters(), self.clip_grad)
                self.opt_crt.step()

                # Actor training.
                self.opt_act.zero_grad()
                new_policy_logits = self.net_act(states_v)
                if torch.isnan(new_policy_logits).any() or torch.isinf(new_policy_logits).any():
                    print(f"Warning: NaN or inf detected in new_policy_logits at step {step_idx + self.frame_idx}")
                    raise ValueError("NaN or inf detected in new_policy_logits")

                # TODO if this still misbehaves, try torch's built-in entropy
                # (e.g. torch.distributions.Categorical.entropy).
                new_policy = F.softmax(new_policy_logits, dim=1)
                new_log_policy = F.log_softmax(new_policy_logits, dim=1)
                entropy_loss = -torch.mean(torch.sum(new_policy * new_log_policy, dim=1))
                # Log-probability of the taken actions under the new policy;
                # the epsilon guards against log(0).
                indices = actions_v.long().to(self.device).unsqueeze(-1)
                logprob_pi_v = torch.log(new_policy.gather(1, indices) + 1e-7)

                ratio_v = torch.exp(logprob_pi_v - batch_old_logprob_v)
                # Safety valve: abort the update round when the policy ratio
                # explodes between minibatches (training has diverged).
                if abs(ratio_v.mean().item() - old_ratio_v_mean) > 100:
                    self.opt_act.zero_grad()
                    is_interrupt = True
                    print("epoch %d, batch_ofs %d, ratio_v mean changed too much: %.2f -> %.2f, interrupting training" % (
                        epoch, batch_ofs, old_ratio_v_mean, ratio_v.mean().item()))
                    break
                old_ratio_v_mean = ratio_v.mean().item()

                # Clipped PPO surrogate objective:
                #   ratio_v     -> r_t(theta), the new/old probability ratio
                #   batch_adv_v -> A_t, the advantage estimate
                #   clamp(...)  -> the clip(r_t, 1-eps, 1+eps) term
                surr_obj_v = batch_adv_v * ratio_v

                clipped_surr_v = batch_adv_v * torch.clamp(ratio_v, 1.0 - self.ppo_eps, 1.0 + self.ppo_eps)

                # Negated min of the two surrogates plus an entropy bonus.
                loss_policy_v = -torch.min(surr_obj_v, clipped_surr_v).mean() - 0.01 * entropy_loss
                loss_policy_v.backward()
                nn.utils.clip_grad_norm_(self.net_act.parameters(), self.clip_grad)
                self.opt_act.step()

                # Accumulate losses for the averaged TensorBoard scalars.
                sum_loss_value += loss_value_v.item()
                sum_loss_policy += loss_policy_v.item()
                count_steps += 1
            if is_interrupt:
                break

        # Only clears the local slice; the caller clears the original list.
        trajectory.clear()
        self.writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + self.frame_idx)
        self.writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + self.frame_idx)
        self.writer.add_scalar("loss_policy", sum_loss_policy / count_steps, step_idx + self.frame_idx)
        self.writer.add_scalar("loss_value", sum_loss_value / count_steps, step_idx + self.frame_idx)
        self.train_count += 1
        # self.scheduler_act.step()
        # self.scheduler_crt.step()


    def __test_trainer(self):
        """Single-process evaluation: run the greedy policy on the test
        environment and checkpoint the actor on a new best reward."""
        ts = time.time()
        rewards, steps = self.test_net(count=10, device=self.device, net=self.net_act, env=self.test_env)
        print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (self.train_count,
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.frame_idx)
        self.writer.add_scalar("test_steps", steps, self.frame_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards

            checkpoints = {
                "actor_model": self.net_act.state_dict(),
            }
            common.save_best_model(rewards, checkpoints, self.save_path, 'ppo_best_' + str(steps) + '_')


    @staticmethod
    @torch.no_grad()
    def test_net(count, device, net, env):
        '''
        Play ``count`` full episodes with the greedy (argmax) policy.

        :param count: number of episodes to play (each runs to termination)
        :param device: device to run the network on
        :param net: actor network producing action logits
        :param env: environment with the (obs, reward, done, trunc, info)
            step API
        :return: tuple (mean reward, mean steps) over the episodes
        '''
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                mu_v = net(obs_v)
                # Greedy action selection from the logits.
                action = mu_v.squeeze(dim=0).data.cpu().argmax().item()
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count



if __name__ == "__main__":
    # Phase 1: parse only the bootstrap arguments so we know which config
    # sections to load; everything else stays in `remaining`.
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' means the
    # flag can never be turned off from the command line -- confirm whether a
    # --no-cuda / store_false variant was intended.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default='acrobot', help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load hyper-parameters from the YAML config file, merging every section
    # named in --configs (later sections override earlier ones).
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'conf/ppo_configs.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Phase 2: expose every config key as a command-line override, then parse
    # the arguments that phase 1 did not consume.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)

    trainer = Trainer(params, device)
    trainer.train()