#!/usr/bin/env python3
'''
Overall code restructuring is done; parameter matching still needs to be
verified and the script debugged.
References:
1. https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/ppo_atari_lstm.py
2. https://docs.cleanrl.dev/rl-algorithms/ppo-trxl


Training log:
Trained on machine #2.
2025-05-13: failed to converge; adjusting the code.
'''
import os
import sys
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import yaml
import pathlib

from lib import model_ppo_lstm as model, common_ppo_lstm as common

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

gym.register_envs(ale_py)


class Trainer:
    """PPO trainer with an LSTM-based policy/value network (CleanRL-style).

    Collects fixed-length trajectories from a single environment through a
    ptan experience source, computes GAE advantages, and runs clipped-PPO
    mini-batch updates with optional value-loss clipping, KL early stopping
    and linear learning-rate annealing.
    """

    def __init__(self, params, device):
        """
        :param params: dict of hyper-parameters (loaded from the YAML config)
        :param device: torch.device used for the network and all tensors
        """
        self.params = params
        # --- PPO / optimisation hyper-parameters ---
        self.learning_rate = params['learning_rate']
        self.trajectory_size = params['trajectory_size']
        self.test_iters = params['test_iters']
        self.ppo_epoches = params['ppo_epoches']
        self.ppo_batch_size = params['ppo_batch_size']
        self.target_kl = params['target_kl']
        self.clip_grad = params['clip_grad']
        self.clip_vloss = params['clip_vloss']
        self.clip_coef = params['clip_coef']
        self.ent_coef = params['ent_coef']
        self.vf_coef = params['vf_coef']
        self.gamma = params['gamma']
        self.gae_lambda = params['gae_lambda']
        self.num_iterations = params['num_iterations']
        self.anneal_lr = params['anneal_lr']
        # --- bookkeeping state restored from checkpoints ---
        self.start_idx = 0
        self.old_ratio_v_mean = 0
        self.grad_index = 0
        self.train_frame_idx = 0
        self.best_reward = None

        self.device = device

        self.save_path = os.path.join("saves", "ppo-lstm-" + self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)

        self.build_env()
        self.build_model()
        self.build_buffer()

    def build_buffer(self):
        """Create the TensorBoard writer, the agent and the experience source."""
        self.writer = SummaryWriter(comment="-ppo-lstm-" + self.params['name'])
        # NOTE: only a single environment is supported here; the LSTM-state
        # bookkeeping in the agent would need changes for num_envs > 1.
        # TODO: extend to multiple environments.
        self.agent = model.DQNLstmAgent(self.net_ppo, num_envs=1, device=self.device,
                                        preprocessor=common.ppo_states_preprocessor)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.agent, steps_count=1)

    def sample_action(self, obs):
        """Sample a stochastic action for *obs* without tracking gradients.

        NOTE(review): ``self.actor`` is never defined anywhere in this class,
        so calling this method raises AttributeError — it looks like a
        leftover from a different trainer. Confirm before use or remove.
        """
        with torch.no_grad():
            _, pi, _, _ = self.actor(obs, compute_log_pi=False)
            return pi

    def build_env(self):
        """Create the (wrapped) training and evaluation environments."""
        # TODO: DreamerV1 works without frame stacking — worth trying here.
        self.env = common.wrap_dqn(gym.make('CartPole-v1', render_mode="rgb_array"))
        self.test_env = common.wrap_dqn(gym.make('CartPole-v1', render_mode="rgb_array"))
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape

    def build_model(self):
        """Create the PPO network, optimizer and linear LR-decay scheduler."""
        # FIX: use self.device instead of the module-level `device` global.
        self.net_ppo = model.ModelPPO(self.env.observation_space.shape,
                                      self.env.action_space.n).to(self.device)
        self.opt_ppo = optim.Adam(self.net_ppo.parameters(), lr=self.learning_rate)
        # Linear anneal of the LR factor from 1.0 down to 0.0 over num_iterations.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.opt_ppo,
            lr_lambda=lambda iteration: 1.0 - (iteration / self.num_iterations))

        print(self.net_ppo)

    def load_model(self):
        """Restore the newest 'epoch' checkpoint from self.save_path, if any."""
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoint files end with ..._<index>.<ext>; sort numerically so
            # the last element is the most recent one. (The original code
            # computed this sorted list twice; once is enough.)
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]),
                                        map_location=self.device, weights_only=False)
                self.opt_ppo.load_state_dict(checkpoint['opt_ppo'])
                self.net_ppo.load_state_dict(checkpoint['net_ppo'])
                self.start_idx = checkpoint['start_idx']
                self.old_ratio_v_mean = checkpoint['old_ratio_v_mean']
                self.grad_index = checkpoint['grad_index']
                self.train_frame_idx = checkpoint['train_frame_idx']
                self.scheduler.load_state_dict(checkpoint['scheduler'])
                print("加载模型成功")
                # Report the restored training state.
                print("Learning Rate:", self.opt_ppo.param_groups[0]['lr'])
                print("train_frame_idx:", self.train_frame_idx)
                print("scheduler epoch:", self.scheduler.last_epoch)
                # FIX: removed `self.scheduler.step_size = 10000` — LambdaLR has
                # no step_size attribute, so that assignment was a silent no-op.

            print("加载模型成功")

    def save_model(self, step_idx=0):
        """Snapshot network, optimizer, scheduler and counters to disk."""
        checkpoints = {
            'net_ppo': self.net_ppo.state_dict(),
            'opt_ppo': self.opt_ppo.state_dict(),
            'start_idx': self.start_idx + step_idx,
            'old_ratio_v_mean': self.old_ratio_v_mean,
            'grad_index': self.grad_index,
            'train_frame_idx': self.train_frame_idx,
            'scheduler': self.scheduler.state_dict()
        }
        common.save_checkpoints(self.train_frame_idx, checkpoints, self.save_path, "ppo", keep_last=3)

    def train(self):
        """Main loop: collect trajectory_size steps, then run a PPO update."""
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                trajectory = []  # experience buffer for one PPO update
                # LSTM state at the *start* of the trajectory, needed to replay
                # the same sequence during the PPO epochs.
                initial_lstm_state = self.agent.clone_next_lstm_state()

                for step_idx, exp in enumerate(self.exp_source):
                    rewards_steps = self.exp_source.pop_rewards_steps()
                    if rewards_steps:
                        rewards, steps = zip(*rewards_steps)
                        self.writer.add_scalar("episode_steps", np.mean(steps), step_idx + self.start_idx)
                        tracker.reward(np.mean(rewards), step_idx + self.start_idx)

                    trajectory.append(exp)
                    if len(trajectory) < self.trajectory_size:
                        continue

                    self.__train_model(trajectory=trajectory, initial_lstm_state=initial_lstm_state, step_idx=step_idx)

                    trajectory.clear()
                    initial_lstm_state = self.agent.clone_next_lstm_state()

                    if step_idx % self.test_iters == 0:
                        self.__test_model(step_idx=step_idx)

                    self.save_model(step_idx=step_idx)

    def calc_adv_ref(self, trajectory, values_v, device="cpu"):
        """Compute GAE advantages and bootstrapped reference values.

        Walks the trajectory backwards, accumulating the smoothed advantage
        ``last_gae = delta + gamma * gae_lambda * last_gae`` (reset at episode
        ends) and deriving the critic target as ``advantage + value``.

        :param trajectory: list of 1-step experience tuples ``(exp,)``, where
                           exp exposes ``reward`` and ``done``
        :param values_v: critic value estimates, one per trajectory entry
        :param device: device for the returned tensors
        :return: (advantages, reference values) — both one element shorter
                 than the trajectory, since the last entry has no successor
        """
        last_gae = 0.0
        result_adv = []  # advantages, built back-to-front
        result_ref = []  # critic targets (advantage + value), back-to-front
        # Pair each (value, next_value) with the matching experience, iterating
        # from the end of the trajectory towards the beginning.
        for val, next_val, (exp,) in zip(reversed(values_v[:-1]), reversed(values_v[1:]),
                                        reversed(trajectory[:-1])):
            if exp.done:
                # Terminal step: no bootstrap, and the GAE recursion restarts.
                delta = exp.reward - val
                last_gae = delta
            else:
                # 1-step TD error (Bellman), then the GAE smoothing recursion:
                # each step's advantage folds in the discounted advantage of
                # all later steps.
                delta = exp.reward + self.gamma * next_val - val
                last_gae = delta + self.gamma * self.gae_lambda * last_gae
            result_adv.append(last_gae)
            result_ref.append(last_gae + val)

        # Un-reverse so the outputs line up with the trajectory order.
        adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
        ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
        return adv_v, ref_v

    def __train_model(self, trajectory, initial_lstm_state, step_idx=0):
        """Run the clipped-PPO update over one collected trajectory."""
        # Each trajectory entry is a 1-step tuple; the indices below assume the
        # record layout (state, action, _, done, _, (logprob, value)).
        # NOTE(review): confirm against the ExperienceSourceRAW record format.
        traj_states, traj_actions, traj_done, traj_values, traj_logprobs = zip(*[
            (t[0][0], t[0][1], t[0][3], t[0][5][1], t[0][5][0]) for t in trajectory
        ])
        traj_states_v = torch.FloatTensor(np.array(traj_states)).to(self.device)
        traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(self.device)
        traj_done_v = torch.FloatTensor(np.array(traj_done)).to(self.device)
        traj_values_v = torch.FloatTensor(np.array(traj_values)).to(self.device)
        traj_logprobs_v = torch.FloatTensor(np.array(traj_logprobs)).to(self.device)
        # GAE advantages and critic targets for the trajectory.
        traj_adv_v, traj_ref_v = self.calc_adv_ref(trajectory, traj_values_v, device=self.device)
        # Behaviour-policy log-probs recorded at collection time.
        old_logprob_v = traj_logprobs_v

        # Normalize advantages for training stability.
        traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / torch.std(traj_adv_v)

        # Drop the last entry: adv/ref were computed without it, so this keeps
        # all per-step tensors aligned.
        trajectory = trajectory[:-1]
        old_logprob_v = old_logprob_v[:-1].detach()

        sum_loss = 0.0
        count_steps = 1  # starts at 1 to avoid division by zero below

        clipfracs = []
        # FIX: the original code initialised `approx_kl`/`old_approx_kl` as
        # lists, then shadowed them with scalar tensors and called .append()
        # on the tensors — an AttributeError at runtime. Keep the running
        # lists and the latest scalar strictly separate.
        approx_kls = []
        old_approx_kls = []
        last_approx_kl = 0.0
        # PPO epochs over mini-batches of the trajectory.
        for epoch in range(self.ppo_epoches):
            for batch_ofs in range(0, len(trajectory), self.ppo_batch_size):
                states_v = traj_states_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                actions_v = traj_actions_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + self.ppo_batch_size].unsqueeze(-1)
                batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_values_v = traj_values_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_done_v = traj_done_v[batch_ofs:batch_ofs + self.ppo_batch_size]

                # NOTE(review): every mini-batch is fed the trajectory-initial
                # LSTM state, even for batch_ofs > 0 — confirm this matches
                # how the network replays hidden state across a sequence.
                _, newlogprob, entropy, newvalue, _ = self.net_ppo(states_v, initial_lstm_state, batch_done_v, actions_v)
                logratio = newlogprob - batch_old_logprob_v
                ratio = logratio.exp()

                with torch.no_grad():
                    # Diagnostics (CleanRL-style KL estimators + clip fraction).
                    old_approx_kls.append((-logratio).mean().item())
                    last_approx_kl = ((ratio - 1) - logratio).mean().item()
                    approx_kls.append(last_approx_kl)
                    clipfracs.append(((ratio - 1.0).abs() > self.clip_coef).float().mean().item())

                # Clipped policy-gradient objective (pessimistic max of the
                # unclipped and clipped surrogate losses).
                pg_loss1 = -batch_adv_v * ratio
                pg_loss2 = -batch_adv_v * torch.clamp(ratio, 1 - self.clip_coef, 1 + self.clip_coef)
                pg_loss = torch.max(pg_loss1, pg_loss2).mean()

                newvalue = newvalue.view(-1)
                if self.clip_vloss:
                    # Clip the value update around the collection-time values.
                    v_loss_unclipped = (newvalue - batch_ref_v) ** 2
                    v_clipped = batch_values_v + torch.clamp(
                        newvalue - batch_values_v,
                        -self.clip_coef,
                        self.clip_coef
                    )

                    v_loss_clipped = (v_clipped - batch_ref_v) ** 2
                    v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
                    v_loss = 0.5 * torch.mean(v_loss_max)
                else:
                    v_loss = 0.5 * torch.mean((newvalue - batch_ref_v) ** 2)

                entropy_loss = entropy.mean()
                # FIX: the value loss must be *scaled* by vf_coef. The original
                # `... + v_loss + self.vf_coef` added vf_coef as a constant,
                # which removed the coefficient from the objective entirely.
                loss = pg_loss - self.ent_coef * entropy_loss + self.vf_coef * v_loss

                self.opt_ppo.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.net_ppo.parameters(), self.clip_grad)
                self.opt_ppo.step()
                # Accumulate for the mean-loss diagnostic.
                sum_loss += loss.item()
                count_steps += 1
                self.grad_index += 1
            # Early-stop the PPO epochs once the KL leaves the trust region.
            if self.target_kl is not None and last_approx_kl > self.target_kl:
                break

        self.train_frame_idx += 1
        if self.anneal_lr:
            self.scheduler.step()
        self.writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + self.start_idx)
        self.writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + self.start_idx)
        self.writer.add_scalar("sum_loss", sum_loss / count_steps, step_idx + self.start_idx)
        # FIX: log the means of the per-batch diagnostics (the tensors logged
        # previously were the shadowed variables); also log clipfrac, which
        # used to be collected but never written.
        self.writer.add_scalar("approx_kl", float(np.mean(approx_kls)), step_idx + self.start_idx)
        self.writer.add_scalar("old_approx_kl", float(np.mean(old_approx_kls)), step_idx + self.start_idx)
        self.writer.add_scalar("clipfrac", float(np.mean(clipfracs)), step_idx + self.start_idx)

    @torch.no_grad()
    def __test_net(self, net, env, count, device):
        """Play `count` greedy episodes; return (mean reward, mean steps).

        Episodes are aborted after 30 consecutive repeated no-op (action 0)
        steps to avoid a stuck policy looping forever.
        """
        rewards = 0.0
        steps = 0
        for _ in range(count):
            noop_action_count = 0
            pre_action = -1
            obs, _ = env.reset()
            # Fresh zeroed (h, c) LSTM state for each episode.
            next_lstm_state = (
                torch.zeros(net.lstm.num_layers, 1, net.lstm.hidden_size).to(device),
                torch.zeros(net.lstm.num_layers, 1, net.lstm.hidden_size).to(device),
            )
            while True:
                obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
                mu_v, _, _, _, next_lstm_state = net(obs_v, next_lstm_state)

                action = mu_v.cpu().item()

                if action == 0 and pre_action == action:  # repeated no-op
                    noop_action_count += 1
                    if noop_action_count > 30:
                        break
                else:
                    noop_action_count = 0
                pre_action = action
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count

    @torch.no_grad()
    def __test_model(self, step_idx=0):
        """Evaluate the current policy, log metrics and save best snapshots."""
        ts = time.time()
        self.net_ppo.eval()
        rewards, steps = self.__test_net(net=self.net_ppo, env=self.test_env, count=10, device=self.device)
        self.net_ppo.train()
        print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (self.train_frame_idx,
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, step_idx + self.start_idx)
        self.writer.add_scalar("test_steps", steps, step_idx + self.start_idx)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards

        common.save_best_model(rewards, self.net_ppo.state_dict(), self.save_path, f"ppo-best-{self.train_frame_idx}", keep_best=10)


        


if __name__ == "__main__":
    # Global numeric defaults for the run.
    torch.set_default_dtype(torch.float32)
    np.set_printoptions(precision=8)
    np_float32 = np.float32  # NOTE(review): appears unused — confirm before removing

    # Phase 1: parse only the bootstrap flags (device, run name, config set);
    # any remaining CLI tokens are kept for a second parse once the YAML
    # defaults are known.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="cartpole", help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # parser.add_argument('--configs', nargs='+', required=True)
    # Comment the line above and comment out the line below if you want to debug in IDE like PyCharm
    # Load the YAML config relative to this script and merge the selected
    # config sections (later sections override earlier ones).
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/cartpole_configs_ppo_lstm.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Phase 2: expose every YAML key as a CLI flag (typed from its default)
    # so any hyper-parameter can be overridden from the command line.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)


    # Build the trainer, resume from the latest checkpoint (if any), train.
    trainer = Trainer(params, device)
    trainer.load_model()
    trainer.train()
