#!/usr/bin/env python3
'''
Adaptation complete.
Uses multiple processes to collect data and to test the model.

Training log:
20250506: trained on machine #2, test score -500; does not look good — adjust the training structure and continue
20250519: finished re-adjusting the code
20250709: adjusted the code again and restarted training
20251103: once train_ppo_single.py is validated, adjust this code accordingly
'''
import os
import yaml

import pathlib
import sys

import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter

from lib import model, common

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn.utils as nn_utils
import torch.multiprocessing as mp
import collections


# Lightweight record types (behave like tiny immutable classes with named fields):
# TotalReward carries a finished episode's reward/step count from a worker process;
# TestInfo carries refreshed network weights plus progress counters to the test process.
TotalReward = collections.namedtuple('TotalReward', ('reward', 'steps'))
TestInfo = collections.namedtuple('TestInfo', ('net_state', 'train_count', 'frame_idx'))


class Trainer:
    """PPO trainer for Acrobot-v1 using multiple processes.

    Worker processes (``data_func``) collect experience with a CPU copy of the
    actor network and push it through ``train_queue``; the main process runs
    the PPO updates; a dedicated process (``test_trainer``) periodically
    evaluates the current policy and saves the best model.
    """

    def __init__(self, params, device):
        # ``params`` is the hyper-parameter dict assembled from the YAML config + CLI.
        self.params = params
        self.gamma = self.params['gamma'] # discount factor GAMMA for Q-value reward decay
        self.gae_lambda = self.params['gae_lambda'] # lambda factor of the advantage estimator; 0.95 is a fairly good value
        self.trajectory_size = self.params['trajectory_size'] # TODO purpose: from the code it looks like the sampled trajectory length (the contiguous sampling buffer size; the game is continuous)
        self.learning_rate_actor = self.params['learning_rate_actor'] # learning rate of the actor network
        self.learning_rate_critic = self.params['learning_rate_critic'] # learning rate of the critic network
        self.ppo_eps = self.params['ppo_eps'] # PPO clipping epsilon
        self.ppo_epoches = self.params['ppo_epoches'] # number of PPO epochs per trajectory
        self.ppo_batch_size = self.params['ppo_batch_size'] # PPO minibatch size
        self.test_iters = self.params['test_iters'] # how many rounds between tests / network checkpoint refreshes
        self.clip_grad = self.params['clip_grad'] # gradient clipping threshold
        self.device = device
        self.frame_idx = 1
        self.train_count = 500
        self.trajectory = [] # note: the buffer is named "trajectory" here
        self.best_reward = float('-inf')
        self.save_freq = self.params['save_freq'] # how often (in train counts) to save the model
        self.eval_freq = self.params['eval_freq'] # how often (in train counts) to test the model
        self.data_queue_size = self.params['data_queue_size'] # size of the data queue (also the number of worker processes)
        self.name = self.params['name'] # name of this training run

        self.save_path = os.path.join("saves", "ppo-" + self.name)
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-ppo_" + self.name)

        self.build_env()
        self.build_model()
        self.build_buffer()
        self.load_trainer()
        self.build_multiprocess()


    def build_multiprocess(self):
        """Start the data-collecting worker processes and the test process."""
        self.train_queue = mp.Queue(maxsize=self.data_queue_size)
        self.state_queue = []
        self.data_proc_list = []
        for _ in range(self.data_queue_size):
            # Create a child process, passing the net's (CPU) state dict, the target
            # device and the training/weight-refresh queues as its arguments.
            state_queue = mp.Queue(maxsize=1)
            self.state_queue.append(state_queue)
            data_proc = mp.Process(target=Trainer.data_func, args=(self.get_cpu_state_dict(), self.device, self.train_queue, state_queue, self.trajectory_size, self.env))
            data_proc.start()
            self.data_proc_list.append(data_proc)


        self.test_state_queue = mp.Queue(maxsize=1)
        self.test_mp = mp.Process(target=Trainer.test_trainer, args=(self.get_cpu_state_dict(), self.name, self.device, self.test_env, self.test_state_queue, self.save_path, self.train_count, self.frame_idx, self.best_reward))
        self.test_mp.start()


    def get_cpu_state_dict(self):
        # Return the actor network's parameters moved to CPU, so the state dict
        # can be safely passed through multiprocessing queues.
        state_dict = self.net_act.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
        return state_dict


    @staticmethod
    def data_func(net_state, device, queue, state_queue, trajectory_size, env):
        """Worker-process loop: roll out the policy in ``env`` and push data.

        Pushes ``TotalReward`` records for every finished episode and lists of
        ``trajectory_size`` experience steps onto ``queue``; pulls refreshed
        network weights from ``state_queue`` whenever one is available.
        """
        net_act = model.PPOModelActor(env.observation_space.shape, env.action_space.n).to(device)
        net_act.load_state_dict(net_state)
        agent = ptan.agent.PolicyAgent(net_act, device=device, apply_softmax=True)
        exp_source = ptan.experience.ExperienceSource(env, agent, steps_count=1)
        traj_data = []

        for exp in exp_source:
            rewards_steps = exp_source.pop_rewards_steps()
            if rewards_steps:
                # Report the current training progress (episode reward / steps)
                # back to the trainer process.
                rewards, steps = zip(*rewards_steps)
                queue.put(TotalReward(rewards[0], steps[0]))

            traj_data.append(exp)
            if len(traj_data) < trajectory_size:
                continue
            queue.put(traj_data)
            traj_data = []

            if not state_queue.empty():
                net_act.load_state_dict(state_queue.get())


    def build_buffer(self):
        # Build the policy agent and experience source.
        # NOTE(review): the original comment said "DDPG agent", but this is the
        # PPO policy agent; only self.exp_source from here is (otherwise) unused
        # by the multiprocess train() path.
        self.agent = ptan.agent.PolicyAgent(self.net_act, device=self.device, apply_softmax=True)
        self.exp_source = ptan.experience.ExperienceSource(self.env, self.agent, steps_count=1)


    def build_env(self):
        # TODO: DreamerV1 can do without multi-frame stacking; try it here too.
        self.env = common.wrap_dqn_ppo(gym.make('Acrobot-v1', render_mode="rgb_array"))
        self.test_env = common.wrap_dqn_ppo(gym.make('Acrobot-v1', render_mode="rgb_array"))
        self.obs_shape = self.env.observation_space.shape
        self.action_shape = self.env.action_space.shape


    def build_model(self):
        # Create the action-prediction (actor) network.
        # NOTE(review): this method (and load_trainer/__train_trainer) uses the
        # module-level global `device` rather than self.device — it only works
        # because __main__ defines `device` before constructing Trainer; confirm
        # and unify on self.device.
        self.net_act = model.PPOModelActor(self.env.observation_space.shape, self.env.action_space.n).to(device)
        # Create the state/action evaluation (critic) network.
        self.net_crt = model.PPOModelCritic(self.env.observation_space.shape).to(device)
        print(self.net_act)
        print(self.net_crt)

        self.opt_act = optim.Adam(self.net_act.parameters(), lr=self.learning_rate_actor)
        self.scheduler_act = optim.lr_scheduler.StepLR(self.opt_act, step_size=2000, gamma=0.9)
        self.opt_crt = optim.Adam(self.net_crt.parameters(), lr=self.learning_rate_critic)
        self.scheduler_crt = optim.lr_scheduler.StepLR(self.opt_crt, step_size=1500, gamma=0.9)

        # Declaring the net as shared memory: share_memory must be called before fork,
        # after which the net can conveniently be used in every process.
        # If the net's device is cuda, its tensors are shared by default — no need to call share_memory.
        # If the net's device is cpu, its tensors are not shared and share_memory is required.
        # (Not needed here: weights are passed between processes via queues instead.)
        # if self.device == "cpu":
        #     self.net_act.share_memory()



    def load_trainer(self):
        # Restore the newest checkpoint (by epoch number in the filename), if any.
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Only files containing "epoch" are checkpoints; sort by the trailing number.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=device, weights_only=False)
                self.net_act.load_state_dict(checkpoint['net_act'])
                self.net_crt.load_state_dict(checkpoint['net_crt'])
                self.opt_act.load_state_dict(checkpoint['opt_act'])
                self.opt_crt.load_state_dict(checkpoint['opt_crt'])
                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']
                self.scheduler_act.load_state_dict(checkpoint['scheduler_act'])
                self.scheduler_crt.load_state_dict(checkpoint['scheduler_crt'])
                print("加载模型成功")
                # Print the current learning rates and progress counters.
                print(f"Actor learning rate: {self.scheduler_act.get_last_lr()[0]}")
                print(f"Critic learning rate: {self.scheduler_crt.get_last_lr()[0]}")
                print(f"train_count: {self.train_count}")


    def save_trainer(self, step_idx):
        """Persist networks, optimizers, schedulers and counters as a checkpoint."""
        checkpoint = {
            "net_act": self.net_act.state_dict(),
            "net_crt": self.net_crt.state_dict(),
            "opt_act": self.opt_act.state_dict(),
            "opt_crt": self.opt_crt.state_dict(),
            "frame_idx": step_idx + self.frame_idx,
            "train_count": self.train_count,
            "scheduler_act": self.scheduler_act.state_dict(),
            "scheduler_crt": self.scheduler_crt.state_dict(),
        }
        common.save_checkpoints(self.train_count, checkpoint, self.save_path, "ppo", keep_last=5)


    @staticmethod
    def calc_adv_ref(trajectory, net_crt, states_v, device="cpu", gamma=0.99, gae_lambda=0.95):
        """
        By trajectory calculate advantage and 1-step ref value (GAE).
        :param trajectory: trajectory list — the contiguous sampled experience
        :param net_crt: critic network
        :param states_v: states tensor
        :return: tuple with advantage tensor and reference-value tensor
        """
        with torch.no_grad():
            values_v = net_crt(states_v) # predicted state values
        values = values_v.squeeze().data.cpu().numpy()
        # generalized advantage estimator: smoothed version of the advantage
        last_gae = 0.0 # accumulates the advantage; unlike the plain advantage,
        # this version also takes future advantage into account
        result_adv = [] # stores the advantage of each action
        result_ref = [] # stores the actual (reference) Q value
        # zip(reversed(values[:-1]), reversed(values[1:])) pairs the values in
        # the order ((-2, -1), (-3, -2), (-4, -3), ...), giving (val, next_val),
        # and each pair is combined with the trajectory experiences in reverse
        # order as well, i.e. (((-2, -1), -2), ((-3, -2), -3), ...).
        for val, next_val, (exp,) in zip(reversed(values[:-1]), reversed(values[1:]),
                                        reversed(trajectory[:-1])):
            if exp.done:
                # Episode ended at this step:
                delta = exp.reward - val # difference between actual and predicted value
                last_gae = delta # no future steps, so discard accumulated advantage
            else:
                # Episode continues:
                # compute the actual Q value via the Bellman equation, then the
                # difference between the actual and the predicted value (TD error).
                delta = exp.reward + gamma * next_val - val
                # Smoothed advantage: because we iterate in reverse, each step's
                # advantage includes the following steps' advantages discounted
                # by gamma * gae_lambda — the cumulative advantage, i.e. the
                # current advantage depends on everything that follows.
                last_gae = delta + gamma * gae_lambda * last_gae
            result_adv.append(last_gae)
            result_ref.append(last_gae + val)

        # Reverse back into chronological order:
        # adv_v holds the action advantages (actual minus predicted value),
        # ref_v holds the actual (reference) Q values.
        adv_v = torch.FloatTensor(list(reversed(result_adv))).to(device)
        ref_v = torch.FloatTensor(list(reversed(result_ref))).to(device)
        return adv_v, ref_v


    def train(self):
        """Main loop: consume worker output, run PPO updates, fan out weights,
        trigger evaluation and checkpointing."""
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
                while True:
                    self.trajectory = self.train_queue.get()
                    self.frame_idx += 1

                    # Workers push either a TotalReward (episode finished) or a
                    # full trajectory list; dispatch on the type.
                    if isinstance(self.trajectory, TotalReward):
                        rewards, steps = self.trajectory.reward, self.trajectory.steps
                        tb_tracker.track("episode_steps", steps, self.frame_idx)
                        tracker.reward(rewards, self.frame_idx)
                        continue

                    if len(self.trajectory) < self.trajectory_size:
                        print("Trajectory size is less than trajectory size: %d < %d" % (len(self.trajectory), self.trajectory_size))
                        continue

                    self.__train_trainer(self.trajectory, step_idx=self.frame_idx)
                    state_dict = self.get_cpu_state_dict()
                    for state_queue in self.state_queue:
                        if not state_queue.full():
                            # Send the current network weights to the worker process.
                            state_queue.put(state_dict)

                    if self.train_count % self.eval_freq == 0:
                        self.test_state_queue.put(TestInfo(state_dict, self.train_count, self.frame_idx))
                        # self.__test_trainer()

                    if self.train_count % self.save_freq == 0:
                        self.save_trainer(step_idx=self.frame_idx)


    # NOTE(review): previous single-process training loop, kept for reference.
    # def train(self):
    #     with ptan.common.utils.RewardTracker(self.writer) as tracker:
    #         with ptan.common.utils.TBMeanTracker(self.writer, batch_size=100) as tb_tracker:
    #             for step_idx, exp in enumerate(self.exp_source):
    #                 self.frame_idx += 1
    #                 rewards_steps = self.exp_source.pop_rewards_steps()
    #                 if rewards_steps:
    #                     # Record the current training progress.
    #                     rewards, steps = zip(*rewards_steps)
    #                     tb_tracker.track("episode_steps", steps[0], self.frame_idx)
    #                     tracker.reward(rewards[0], self.frame_idx)

    #                 self.trajectory.append(exp)
    #                 if len(self.trajectory) < self.trajectory_size:
    #                     continue

    #                 self.__train_trainer(self.trajectory, step_idx=step_idx)
    #                 self.trajectory.clear()

    #                 if self.train_count % self.eval_freq == 0:
    #                     self.__test_trainer()

    #                 if self.train_count % self.save_freq == 0:
    #                     self.save_trainer(step_idx=step_idx)


    def __train_trainer(self, trajectory, step_idx):
        """Run the PPO update (critic + clipped-surrogate actor) on one trajectory."""
        # Each trajectory entry is a 1-step tuple from ExperienceSource, hence t[0].
        traj_states = [t[0].state for t in trajectory]
        traj_actions = [t[0].action for t in trajectory]
        traj_states_v = torch.FloatTensor(np.array(traj_states)).to(device)
        traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(device)
        # Compute advantages and reference (actual) Q values.
        traj_adv_v, traj_ref_v = Trainer.calc_adv_ref(trajectory, self.net_crt, traj_states_v, device=device, gamma=self.gamma, gae_lambda=self.gae_lambda)
        # Predict action probabilities for the visited states.
        with torch.no_grad():
            mu_v = F.softmax(self.net_act(traj_states_v), dim=1)
        # Log-probabilities of the taken actions under the pre-update (old) policy.
        old_logprob_v = torch.log(mu_v.gather(1, torch.tensor(traj_actions, dtype=torch.int64).to(device).unsqueeze(-1))).detach()

        # Normalize advantages — improves training stability.
        traj_adv_v = (traj_adv_v - torch.mean(traj_adv_v)) / (torch.std(traj_adv_v) + 1e-8)

        # Drop the last entry from the trajectory; our adv and ref values were
        # calculated without it (calc_adv_ref iterates trajectory[:-1]), so this
        # keeps everything aligned one-to-one.
        trajectory = trajectory[:-1]
        # Same trimming for the old log-probabilities ([0, -1) range).
        old_logprob_v = old_logprob_v[:-1].detach()

        sum_loss_value = 0.0
        sum_loss_policy = 0.0
        count_steps = 1
        old_ratio_v_mean = 0
        is_interrupt = False

        # PPO iterations (proximal policy optimization).
        for epoch in range(self.ppo_epoches):
            for batch_ofs in range(0, len(trajectory), self.ppo_batch_size):
                states_v = traj_states_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                actions_v = traj_actions_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_adv_v = traj_adv_v[batch_ofs:batch_ofs + self.ppo_batch_size].unsqueeze(-1)
                batch_ref_v = traj_ref_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_old_logprob_v = old_logprob_v[batch_ofs:batch_ofs + self.ppo_batch_size]

                # critic training:
                # loss is the difference between predicted and reference Q values.
                self.opt_crt.zero_grad()
                value_v = self.net_crt(states_v)
                if torch.isnan(value_v).any() or torch.isinf(value_v).any():
                    print(f"Warning: NaN or inf detected in value_v at step {step_idx + self.frame_idx}")
                    raise ValueError("NaN or inf detected in value_v") 
                loss_value_v = F.mse_loss(value_v.squeeze(-1), batch_ref_v)
                loss_value_v.backward()
                nn_utils.clip_grad_norm_(self.net_crt.parameters(), self.clip_grad)
                self.opt_crt.step()

                # actor training
                self.opt_act.zero_grad()
                new_policy_logits = self.net_act(states_v)
                if torch.isnan(new_policy_logits).any() or torch.isinf(new_policy_logits).any():
                    print(f"Warning: NaN or inf detected in new_policy_logits at step {step_idx + self.frame_idx}")
                    raise ValueError("NaN or inf detected in new_policy_logits") 

                # TODO if this change is not effective either, try PyTorch's built-in entropy.
                new_policy = F.softmax(new_policy_logits, dim=1)
                new_log_policy = F.log_softmax(new_policy_logits, dim=1)
                entropy_loss = -torch.mean(torch.sum(new_policy * new_log_policy, dim=1))
                # Log-probabilities of the executed actions under the new policy.
                indices = actions_v.long().to(device).unsqueeze(-1)
                logprob_pi_v = torch.log(new_policy.gather(1, indices) + 1e-7)

                ratio_v = torch.exp(logprob_pi_v - batch_old_logprob_v)
                # Safety valve: abort the update if the ratio explodes between batches.
                if abs(ratio_v.mean().item() - old_ratio_v_mean) > 100:
                    self.opt_act.zero_grad()
                    is_interrupt = True
                    print("epoch %d, batch_ofs %d, ratio_v mean changed too much: %.2f -> %.2f, interrupting training" % (
                        epoch, batch_ofs, old_ratio_v_mean, ratio_v.mean().item()))
                    break
                old_ratio_v_mean = ratio_v.mean().item()

                # ratio_v is the new/old policy probability ratio; the clipped
                # surrogate objective limits how far a single update can move the
                # policy:
                #   batch_adv_v  ~ A_t in the PPO paper
                #   ratio_v      ~ r_t(theta)
                #   torch.clamp(ratio_v, 1-eps, 1+eps) ~ the clip term
                surr_obj_v = batch_adv_v * ratio_v

                clipped_surr_v = batch_adv_v * torch.clamp(ratio_v, 1.0 - self.ppo_eps, 1.0 + self.ppo_eps)

                # Pessimistic (min) surrogate plus an entropy bonus (coef 0.01).
                loss_policy_v = -torch.min(surr_obj_v, clipped_surr_v).mean() - 0.01 * entropy_loss
                # # Replace with conditional selection logic
                # condition = surr_obj_v < 0
                # # For negative values: choose larger one (less negative) to minimize loss
                # # For positive values: choose smaller one to minimize loss
                # selected_surr = torch.where(
                #     condition,
                #     torch.max(surr_obj_v, clipped_surr_v),  # For negative values: larger one (less negative)
                #     torch.min(surr_obj_v, clipped_surr_v)   # For positive values: smaller one
                # )
                # policy_loss = -selected_surr.mean()
                # loss_policy_v = policy_loss - 0.01 * entropy_loss
                loss_policy_v.backward()
                nn_utils.clip_grad_norm_(self.net_act.parameters(), self.clip_grad)
                self.opt_act.step()

                # Accumulate totals to report average losses.
                sum_loss_value += loss_value_v.item()
                sum_loss_policy += loss_policy_v.item()
                count_steps += 1
            if is_interrupt:
                break

        trajectory.clear()
        self.writer.add_scalar("advantage", traj_adv_v.mean().item(), step_idx + self.frame_idx)
        self.writer.add_scalar("values", traj_ref_v.mean().item(), step_idx + self.frame_idx)
        self.writer.add_scalar("loss_policy", sum_loss_policy / count_steps, step_idx + self.frame_idx)
        self.writer.add_scalar("loss_value", sum_loss_value / count_steps, step_idx + self.frame_idx)
        self.train_count += 1
        # self.scheduler_act.step()
        # self.scheduler_crt.step()


    @staticmethod
    @torch.no_grad()
    def test_net(count, device, net, env):
        '''
        Evaluate the (greedy argmax) policy.

        count: number of games to play (each runs until the episode ends)

        return: (mean reward, mean steps)
        '''
        rewards = 0.0
        steps = 0
        for _ in range(count):
            obs, _ = env.reset()
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                mu_v = net(obs_v)
                action = mu_v.squeeze(dim=0).data.cpu().argmax().item()
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count


    @staticmethod
    @torch.no_grad()
    def test_trainer(net_act_state, writer_name, device, env, net_state_queue, save_path, train_count, frame_idx, best_reward):
        # Test-process loop: evaluate the policy and save the best-scoring model.
        net_act = model.PPOModelActor(env.observation_space.shape, env.action_space.n)
        net_act.load_state_dict(net_act_state)
        net_act.to(device)
        net_act.eval()
        # NOTE(review): the next three lines are no-op self-assignments of the
        # incoming arguments; they can be removed.
        train_count = train_count
        frame_idx = frame_idx
        best_reward = best_reward
        writer = SummaryWriter(comment="-ppo_" + writer_name)
        while True:
            ts = time.time()
            rewards, steps = Trainer.test_net(count=10, device=device, net=net_act, env=env)
            print("Train Count %d, Test done in %.2f sec, reward %.3f, steps %d" % (train_count,
                time.time() - ts, rewards, steps))
            writer.add_scalar("test_reward", rewards, frame_idx)
            writer.add_scalar("test_steps", steps, frame_idx)
            if best_reward is None or best_reward < rewards:
                if best_reward is not None:
                    print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
                best_reward = rewards

            checkpoints = {
                "actor_model": net_act.state_dict(),
            }
            # common.save_best_model(rewards, checkpoints, self.save_path, 'ppo_best_' + str(steps) + '_')
            common.save_best_model(rewards, checkpoints, save_path, 'ppo_best_' + str(steps) + '_')

            # Block until the trainer sends fresh weights, then reload them
            # (move to CPU first so the queued CPU state dict loads cleanly).
            test_info = net_state_queue.get()
            net_act.to(device='cpu')
            net_act.load_state_dict(test_info.net_state)
            net_act.to(device)
            train_count = test_info.train_count
            frame_idx = test_info.frame_idx


if __name__ == "__main__":
    # spawn: the child process inherits only the parent's signal handling, not
    #   its handles, locks and other resources — safer, and the recommended default.
    # fork: the child is created by copying the parent and inherits all of its
    #   resources (locks, signal handlers, ...), which can cause problems with
    #   multiprocessing.
    # Because of PyTorch's constraints, spawn is the best choice.
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    # BooleanOptionalAction keeps the old default (CUDA on) and the `--cuda`
    # flag working, while also adding `--no-cuda` to disable it; the original
    # `action='store_true'` with `default=True` made the flag impossible to turn off.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument("-n", "--name", default='acrobot', help="Name of the run")
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load defaults from conf/ppo_configs.yaml (resolved relative to this
    # script), merging the sections named by --configs in order.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'conf/ppo_configs.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Register every config key as a CLI option so it can be overridden.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    # Re-parse the FULL command line now that all options are registered.
    # The original `parser.parse_args(remaining)` re-parsed only the leftover
    # arguments, silently resetting user-supplied --name/--cuda/--configs
    # values back to their defaults in the final params dict.
    args = parser.parse_args()
    params = vars(args)

    trainer = Trainer(params, device)
    trainer.train()



