#!/usr/bin/env python3
'''
Not yet fully adapted/ported.

Reference implementation: https://github.com/alirezakazemipour/PPO-RND/tree/RNN_Policy

Training notes:
'''
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp

import time
import yaml
import pathlib
import sys
import copy
import os
import pickle
from copy import deepcopy
from tensorboardX import SummaryWriter
import threading

from lib import model, common

import ale_py
from gym.wrappers.normalize import RunningMeanStd

# Register the Atari (ALE) environment ids with gymnasium so env creation works.
gym.register_envs(ale_py)


class Trainer:

    def __init__(self, params, device):
        """Set up training state, logging, the environment, models and workers.

        Args:
            params: configuration dict (merged from config.yaml and CLI).
            device: torch device used for training.
        """
        self.params = params
        self.device = device

        # Sampler / optimizer settings.
        self.num_samples = params['num_samples']
        self.lr = params['learning_rate']
        self.eps = params['eps']

        # Progress counters.
        self.frame_idx = 0
        self.train_count = 0
        self.best_reward = None

        # Rollout and PPO/RND hyper-parameters.
        self.total_normalization_steps = params['trajectory_size'] * params['pre_normalization_steps']
        self.trajectory_size = params['trajectory_size']
        self.init_gamma = params['init_gamma']
        self.gae_lambda = params['gae_lambda']
        self.ext_gamma = params['ext_gamma']
        self.ext_adv_coeff = params['ext_adv_coeff']
        self.int_adv_coeff = params['int_adv_coeff']
        self.ppo_epochs = params['ppo_epochs']
        self.ppo_batch_size = params['ppo_batch_size']
        self.clip_range = params['clip_range']
        self.predictor_proportion = params['predictor_proportion']
        self.ent_coef = params['ent_coef']
        self.clip_grad = params['clip_grad']

        # Output locations for checkpoints, tensorboard summaries and logs.
        self.save_path = os.path.join("saves", "ppo_lstm_rnd", params['name'])
        os.makedirs(self.save_path, exist_ok=True)
        self.writer = SummaryWriter(comment="-" + "ppo_lstm_rnd_" + params['name'])
        self.logger = common.setup_logger(self.save_path)

        # Order matters: workers spawned in build_buffer need env shapes and models.
        self.build_env()
        self.build_model()
        self.build_buffer()


    def build_env(self):
        """Create the evaluation environment and record its I/O shapes."""
        env = common.wrap_dqn(self.params['env_name'])
        self.test_env = env
        self.obs_shape = env.observation_space.shape
        self.action_shape = env.action_space.n


    def build_model(self):
        """Instantiate the policy/RND networks, optimizer, scheduler and stats."""
        self.rnd_model = model.RNDModel(self.obs_shape, self.action_shape).to(device=self.device)
        self.rnd_predictor_model = model.PredictorModel(self.obs_shape).to(device=self.device)
        self.target_rnd_model = model.TargetModel(self.obs_shape).to(device=self.device)

        # The RND target network stays frozen; only the predictor is trained.
        for p in self.target_rnd_model.parameters():
            p.requires_grad = False

        print(self.rnd_model)
        print(self.rnd_predictor_model)

        # A single optimizer updates both the policy network and the RND predictor.
        self.combined_params = list(self.rnd_model.parameters()) + list(self.rnd_predictor_model.parameters())
        self.optimizer = torch.optim.Adam(self.combined_params, lr=self.lr, eps=self.eps)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=10000, gamma=0.9)

        # Running statistics for intrinsic-reward and observation normalization.
        self.reward_nms = RunningMeanStd(shape=(1,))
        self.obs_rms = RunningMeanStd(shape=(1, 84, 84))


    def build_buffer(self):
        """Spawn one data-collection process per sampler, all feeding one queue."""
        self.sample_queue = mp.Queue(self.num_samples)
        for worker_id in range(self.num_samples):
            proc = mp.Process(
                target=Trainer.collect_data,
                args=(self.rnd_model, self.sample_queue, self.params,
                      self.obs_rms, worker_id, self.frame_idx, self.device,
                      self.total_normalization_steps, self.logger),
            )
            proc.start()


    @staticmethod
    def get_track_info(info):
        print(info)


    @staticmethod
    def collect_data(rnd_model, sample_queue, params, obs_rms, i, frame_idx, device, total_normalization_steps, logger):
        """Worker-process entry point: roll out the policy and queue trajectories.

        Runs in a separate process (spawned by build_buffer). First performs an
        observation-normalization warm-up, then streams fixed-length
        trajectories into ``sample_queue`` for the trainer to consume.

        Fix: marked as @staticmethod for consistency with get_track_info — it
        takes no ``self`` and is invoked as ``Trainer.collect_data`` with
        explicit arguments; the call pattern is unchanged.

        Args:
            rnd_model: shared policy/RND network used for action selection.
            sample_queue: mp.Queue receiving lists of experience tuples.
            params: configuration dict.
            obs_rms: shared RunningMeanStd for observation normalization.
            i: worker index (only used to tag this worker's tensorboard writer).
            frame_idx: global frame counter at spawn time; a restart past the
                warm-up budget skips the warm-up phase.
            device: torch device for the agent.
            total_normalization_steps: number of warm-up steps for obs_rms.
            logger: shared logger instance.
        """
        env = common.wrap_dqn(params['env_name'])
        agent = model.RNDAgent(rnd_model, device=device)
        exp_source = ptan.experience.ExperienceSourceRAW(env, agent, steps_count=1)
        writer = SummaryWriter(comment="-" + "ppo_lstm_rnd_" + params['name'] + "_" + str(i))

        trajectory = []  # experience buffer, flushed every trajectory_size steps
        with ptan.common.utils.RewardTracker(writer=writer, info_callback=Trainer.get_track_info) as tracker:
            if frame_idx < total_normalization_steps:
                logger.info("进行预热")
                exps = []
                for step_idx, exp in enumerate(exp_source):
                    exps.append(exp)
                    if step_idx >= total_normalization_steps:
                        # NOTE(review): experiences collected since the last
                        # flush are discarded on break — presumably acceptable
                        # for a statistics-only warm-up; confirm.
                        break

                    if len(exps) % (1 * params['trajectory_size']) == 0:
                        # exp layout: t[0][4] is the next-state observation.
                        obs_rms.update(np.stack([t[0][4] for t in exps]))
                        exps = []

            for step_idx, exp in enumerate(exp_source):
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    writer.add_scalar("episode_steps", np.mean(steps), step_idx + frame_idx)
                    tracker.reward(np.mean(rewards), step_idx + frame_idx)

                trajectory.append(exp)
                if len(trajectory) < params['trajectory_size']:
                    continue

                sample_queue.put(trajectory)
                trajectory = []

    
    def load_model(self):
        """Restore the newest 'epoch' checkpoint from save_path, if one exists.

        Reloads the networks, optimizer, scheduler, normalization statistics
        and frame/train counters so training resumes where it stopped.
        """
        if not os.path.exists(self.save_path) or len(os.listdir(self.save_path)) == 0:
            return

        # Checkpoint files look like '<tag>_epoch_<count>.<ext>'; sort by count.
        checkpoints = sorted(
            (f for f in os.listdir(self.save_path) if "epoch" in f),
            key=lambda f: int(f.split('_')[2].split('.')[0]),
        )
        if not checkpoints:
            return

        ckpt_path = os.path.join(self.save_path, checkpoints[-1])
        state = torch.load(ckpt_path, map_location=self.device, weights_only=False)

        self.rnd_model.load_state_dict(state['rnd_model'])
        self.rnd_predictor_model.load_state_dict(state['rnd_predictor_model'])
        self.target_rnd_model.load_state_dict(state['target_rnd_model'])
        # Keep the RND target network frozen after reloading its weights.
        for param in self.target_rnd_model.parameters():
            param.requires_grad = False

        self.optimizer.load_state_dict(state['opt'])
        self.scheduler.load_state_dict(state['scheduler'])

        self.obs_rms.mean = state['obs_rms_mean']
        self.obs_rms.var = state['obs_rms_var']
        self.obs_rms.count = state['obs_rms_count']
        self.reward_nms.mean = state['int_reward_nms_mean']
        self.reward_nms.var = state['int_reward_nms_var']
        self.reward_nms.count = state['int_reward_nms_count']

        self.frame_idx = state['frame_idx']
        self.train_count = state['train_count']

        print("加载模型成功")
        print(f"learning rate: {self.scheduler.get_last_lr()[0]}")
        print(f"scheduler step: {self.scheduler.last_epoch}")


    def save_model(self):
        """Persist the full training state as a 'ppo-rnd' checkpoint.

        Bug fix: the previous body was copied from an R2D2 trainer and
        referenced attributes that do not exist on this class
        (self.num_updates, self.online_net, self.buffer), so any call raised
        AttributeError.  It now writes the same checkpoint layout that
        load_model() reads.
        """
        checkpoint = {
            'rnd_model': self.rnd_model.state_dict(),
            'rnd_predictor_model': self.rnd_predictor_model.state_dict(),
            'target_rnd_model': self.target_rnd_model.state_dict(),
            'opt': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'obs_rms_mean': self.obs_rms.mean,
            'obs_rms_var': self.obs_rms.var,
            'obs_rms_count': self.obs_rms.count,
            'int_reward_nms_mean': self.reward_nms.mean,
            'int_reward_nms_var': self.reward_nms.var,
            'int_reward_nms_count': self.reward_nms.count,
            'frame_idx': self.frame_idx,
            'train_count': self.train_count,
        }

        common.save_checkpoints(self.train_count, checkpoint, self.save_path, "ppo-rnd", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")


    def discounted_reward(self, predictor_model, target_model, next_states, obs_rms, device="cpu"):
        next_states = np.clip((next_states  - obs_rms.mean) / np.sqrt(obs_rms.var ** 0.5), -5, 5, dtype='float32')
        next_states_v = torch.from_numpy(next_states).to(device)
        predictor_encoded_features = predictor_model(next_states_v)
        target_encoded_features = target_model(next_states_v)

        int_reward = (predictor_encoded_features - target_encoded_features).pow(2).mean(1)
        return int_reward.cpu().data.numpy()
    

    def compute_pg_loss(self, ratio, adv):
        new_r = ratio * adv
        clamped_r = torch.clamp(ratio, 1 - self.clip_range, 1 + self.clip_range) * adv
        loss = -torch.min(new_r, clamped_r).mean()
        return loss
    
    
    def get_gae(self, rewards, values, next_values, dones, gamma):
        returns = [[] for _ in range(1)]
        extended_values = np.zeros((1, self.trajectory_size + 1))
        for worker in range(1):
            extended_values[worker] = np.append(values[worker], next_values[worker][-1])
            gae = 0
            for step in reversed(range(len(rewards[worker]))):
                delta = rewards[worker][step] + gamma * extended_values[worker][step + 1] * (1 - dones[worker][step]) - extended_values[worker][step]
                gae = delta + gamma * self.gae_lambda * (1 - dones[worker][step]) * gae
                returns[worker].insert(0, gae + extended_values[worker][step])

        return np.concatenate(returns)
    

    def calculate_rnd_loss(self, next_states, target_model, predictor_model, device="cpu"):
        encoded_target_features = target_model(next_states)
        encoded_predictor_features = predictor_model(next_states)
        loss = (encoded_predictor_features - encoded_target_features).pow(2).mean(-1)
        mask = torch.rand(loss.size(), device=device)
        mask = (mask < self.predictor_proportion) 
        loss = (loss * mask).sum() / torch.max(mask.sum(), torch.Tensor([1]).to(device=device))
        return loss


    def __train(self):
        """Run one PPO + RND update from a single sampled trajectory.

        Pulls one trajectory from the worker queue, computes intrinsic (RND)
        and extrinsic GAE returns, runs ppo_epochs of clipped-PPO updates over
        sequential mini-batches, then steps the LR scheduler and writes a
        checkpoint.

        Fixes: removed a duplicated ``traj_ext_values_v`` assignment and three
        dead locals (``batch_log_probs``, ``traj_next_states``,
        ``total_next_obs``) that were computed but never used.
        """
        trajectory = self.sample_queue.get()

        # Unpack the experience tuples from the sampler processes.
        # Layout per step: t[0] = (state, action, reward, done, next_state,
        # agent_outputs) where agent_outputs carries values/log-probs/hiddens
        # — assumed from the indexing below; confirm against model.RNDAgent.
        traj_states = [t[0][0] for t in trajectory]
        traj_actions = [t[0][1] for t in trajectory]
        traj_next_state = [t[0][4] for t in trajectory]
        traj_rewards = [t[0][2] for t in trajectory]
        traj_done = [t[0][3] for t in trajectory]
        traj_int_values = [t[0][5][1] for t in trajectory]
        traj_ext_values = [t[0][5][2] for t in trajectory]
        traj_log_probs = [t[0][5][3] for t in trajectory]
        traj_hidden_states = [t[0][5][4] for t in trajectory]
        traj_next_hidden_states = [t[0][5][5] for t in trajectory]

        traj_states_v = torch.FloatTensor(np.array(traj_states)).to(self.device)
        traj_actions_v = torch.FloatTensor(np.array(traj_actions)).to(self.device)
        traj_next_state_v = torch.FloatTensor(np.array(traj_next_state)).to(self.device)
        traj_rewards_v = torch.FloatTensor(np.array(traj_rewards)).to(self.device)
        traj_done_v = torch.FloatTensor(np.array(traj_done)).to(self.device)
        traj_ext_values_v = torch.FloatTensor(traj_ext_values).to(self.device)
        traj_int_values_v = torch.FloatTensor(traj_int_values).to(self.device)
        traj_log_probs_v = torch.FloatTensor(traj_log_probs).to(self.device)
        traj_hidden_states_v = torch.cat(traj_hidden_states).to(self.device)
        traj_next_hidden_states_v = torch.cat(traj_next_hidden_states).to(self.device)

        # Intrinsic rewards, normalized by the running std of the discounted
        # intrinsic-return stream (standard RND reward normalization).
        total_int_rewards = self.discounted_reward(self.rnd_predictor_model, self.target_rnd_model, traj_next_state, self.obs_rms, device=self.device)
        intrinsic_return = [[] for _ in range(1)]
        for worker in range(1):
            rewems = 0
            for step in reversed(range(self.trajectory_size)):
                rewems = rewems * self.init_gamma + total_int_rewards[step]
                intrinsic_return[0].insert(0, rewems)
        self.reward_nms.update(np.ravel(intrinsic_return).reshape(-1, 1))
        total_int_rewards = total_int_rewards / self.reward_nms.var ** 0.5

        # Bootstrap values for the step after the trajectory end.
        # (next_actions/next_log_probs are unused downstream but kept so the
        # torch RNG stream is unchanged relative to the original code.)
        with torch.no_grad():
            next_dists, next_int_values, next_ext_values, next_action_prob, *_ = self.rnd_model(traj_next_state_v, traj_next_hidden_states_v)
            next_actions = next_dists.sample()
            next_log_probs = next_dists.log_prob(next_actions)

        # GAE returns: the intrinsic stream is non-episodic (dones forced to 0).
        int_rets = self.get_gae(np.expand_dims(total_int_rewards, axis=0), traj_int_values_v.unsqueeze(0).cpu(), next_int_values.unsqueeze(0).cpu(), np.zeros_like(traj_done_v.unsqueeze(0).cpu()), gamma=self.init_gamma)
        ext_rets = self.get_gae(traj_rewards_v.unsqueeze(0).cpu(), traj_ext_values_v.unsqueeze(0).cpu(), next_ext_values.unsqueeze(0).cpu(), traj_done_v.unsqueeze(0).cpu(), gamma=self.ext_gamma)

        ext_values = traj_ext_values_v.cpu().numpy()
        ext_advs = ext_rets - ext_values

        int_values = traj_int_values_v.cpu().numpy()
        int_advs = int_rets - int_values

        # Mix the two advantage streams and standardize.
        advs = ext_advs * self.ext_adv_coeff + int_advs * self.int_adv_coeff
        advs = (advs - advs.mean()) / (advs.std() + 1e-8)
        # Keep the observation statistics current for future rollouts.
        self.obs_rms.update(np.array(traj_next_state))

        # PPO epochs over sequential mini-batches.
        for epoch in range(self.ppo_epochs):
            for batch_ofs in range(0, len(trajectory), self.ppo_batch_size):
                # TODO(review): the reference implementation samples batches
                # randomly rather than slicing sequentially.
                states_v = traj_states_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                next_state_v = traj_next_state_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                actions_v = traj_actions_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                logprobs_v = traj_log_probs_v[batch_ofs:batch_ofs + self.ppo_batch_size]
                batch_int_rets = torch.FloatTensor(int_rets[batch_ofs:batch_ofs + self.ppo_batch_size]).to(self.device)
                batch_ext_rets = torch.FloatTensor(ext_rets[batch_ofs:batch_ofs + self.ppo_batch_size]).to(self.device)
                batch_advs = torch.FloatTensor(advs[batch_ofs:batch_ofs + self.ppo_batch_size]).to(self.device)
                batch_hidden_states = traj_hidden_states_v[batch_ofs:batch_ofs + self.ppo_batch_size]

                dist, int_value, ext_value, *_ = self.rnd_model(states_v, batch_hidden_states)
                entropy = dist.entropy().mean()
                new_log_probs = dist.log_prob(actions_v)
                ratio = (new_log_probs - logprobs_v).exp()
                pg_loss = self.compute_pg_loss(ratio, batch_advs)

                int_value_loss = F.mse_loss(int_value, batch_int_rets)
                ext_value_loss = F.mse_loss(ext_value, batch_ext_rets)

                critic_loss = 0.5 * (int_value_loss + ext_value_loss)
                rnd_loss = self.calculate_rnd_loss(next_state_v, self.target_rnd_model, self.rnd_predictor_model, device=self.device)
                total_loss = critic_loss + pg_loss - self.ent_coef * entropy + rnd_loss
                self.optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.combined_params, self.clip_grad)
                self.optimizer.step()

        self.scheduler.step()
        self.frame_idx += len(trajectory)  # advance the global frame counter
        self.train_count += 1

        # Persist everything load_model() needs to resume training.
        checkpoint = {
            'rnd_model': self.rnd_model.state_dict(),
            'rnd_predictor_model': self.rnd_predictor_model.state_dict(),
            'target_rnd_model': self.target_rnd_model.state_dict(),
            'opt': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'obs_rms_mean': self.obs_rms.mean,
            'obs_rms_var': self.obs_rms.var,
            'obs_rms_count': self.obs_rms.count,
            'int_reward_nms_mean': self.reward_nms.mean,
            'int_reward_nms_var': self.reward_nms.var,
            'int_reward_nms_count': self.reward_nms.count,
            'frame_idx': self.frame_idx,
            'train_count': self.train_count
        }
        common.save_checkpoints(self.train_count, checkpoint, self.save_path, "ppo-rnd")
    

    
    def train_model(self):
        """Main training loop: update forever, evaluating every 10 updates."""
        with common.RewardTracker(self.writer, stop_reward=99999) as tracker:
            while True:
                self.__train()
                if self.train_count % 10 == 0:
                    self.test_model()


    def test_model(self):
        """Evaluate the current policy greedily and checkpoint new best scores."""
        ts = time.time()
        self.rnd_model.eval()
        rewards, steps = Trainer.eval_model(self.rnd_model, self.test_env, count=10, device=self.device)
        self.rnd_model.train()
        print("Test done in %.2f sec, reward %.3f, steps %d" % (
            time.time() - ts, rewards, steps))
        self.writer.add_scalar("test_reward", rewards, self.train_count)
        self.writer.add_scalar("test_steps", steps, self.train_count)
        if self.best_reward is None or self.best_reward < rewards:
            if self.best_reward is not None:
                print("Best reward updated: %.3f -> %.3f" % (self.best_reward, rewards))
            self.best_reward = rewards
            common.save_best_model(rewards, self.rnd_model.state_dict(), self.save_path, 'ppo-rnd')
            # Bug fix: this message used to print even when no new best model
            # was saved; it now only appears alongside an actual save.
            print(f"save best model, current test score: {rewards}, mean_step: {steps}")


    @staticmethod
    @torch.no_grad()
    def eval_model(net, env, count=10, device="cpu"):
        """Greedy evaluation over ``count`` episodes.

        Picks the argmax action from the policy's action-probability output
        and aborts an episode if the same action repeats more than 100 times
        in a row (stuck agent).

        Bug fix: decorator order — @staticmethod must be outermost.  The
        original applied torch.no_grad() to the staticmethod descriptor
        (decorators apply bottom-up), which is not reliably callable or
        wrappable across Python/torch versions.

        Returns:
            (mean_reward, mean_steps) averaged across the evaluated episodes.
        """
        rewards = 0.0
        steps = 0
        same_action_count = 0
        pre_action = None
        for _ in range(count):
            obs, _ = env.reset()
            # NOTE(review): the hidden state is zero-initialized once and never
            # updated from the network's output within an episode — only valid
            # if the model manages recurrence internally; confirm against
            # model.RNDModel.
            init_hidden = torch.zeros((1, 256)).to(device)
            while True:
                obs_v = ptan.agent.float32_preprocessor(np.array(obs)[np.newaxis, :]).to(device)
                probs = net(obs_v, init_hidden)[3]  # index 3 = action probabilities
                action = probs.squeeze(dim=0).data.cpu().argmax().item()
                if pre_action == action:
                    same_action_count += 1
                    if same_action_count > 100:
                        break
                else:
                    same_action_count = 0
                    pre_action = action
                obs, reward, done, trunc, _ = env.step(action)
                # env.render()
                done = done or trunc
                rewards += reward
                steps += 1
                if done:
                    break
        return rewards / count, steps / count



# Script entry point: parse CLI args, merge config.yaml sections, then train.
if __name__ == "__main__":
    # 'spawn' start method so models/tensors are shared safely with workers.
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' means this
    # flag is always True and cannot be disabled from the command line.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='laserGates', help="Name of the run")
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

     # parser.add_argument('--configs', nargs='+', required=True)
    # Comment the line above and comment out the line below if you want to debug in IDE like PyCharm
    # Update from configs.yaml: later sections override earlier ones.
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/config.yaml').read_text(encoding='utf-8'))
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Update from cli: every config key becomes an overridable CLI option.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)
    # Number of PPO updates implied by the total timestep budget.
    params['num_updates'] = int(params['total_timesteps'] / params['trajectory_size'])
    params['device'] = device

    trainer = Trainer(params=params, device=device)
    trainer.load_model()
    trainer.train_model()

