#!/usr/bin/env python3
'''
完成适配,正在训练

参考链接：
1. https://github.com/smmislam/pytorch-planet?tab=readme-ov-file （参考这个）
2. https://github.com/abhayraw1/planet-torch
3. https://github.com/Kaixhin/PlaNet

训练记录：
20251126: 训练记录-1241.459，测试分数-506，技术训练
'''

import os
import ptan
import time
import gymnasium as gym
import argparse
from torch.distributions import Normal, Independent
from tensorboardX import SummaryWriter
import numpy as np
from typing import Any
from tqdm import tqdm

from lib import model, common
from collections import deque

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import yaml
import pathlib
import sys
import copy


# Gymnasium environment id used for both the training and the test environment.
ENV_ID = "Pendulum-v1"

def test_net(net, env, count=10, device="cpu"):
    '''
    Evaluate a deterministic policy network over full episodes.

    :param net: network mapping an observation batch to an action mean
    :param env: gymnasium-style environment (5-tuple ``step`` return)
    :param count: number of complete episodes to play
    :param device: torch device the network lives on
    :return: tuple of (mean episode reward, mean episode length)
    '''
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        obs, _ = env.reset()
        episode_over = False
        while not episode_over:
            obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
            # Deterministic action: take the predicted mean, clipped to the
            # environment's normalized action range.
            mu_v = net(obs_v)
            action = np.clip(mu_v.squeeze(dim=0).data.cpu().numpy(), -1, 1)
            obs, reward, done, truncated, _ = env.step(action)
            total_reward += reward
            total_steps += 1
            episode_over = done or truncated
    return total_reward / count, total_steps / count


class Trainer:
    '''
    Owns the PlaNet world model, the replay buffer and the full training loop.

    Training alternates between (a) fitting the world model on chunks sampled
    from the episode replay buffer and (b) collecting a fresh episode with
    CEM-based planning in latent space.
    '''

    def __init__(self, params, device):
        '''
        :param params: hyper-parameter dict (loaded from the YAML config)
        :param device: torch device string/object for model and tensors
        '''
        self.params = params
        self.device = device

        self.d_type = common.get_dtype(self.params['fp_precision'])
        self.env = common.wrapper_env(gym.make(ENV_ID, g=9.81, render_mode='rgb_array'), frame_skip=self.params['action_repeat'], params=params)
        self.test_env = common.wrapper_env(gym.make(ENV_ID, g=9.81, render_mode='rgb_array'), frame_skip=self.params['action_repeat'], params=params)
        self.action_dim = self.env.action_space.shape[0]

        # The world model (RSSM with encoder/decoder/reward heads).
        self.planet = model.Planet(params=self.params, obs_shape=self.env.observation_space.shape, action_dim=self.env.action_space.shape[0], device=device).to(self.d_type).to(self.device)
        print(f'Initialized {self.planet} ({common.count_parameters(self.planet)}) as the world-model')

        self.agent = model.AgentPlaNet(self.planet, action_dim=self.env.action_space.shape[0], params=self.params, device=device)
        self.random_agent = ptan.agent.EnvRandomSampleAgent(self.env, self.device)
        self.exp_source = ptan.experience.ExperienceSourceRAW(self.env, self.agent, steps_count=1)
        self.buffer = ptan.experience.ExperienceEpisodeeplayBuffer(self.exp_source, epsilon_size=self.params['max_episode'], d_type=self.d_type, device=self.device)

        self.writer = SummaryWriter(comment="-planet-rgb" + self.params['env_name'])
        # One optimizer over the whole world model — unlike Dreamer, which
        # optimizes each sub-model separately.
        self.optimizer = optim.Adam(params=self.planet.parameters(), lr=float(self.params['lr']), eps=float(self.params['adam_epsilon']))
        self.global_step = 0
        self.train_count = 0
        self.learning_step = 0
        self.save_path = os.path.join("saves", "planet-rgb-" + params['env_name'])
        os.makedirs(self.save_path, exist_ok=True)

    def load_trainer(self):
        '''
        Restore model/optimizer/counters from the newest checkpoint in
        ``self.save_path`` (no-op when no checkpoint exists).
        '''
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoints are sorted by the integer step embedded in the file
            # name (assumes names like "planet_epoch_<N>.<ext>" — the format
            # written by common.save_checkpoints).
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[2].split('.')[0]))

            if len(checkpoints) == 0:
                print("没有找到模型文件")
                return
            checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)
            self.planet.load_state_dict(checkpoint['planet'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.global_step = checkpoint['global_step']
            self.train_count = checkpoint['train_count']
            self.learning_step = checkpoint['learning_step']
            print("加载模型成功")

    def save_trainer(self):
        '''Persist model, optimizer state and progress counters.'''
        checkpoints = {
            "planet": self.planet.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "global_step": self.global_step,
            "train_count": self.train_count,
            "learning_step": self.learning_step
        }

        common.save_checkpoints(self.train_count, checkpoints, self.save_path, 'planet')

    def collect_seed_episodes(self):
        '''
        Warm up the replay buffer with random-action episodes before any
        world-model training takes place.
        '''
        print('缓冲区预热\n')
        self.exp_source.agent = self.random_agent
        while len(self.buffer) < self.params['n_seed_episodes']:
            self.buffer.populate(1)
        print(f'\rCollected {self.params["n_seed_episodes"]} episodes as initial seed data!')
        self.exp_source.agent = self.agent
        print('缓冲区预热完成\n')

    def train(self):
        '''
        Main loop: repeatedly fit the world model for ``collect_interval``
        gradient steps, checkpoint, then collect a new episode and run the
        periodic evaluations. Runs until interrupted.
        '''
        with ptan.common.utils.RewardTracker(self.writer) as tracker:
            with ptan.common.utils.TBMeanTracker(self.writer, batch_size=10) as tb_tracker:
                self.tracker = tracker
                self.tb_tracker = tb_tracker
                while True:
                    # Switch to training mode for the gradient updates.
                    self.planet.train()
                    self.learning_step += 1
                    for update_step in range(self.params['collect_interval']):
                        print(f'\rFitting world model : ({update_step+1}/{self.params["collect_interval"]})', end='')
                        # Sample fixed-length chunks of stored episodes.
                        sampled_episodes = self.buffer.sample(self.params['batch_size'], self.params['chunk_length'])
                        # One forward pass predicts reconstruction/reward/KL terms.
                        dist_predicted = self.planet(sampled_episodes=sampled_episodes)
                        loss, (recon_loss, kl_loss, reward_loss) = self.planet.compute_loss(target=sampled_episodes, dist_predicted=dist_predicted)
                        loss.backward()
                        # FIX: the hyper-parameter is named `max_grad_norm` and
                        # PlaNet clips by gradient *norm*, but the original code
                        # called clip_grad_value_ here.
                        nn.utils.clip_grad_norm_(self.planet.parameters(), max_norm=self.params['max_grad_norm'])
                        self.optimizer.step()
                        self.optimizer.zero_grad()
                        # Log the individual loss components.
                        self.writer.add_scalar('Reward/max_from_train_batch', sampled_episodes['reward'].max(), self.train_count)
                        self.writer.add_scalar('TrainLoss/obs_recon', recon_loss, self.train_count)
                        self.writer.add_scalar('TrainLoss/kl_div', kl_loss, self.train_count)
                        self.writer.add_scalar('TrainLoss/reward_prediction', reward_loss, self.train_count)
                        self.train_count += 1
                        self.global_step += 1
                    print('\rUpdated world model!' + 50*' ')
                    self.save_trainer()
                    # Data collection and evaluation need no gradients.
                    with torch.no_grad():
                        # Collect a new episode with the (noisy) planning agent.
                        self.collect_episode(tb_tracker)
                        self.evaluate_learning(step=self.learning_step+1)
                        self.evaluate_video_prediction(step=self.learning_step+1)

    def evaluate_learning(self, step):
        '''
        Play one test episode with CEM planning, logging the episode reward
        and (periodically) videos of the observed vs. reconstructed frames.

        :param step: learning step used for TensorBoard logging
        '''
        print('\rEvaluating learning progress ...', end='')
        self.planet.eval()  # switch to evaluation mode
        prev_obs, _ = self.test_env.reset()
        prev_obs = torch.from_numpy(prev_obs).to(self.d_type).to(self.device)
        # Fresh deterministic (RNN) hidden state for the new episode.
        h_state = self.planet.get_init_h_state(batch_size=1)
        # observed_frames: real observations; reconstructed_frames: decoder output.
        observed_frames, reconstructed_frames = list(), list()
        ep_reward = 0
        while True:
            observed_frames.append(prev_obs)
            # Posterior latent from the encoded real observation.
            encoded_obs = self.planet.obs_encoder(prev_obs.unsqueeze(dim=0).to(self.device))
            posterior_z = self.planet.repr_model(h_state, encoded_obs)
            z_state = posterior_z.sample()
            # Best action via open-loop planning in latent space (CEM).
            action = self.plan_action_with_cem(h_state, z_state)
            obs, reward, terminated, truncated, info = self.test_env.step(action.to('cpu').numpy())
            obs = torch.from_numpy(obs).to(self.d_type).to(self.device)
            ep_reward += reward
            # Reconstruct the observation from (h_state, z_state).
            recon_obs = self.planet.decoder_model(h_state, z_state).mean
            reconstructed_frames.append(recon_obs.squeeze())
            # Advance the deterministic state with the executed action.
            h_state = self.planet.rnn_model(h_state, z_state, action.unsqueeze(dim=0))
            prev_obs = copy.deepcopy(obs)
            if terminated or truncated:
                break
        # Shift frames from [-0.5, 0.5] back to [0, 1] for video logging.
        observed_frames = torch.stack(observed_frames).unsqueeze(dim=0) + 0.5
        reconstructed_frames = torch.clip(torch.stack(reconstructed_frames).unsqueeze(dim=0) + 0.5, min=0.0, max=1.0)
        self.writer.add_scalar('Reward/test_episodes', ep_reward, step)
        common.save_best_model(ep_reward, self.planet.state_dict(), self.save_path, "planet_best")
        if step % self.params['eval_gif_freq'] == 0:
            # FIX: pass global_step so the videos are indexed on the timeline.
            self.writer.add_video(f'ObservedTestEpisode/{step}', observed_frames.transpose(3, 4), global_step=step)
            self.writer.add_video(f'ReconstructedTestEpisode/{step}', reconstructed_frames.transpose(3, 4), global_step=step)
            print('\rLearning progress evaluation complete! Saved the episode!')
        else:
            print('\rLearning progress evaluation is complete!')

    def plan_action_with_cem(self, init_h_state, init_z_state):
        '''
        Cross-entropy-method planning in latent space.

        :param init_h_state: current deterministic (RNN) hidden state
        :param init_z_state: current stochastic latent (posterior sample)
        :return: first action of the best plan found (tensor of shape (action_dim,))
        '''
        # `planning_horizon` = how many future steps each candidate plan covers.
        action_dist = Independent(Normal(loc=torch.zeros(self.params['planning_horizon'], self.action_dim), scale=1.0), reinterpreted_batch_ndims=2)
        # `plan_optimization_iter` = number of CEM refinement iterations.
        for _ in range(self.params['plan_optimization_iter']):
            reward_buffer = list()
            # Evaluate `n_plans` candidate action sequences in parallel by
            # replicating the start state along the batch dimension.
            h_state = torch.clone(init_h_state).repeat(self.params['n_plans'], 1)
            z_state = torch.clone(init_z_state).repeat(self.params['n_plans'], 1)
            # Sample candidate plans, clipped to the valid action range.
            candidate_plan = torch.clip_(
                action_dist.sample(sample_shape=torch.Size([self.params['n_plans']])).to(self.d_type).to(self.device),
                min=self.params['min_action'], max=self.params['max_action'])
            # Roll each plan forward through the learnt dynamics.
            for time_step in range(self.params['planning_horizon']):
                batched_ts_action = candidate_plan[:, time_step, :]
                # Next deterministic state from the dynamics model.
                h_state = self.planet.rnn_model(h_state, z_state, batched_ts_action)
                # Prior latent from the transition model (no real observation).
                prior_z = self.planet.transition_model(h_state)
                z_state = prior_z.sample()
                # Predicted reward for this step of each plan.
                predicted_reward = self.planet.reward_model(h_state, z_state)
                # Clip to plausible bounds; with action repeat a single step can
                # accumulate up to (1 + action_repeat) single-frame rewards.
                sampled_reward = torch.clip(predicted_reward.mean,
                                            min=self.params['min_reward'], max=(1+self.params['action_repeat'])*self.params['max_reward'])
                reward_buffer.append(sampled_reward)
            # Total predicted return per plan.
            plan_reward = torch.stack(reward_buffer).squeeze().sum(dim=0)
            # Refit the action distribution on the top-k plans (CEM update).
            chosen_actions = candidate_plan[torch.topk(plan_reward, k=self.params['top_k']).indices]
            action_mu, action_std = chosen_actions.mean(dim=0), chosen_actions.std(dim=0)
            action_dist = Independent(Normal(loc=action_mu, scale=action_std+1e-6), reinterpreted_batch_ndims=2)
        # Execute only the first action of the optimized plan (MPC style).
        optimized_next_action = action_dist.mean[0]
        return optimized_next_action

    def evaluate_video_prediction(self, step):
        '''
        Open-loop video prediction evaluation: condition the model on a few
        real "context" frames via the posterior, then predict purely from the
        prior, and log observed/predicted/overlay videos side by side.

        :param step: learning step used for TensorBoard logging
        '''
        if step % self.params['vp_eval_freq'] == 0:
            print('\rEvaluating video prediction ability ...', end='')
            n_context_frames = 5     # frames conditioned on real observations
            n_predicted_frames = 50  # frames predicted open-loop from the prior
            self.planet.eval()
            prev_obs, _ = self.test_env.reset()
            prev_obs = torch.from_numpy(prev_obs).to(self.d_type).to(self.device)
            h_state = self.planet.get_init_h_state(batch_size=1)

            # observed_frames: real env frames; predicted_frames: decoder output.
            observed_frames, predicted_frames = list(), list()
            observed_frames.append(prev_obs)
            # Feed context: posterior latents from real observations.
            for _ in range(n_context_frames):
                encoded_obs = self.planet.obs_encoder(prev_obs.unsqueeze(dim=0).to(self.device))
                posterior_z = self.planet.repr_model(h_state, encoded_obs)
                z_state = posterior_z.sample()
                # Plan the next action in latent space.
                action = self.plan_action_with_cem(h_state, z_state)
                obs, reward, terminated, truncated, info = self.test_env.step(action.to('cpu').numpy())
                observed_frames.append(torch.from_numpy(obs).to(self.d_type).to(self.device))
                # Reconstruction of the current (real) frame.
                recon_obs = self.planet.decoder_model(h_state, z_state).mean
                predicted_frames.append(recon_obs.squeeze())
                h_state = self.planet.rnn_model(h_state, z_state, action.unsqueeze(dim=0))
                prev_obs = copy.deepcopy(obs)
                prev_obs = torch.from_numpy(prev_obs).to(self.d_type).to(self.device)

            # FIX: this loop was indented inside the context loop above, so the
            # 50-step open-loop prediction ran once per context frame instead of
            # once after all context frames were fed. It now runs exactly once.
            for _ in range(n_predicted_frames):
                # Prior latent only — predictions are detached from real frames.
                prior_z = self.planet.transition_model(h_state)
                z_state = prior_z.sample()
                action = self.plan_action_with_cem(h_state, z_state)
                obs, reward, terminated, truncated, info = self.test_env.step(action.to('cpu').numpy())
                # Keep real frames for side-by-side comparison.
                observed_frames.append(torch.from_numpy(obs).to(self.d_type).to(self.device))
                recon_obs = self.planet.decoder_model(h_state, z_state).mean
                predicted_frames.append(recon_obs.squeeze())
                h_state = self.planet.rnn_model(h_state, z_state, action.unsqueeze(dim=0))

            # Shift frames back to [0, 1]; drop the trailing observed frame so
            # both sequences have equal length.
            observed_frames = 0.5 + torch.stack(observed_frames[:-1]).to(self.device)
            predicted_frames = torch.clip(0.5 + torch.stack(predicted_frames), min=0.0, max=1.0)
            # Overlay of inverted real frames and predictions for comparison.
            overlay_frames = torch.clip(0.5*(1 - observed_frames) + 0.5*predicted_frames, min=0.0, max=1.0)

            # Stack observed/predicted/overlay side by side and log the video.
            combined_frame = torch.cat([observed_frames, predicted_frames, overlay_frames], dim=3).unsqueeze(dim=0)
            self.writer.add_video(f'VideoPrediction/after_training_step_{step}', combined_frame.transpose(3, 4), global_step=step)
            print('\rVideo prediction evaluation is complete! Saved the episode!')

    def collect_episode(self, tb_tracker):
        '''
        Collect exactly one new episode with the CEM-planning agent and log
        its reward/length.

        :param tb_tracker: TBMeanTracker used for step-count logging
        '''
        print('\rCollecting a new episode with CEM-based planning ...', end='')
        self.prev_len = len(self.buffer)
        self.planet.eval()

        while True:
            self.global_step += 1
            self.buffer.populate(1)
            # pop_rewards_steps() returns a non-empty list once an episode ends.
            rewards_steps = self.exp_source.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                tb_tracker.track("episode_steps", steps[0], self.global_step)
                self.tracker.reward(rewards[0], self.global_step)
                break


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # FIX: the original `default=True, action='store_true'` made --cuda a
    # no-op (always True, impossible to disable). BooleanOptionalAction keeps
    # the same default while also providing --no-cuda.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument('--configs', nargs='+', default=['defaults'])
    parser.add_argument("-n", "--name", default='pendulum', help="Name of the run")
    args, remaining = parser.parse_known_args()
    device = common.select_device(args=args)

    # Load hyper-parameters from configs.yaml, merging the requested sections
    # in order (later sections override earlier ones).
    configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'config/pendulum_configs.yaml').read_text())
    default_params = dict()
    for name in args.configs:
        default_params.update(configs[name])
    # Expose every config key as a CLI flag so it can be overridden.
    for key, value in default_params.items():
        parser.add_argument('--' + key, type=type(value), default=value)
    args = parser.parse_args(remaining)
    params = vars(args)
    # With action repeat, one environment step accumulates several frames'
    # worth of reward, so scale the per-frame minimum accordingly.
    params['min_reward'] = params['min_reward'] * params['action_repeat']

    trainer = Trainer(params, device)
    trainer.load_trainer()
    trainer.collect_seed_episodes()
    trainer.train()
