
import gymnasium as gym
import ptan
import argparse
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
import multiprocessing
from PIL import ImageDraw

import time
import yaml
import pathlib
import sys
import os
from copy import deepcopy
import wandb
import torchvision

from lib import model, common

class Trainer:

    def __init__(self, params, device):
        """Set up paths, wandb, the environment, the agent and the replay buffer.

        Args:
            params: configuration dict (must contain 'name', 'group', 'wandb', ...).
            device: torch device the agent and buffers live on.
        """
        self.device = device
        # Bug fix: the original read self.params at the wandb calls below
        # before ever assigning it, raising AttributeError. Store the incoming
        # config first; it is replaced by the wandb-merged config afterwards.
        self.params = params
        self.log_every = 20  # interval (in buffer steps) between metric logs
        self.frame_idx = 0
        self.train_count = 0
        self.pre_train = False

        self.save_path = os.path.join("saves", "ppo_lstm_rnd", self.params['name'])
        os.makedirs(self.save_path, exist_ok=True)
        wandb.require(experiment='ppo_lstm_rnd_' + self.params['name'])
        wandb.init(config=self.params, project=self.params['name'], group=self.params['group'], mode=self.params['wandb'])

        # Adopt wandb's merged config (may include sweep overrides)
        self.params = dict(wandb.config)
        self.logger = common.setup_logger(self.save_path)

        self.build_env()
        self.build_model()
        if self.pre_train:
            self.build_predict_buffer()
        else:
            self.build_random_buffer()

        # Metrics listed here are not aggregated by the summarizer;
        # their latest value is reported instead.
        except_keys = ['buffer/size', 'buffer/total_reward', 'buffer/num_episodes']
        self.summarizer = common.MetricsSummarizer(except_keys=except_keys)
        self.last_eval = 0        # buffer size at the time of the last evaluation
        self.total_eval_time = 0  # cumulative seconds spent inside eval_model()


    def build_env(self):
        """Create the wrapped environment and cache its observation/action specs."""
        env = common.wrap_dqn(self.params['env_name'])
        self.env = env
        self.obs_shape = env.observation_space.shape
        self.action_shape = env.action_space.n
        self.action_meanings = env.get_action_meanings()


    def build_model(self):
        """Instantiate the agent for this environment and move it to the device."""
        num_actions = self.env.action_space.n
        self.agent = model.Agent(self.params, num_actions).to(self.device)


    def build_random_buffer(self):
        """Wire up a random-policy agent, its experience source and a replay buffer."""
        random_agent = ptan.agent.EnvRandomSampleAgent(self.env, self.device)
        source = ptan.experience.ExperienceSourceRAW(self.env, random_agent, steps_count=1)
        self.randomAgent = random_agent
        self.experence = source
        self.replayBuffer = common.ExperienceRawReplayBuffer(
            source, self.params['buffer_capacity'], device=self.device)

    
    def build_predict_buffer(self):
        """Switch the experience source to the learned TWM agent.

        Reuses the existing replay buffer when one was already created (e.g. by
        build_random_buffer); otherwise a fresh buffer is allocated.
        """
        # Robustness fix: the original dereferenced self.replayBuffer before the
        # None check, so it crashed with AttributeError whenever this method ran
        # first (the very case the None branch is meant to handle).
        buffer = getattr(self, 'replayBuffer', None)
        self.twmAgent = model.TWMAgent(self.params, self.agent.wm, self.agent.ac, buffer, self.device)
        self.experence = ptan.experience.ExperienceSourceRAW(self.env, self.twmAgent, steps_count=1)
        if buffer is None:
            self.replayBuffer = common.ExperienceRawReplayBuffer(self.experence, self.params['buffer_capacity'], device=self.device)
        else:
            buffer.set_experience_source(self.experence)

    
    def load_model(self):
        """Restore training state from the newest checkpoint in save_path, if any.

        Restores frame_idx, train_count, agent weights, params and the
        pre_train flag. Does nothing when no checkpoint exists.
        """
        if os.path.exists(self.save_path) and len(os.listdir(self.save_path)) > 0:
            # Checkpoint files look like "<tag>_epoch_<N>.<ext>"; sort by N so
            # the last entry is the most recent one.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(self.save_path)),
                                key=lambda x: int(x.split('_')[2].split('.')[0]))

            if len(checkpoints) > 0:
                checkpoint = torch.load(os.path.join(self.save_path, checkpoints[-1]), map_location=self.device, weights_only=False)

                self.frame_idx = checkpoint['frame_idx']
                self.train_count = checkpoint['train_count']
                # Bug fix: save_model() stores the weights under key "net";
                # the original read 'agent', which is never written (KeyError).
                self.agent.load_state_dict(checkpoint['net'])
                self.params = checkpoint['params']
                self.pre_train = checkpoint['pre_train']

                print("加载模型成功")


    def save_model(self):
        """Write a checkpoint of the current training state to save_path.

        The checkpoint stores the config, counters, agent weights (key "net")
        and the pre_train flag; only the 5 most recent checkpoints are kept.
        """
        # Bug fix: the original dict literal listed "params" twice; the
        # duplicate was silently dropped by Python but was still a defect.
        checkpoint = {
            "params": self.params,
            "train_count": self.train_count,
            "frame_idx": self.frame_idx,
            "net": self.agent.state_dict(),
            "pre_train": self.pre_train,
        }

        common.save_checkpoints(self.train_count, checkpoint, self.save_path, "twm", keep_last=5)
        print(f"Saved checkpoint to {self.save_path}")


    def __train(self, tracker):
        """Main training loop.

        Interleaves real-environment data collection with world-model /
        actor-critic updates until the replay buffer is full, evaluating and
        logging along the way.

        Args:
            tracker: reward tracker receiving mean episode rewards.
        """
        # Bug fix: the original did `metrics.update(eval_metrics)` before
        # `metrics` existed (NameError); start from the eval metrics directly.
        metrics = self.eval_model(is_final=False)
        self.summarizer.append(metrics)
        wandb.log(self.summarizer.summarize())

        budget = self.params['budget'] - self.params['pretrain_budget']  # samples left after pretraining
        budget_per_step = 0
        budget_per_step += self.params['wm_train_steps'] * self.params['wm_batch_size'] * self.params['wm_sequence_length']
        budget_per_step += self.params['ac_batch_size'] * self.params['ac_horizon']  # AC samples per train step
        num_batches = budget / budget_per_step  # total number of train steps the budget allows
        # Spread the remaining data collection evenly over the train steps;
        # this controls how often we alternate between collecting and training.
        train_every = (self.replayBuffer.capacity - self.params['buffer_prefill']) / num_batches

        step_counter = 0
        while self.replayBuffer.size < self.replayBuffer.capacity:
            # collect data in the real environment
            should_log = False
            while step_counter <= train_every and self.replayBuffer.size < self.replayBuffer.capacity:
                if self.replayBuffer.size - self.last_eval >= self.params['eval_every']:
                    # Bug fix: the original called self._evaluate(), which does
                    # not exist; the evaluation entry point is eval_model().
                    metrics = self.eval_model(is_final=False)
                    common.update_metrics(metrics, self.replayBuffer.metrics(), prefix='buffer/')
                    self.summarizer.append(metrics)
                    wandb.log(self.summarizer.summarize())

                self.replayBuffer.populate(1)
                rewards_steps = self.experence.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    tracker.reward(np.mean(rewards), self.frame_idx)
                step_counter += 1
                self.frame_idx += 1

                if self.replayBuffer.size % self.log_every == 0:
                    should_log = True

            # train world model and actor-critic
            metrics_hist = []
            while step_counter >= train_every:
                step_counter -= train_every
                metrics = self._train_step()
                self.train_count += 1
                metrics_hist.append(metrics)

            self.save_model()
            metrics = common.mean_metrics(metrics_hist)
            common.update_metrics(metrics, self.replayBuffer.metrics(), prefix='buffer/')

            # periodic evaluation
            if len(self.replayBuffer) - self.last_eval >= self.params['eval_every'] and \
                    len(self.replayBuffer) < self.replayBuffer.capacity:
                eval_metrics = self.eval_model(is_final=False)
                metrics.update(eval_metrics)
                should_log = True

            self.summarizer.append(metrics)
            if should_log:
                wandb.log(self.summarizer.summarize())

        # final evaluation
        metrics = self.eval_model(is_final=True)
        common.update_metrics(metrics, self.replayBuffer.metrics(), prefix='buffer/')
        self.summarizer.append(metrics)
        wandb.log(self.summarizer.summarize())
    

    def pre_train_model(self, tracker):
        """Prefill the buffer with random experience, then pretrain the models.

        Three pretraining phases split `pretrain_budget`:
        observation model (`pretrain_obs_p`), dynamics model (`pretrain_dyn_p`)
        and actor-critic (the remaining fraction).

        Args:
            tracker: reward tracker receiving mean episode rewards.
        """
        # Fill the replay buffer using the random policy
        for _ in range(self.params['buffer_prefill'] - 1):
            self.replayBuffer.populate(1)
            rewards_steps = self.experence.pop_rewards_steps()
            if rewards_steps:
                rewards, steps = zip(*rewards_steps)
                tracker.reward(np.mean(rewards), self.frame_idx)

            metrics = {}
            # Log buffer statistics periodically
            common.update_metrics(metrics, self.replayBuffer.metrics(), prefix='buffer/')
            self.summarizer.append(metrics)
            if len(self.replayBuffer) % self.log_every == 0:
                wandb.log(self.summarizer.summarize())
            self.frame_idx += 1

        self.replayBuffer.populate(1)
        self.frame_idx += 1
        metrics = {}
        common.update_metrics(metrics, self.replayBuffer.metrics(), prefix='buffer/')
        config = self.params
        agent = self.agent
        device = next(agent.parameters()).device
        wm = agent.wm
        obs_model = wm.obs_model
        ac = agent.ac
        replay_buffer = self.replayBuffer

        # pretrain observation model on independently sampled (non-sequential)
        # observations; `budget` counts how many samples this phase may consume
        wm_total_batch_size = config['wm_batch_size'] * config['wm_sequence_length']
        budget = config['pretrain_budget'] * config['pretrain_obs_p']
        while budget > 0:
            # Shuffle all buffer indices, then consume them in chunks
            indices = torch.randperm(len(replay_buffer), device=replay_buffer.device)
            while len(indices) > 0 and budget > 0:
                idx = indices[:wm_total_batch_size]
                indices = indices[wm_total_batch_size:]
                o = replay_buffer.get_obs(idx)
                o = torch.tensor(o, device=self.device)
                _ = wm.optimize_pretrain_obs(o.unsqueeze(1))  # unsqueeze adds a length-1 time dim
                budget -= idx.numel()
            self.save_model()

        # encode all observations once, since the encoder does not change anymore
        # Bug fix: np.arange accepts neither a torch dtype nor a `device`
        # kwarg; this must be torch.arange to build an index tensor.
        indices = torch.arange(len(replay_buffer), dtype=torch.long, device=self.device)
        o = replay_buffer.get_obs(indices.unsqueeze(0), prefix=1, return_next=True)[0]  # 1 for context
        o = torch.tensor(o, device=self.device)
        o = o.squeeze(0).unsqueeze(1)  # add a length-1 time dimension
        with torch.no_grad():
            z_dist = obs_model.eval().encode(o)

        # pretrain dynamics model
        budget = config['pretrain_budget'] * config['pretrain_dyn_p']
        while budget > 0:
            for idx in common.generate_uniform_indices(
                    config['wm_batch_size'], config['wm_sequence_length'], extra=2):  # 2 for context + next
                # z: sampled latents, logits: encoder distribution parameters
                z, logits = obs_model.sample_z(z_dist, idx=idx.flatten(), return_logits=True)
                z, logits = [x.squeeze(1).unflatten(0, idx.shape) for x in (z, logits)]
                z = z[:, :-1]
                target_logits = logits[:, 2:]  # dynamics targets are shifted by context + next
                idx = idx[:, :-2]
                _, a, r, terminated, truncated, _ = replay_buffer.get_data(idx, device=device, prefix=1)
                _ = wm.optimize_pretrain_dyn(z, a, r, terminated, truncated, target_logits)
                budget -= idx.numel()
                if budget <= 0:
                    break
            self.save_model()

        # pretrain actor-critic on the remaining fraction of the budget.
        # Bug fix: the remainder after the obs and dyn phases is
        # 1 - obs_p - dyn_p; the original's "+" over-spent the total budget.
        budget = config['pretrain_budget'] * (1 - config['pretrain_obs_p'] - config['pretrain_dyn_p'])
        while budget > 0:
            for idx in common.generate_uniform_indices(
                    config['ac_batch_size'], config['ac_horizon'], extra=2):  # 2 for context + next
                z = obs_model.sample_z(z_dist, idx=idx.flatten())
                z = z.squeeze(1).unflatten(0, idx.shape)
                idx = idx[:, :-2]
                _, a, r, terminated, truncated, _ = replay_buffer.get_data(idx, device=device, prefix=1)
                # episode end = terminated or truncated
                d = torch.logical_or(terminated, truncated)
                if config['ac_input_h']:
                    # Discount per step; zero where the episode terminated
                    g = wm.to_discounts(terminated)
                    tgt_length = config['ac_horizon'] + 1
                    with torch.no_grad():
                        # Hidden states from the (frozen) dynamics model
                        _, h, _ = wm.dyn_model.eval().predict(z[:, :-1], a, r, g, d[:, :-1], tgt_length)
                else:
                    h = None
                g = wm.to_discounts(d)
                # Drop the context step from every tensor
                z, r, g, d = [x[:, 1:] for x in (z, r, g, d)]
                _ = ac.optimize_pretrain(z, h, r, g, d)
                budget -= idx.numel()
                if budget <= 0:
                    break
            self.save_model()

        # Fully sync the critic's target network once pretraining is complete
        ac.sync_target()
        self.pre_train = True


    def _train_step(self):
        """Run one training iteration.

        Performs `wm_train_steps` world-model updates on sequences sampled from
        the replay buffer, then a single actor-critic update on a rollout
        imagined by the world model (Dreamer-style).

        Returns:
            dict: metrics of the last world-model update ('wm/' prefix) merged
            with actor-critic metrics ('ac/' prefix).
        """
        config = self.params
        agent = self.agent
        device = next(agent.parameters()).device
        wm = agent.wm
        ac = agent.ac
        replay_buffer = self.replayBuffer

        # train wm
        for _ in range(config['wm_train_steps']):
            metrics_i = {}
            idx = replay_buffer.sample_indices(config['wm_batch_size'], config['wm_sequence_length']) # sampled start indices; presumably (wm_batch_size, wm_sequence_length) — TODO confirm
            '''
            obs shape is [wm_total_batch_size, wm_sequence_length, h, w, c]
            actions shape is [wm_total_batch_size, wm_sequence_length]
            rewards shape is [wm_total_batch_size, wm_sequence_length]
            terminated shape is [wm_total_batch_size, wm_sequence_length]
            truncated shape is [wm_total_batch_size, wm_sequence_length]
            timesteps shape is [wm_total_batch_size, wm_sequence_length]
            '''
            o, a, r, terminated, truncated, _ = \
                replay_buffer.get_data(idx, device=device, prefix=1, return_next_obs=True)  # 1 for context

            o = torch.tensor(o, device=device)  # observations
            a = torch.tensor(a, device=device)  # actions
            r = torch.tensor(r, device=device)  # rewards
            terminated = torch.tensor(terminated, device=device)  # termination flags
            truncated = torch.tensor(truncated, device=device)
            '''
            这里开始训练世界模型和预测模型

            z shape is (batch_size, tgt_length / num_modalities - 1, z_categoricals * z_categories)
            h shape is (batch_size, tgt_length / num_modalities, h_dim)
            '''
            z, h, met = wm.optimize(o, a, r, terminated, truncated)
            common.update_metrics(metrics_i, met, prefix='wm/')

            # Drop the last time step so the tensors align with z below
            o, a, r, terminated, truncated = [x[:, :-1] for x in (o, a, r, terminated, truncated)]
            '''
            o shape is [wm_total_batch_size, wm_sequence_length - 1, h, w, c]
            a shape is [wm_total_batch_size, wm_sequence_length - 1]
            r shape is [wm_total_batch_size, wm_sequence_length - 1]
            terminated shape is [wm_total_batch_size, wm_sequence_length - 1]
            truncated shape is [wm_total_batch_size, wm_sequence_length - 1]
            '''

        metrics = metrics_i  # only use last metrics

        # train actor-critic
        # Build candidate rollout start states via sliding windows over the
        # replay sequences: 2-step latent windows paired with the matching
        # action/reward/termination entries.
        # TODO(review): verify the shapes noted below in a debugger
        create_start = lambda x, size: common.windows(x, size).flatten(0, 1)
        start_z = create_start(z, 2) # presumably (batch * num_windows, ..., z_categoricals * z_categories) — TODO confirm
        start_a = create_start(a, 1)
        start_r = create_start(r, 1)
        start_terminated = create_start(terminated, 1)
        start_truncated = create_start(truncated, 1)

        # Randomly pick ac_batch_size start states for the imagined rollout
        idx = common.random_choice(start_z.shape[0], config['ac_batch_size'], device=start_z.device)
        start_z, start_a, start_r, start_terminated, start_truncated = \
            [x[idx] for x in (start_z, start_a, start_r, start_terminated, start_truncated)]
        '''
        start_z shape is (ac_batch_size, z_categoricals * z_categories)
        start_a shape is (ac_batch_size, num_actions)
        start_r shape is (ac_batch_size, 1)
        start_terminated shape is (ac_batch_size, 1)
        start_truncated shape is (ac_batch_size, 1)
        '''

        dreamer = model.Dreamer(config, wm, mode='imagine', ac=ac, store_data=True,
                          start_z_sampler=self._create_start_z_sampler(temperature=1))
        dreamer.imagine_reset(start_z, start_a, start_r, start_terminated, start_truncated)
        for _ in range(config['ac_horizon']):
            a = dreamer.act() # select actions; presumably no gradient flows through acting — TODO confirm
            dreamer.imagine_step(a) # advance the imagined rollout by one step
        # Collect the imagined trajectory
        z, o, h, a, r, g, d, weights = dreamer.get_data()
        if config['wm_discount_threshold'] == 0:
            d = None  # save some computation, since all dones are False in this case
        # Train the AC on the imagined trajectory rather than real observations
        ac_metrics = ac.optimize(z, h, a, r, g, d, weights)
        common.update_metrics(metrics, ac_metrics, prefix='ac/')

        return metrics

    
    def train_model(self):
        """Entry point: pretrain on random data, switch the buffer to the
        learned agent, then run the main training loop."""
        with common.RewardTracker(stop_reward=99999) as reward_tracker:
            self.pre_train_model(reward_tracker)
            self.build_predict_buffer()
            self.__train(reward_tracker)


    @torch.no_grad()
    def eval_model(self, is_final):
        '''
        Evaluate the current agent in real (vectorized) environments.

        is_final: if True, use the final-evaluation episode count and record
        extra 'eval/final_*' metrics; it also changes the imagination length
        used for the eval images.

        Returns a dict of metrics: score statistics, eval images, buffer
        histograms and timing.
        '''
        start_time = time.time()
        config = self.params
        agent = self.agent
        device = next(agent.parameters()).device
        wm = agent.wm
        ac = agent.ac
        replay_buffer = self.replayBuffer

        metrics = {}
        metrics['buffer/visits'] = replay_buffer.visit_histogram()
        metrics['buffer/sample_probs'] = replay_buffer.sample_probs_histogram()
        # Visual check of the world model: reconstructions and an imagined rollout
        recon_img, imagine_img = self._create_eval_images(is_final)
        metrics['eval/recons'] = wandb.Image(recon_img)
        if imagine_img is not None:
            metrics['eval/imagine'] = wandb.Image(imagine_img)

        # similar to evaluation proposed in https://arxiv.org/pdf/2007.05929.pdf (SPR) section 4.1
        # Create a vectorized set of evaluation environments
        num_episodes = config['final_eval_episodes'] if is_final else config['eval_episodes']
        num_envs = max(min(num_episodes, int(multiprocessing.cpu_count() * config['cpu_p'])), 1)
        eval_env = common.create_vector_env(num_envs, common.wrap_dqn(config['env_name'], eval=True))

        seed = ((config['seed'] + 13) * 7919 + 13) if config['seed'] is not None else None
        start_obs, _ = eval_env.reset(seed=seed)
        start_obs = common.preprocess_atari_obs(start_obs, device).unsqueeze(1)

        dreamer = model.Dreamer(config, wm, mode='observe', ac=ac, store_data=False)
        # Prime the dreamer with the initial observations (single-step reset)
        dreamer.observe_reset_single(start_obs)

        scores = []
        current_scores = np.zeros(num_envs)
        finished = np.zeros(num_envs, dtype=bool) # True once an env's episode has ended
        num_truncated = 0
        while len(scores) < num_episodes:
            a = dreamer.act() # action from the dreamer's policy
            o, r, terminated, truncated, infos = eval_env.step(a.squeeze(1).cpu().numpy()) # step all envs

            not_finished = ~finished
            current_scores[not_finished] += r[not_finished] # accumulate reward only for envs still running
            lives = infos['lives']
            for i in range(num_envs):
                if not finished[i]:
                    if truncated[i]:
                        num_truncated += 1
                        finished[i] = True
                    elif terminated[i] and lives[i] == 0:
                        finished[i] = True

            # Feed the real observations back into the dreamer
            o = common.preprocess_atari_obs(o, device).unsqueeze(1)
            r = torch.as_tensor(r, dtype=torch.float, device=device).unsqueeze(1)
            terminated = torch.as_tensor(terminated, device=device).unsqueeze(1)
            truncated = torch.as_tensor(truncated, device=device).unsqueeze(1)
            # Advance the dreamer one step conditioned on the real transition
            z, h, _, d, _ = dreamer.observe_step(a, o, r, terminated, truncated)

            if np.all(finished):
                # only reset if all environments are finished to remove bias for shorter episodes
                scores.extend(current_scores.tolist())
                num_scores = len(scores)
                # Truncate any surplus so exactly num_episodes scores count
                if num_scores >= num_episodes:
                    if num_scores > num_episodes:
                        scores = scores[:num_episodes]  # unbiased, just pick first
                    break
                current_scores[:] = 0
                finished[:] = False
                if seed is not None:
                    seed = seed * 3 + 13 + num_envs
                start_o, _ = eval_env.reset(seed=seed, options={'force': True})
                start_o = common.preprocess_atari_obs(start_o, device).unsqueeze(1)
                dreamer = model.Dreamer(config, wm, mode='observe', ac=ac, store_data=False)
                # The dreamer must also be re-primed with the fresh start states
                dreamer.observe_reset_single(start_o)
        eval_env.close(terminate=True)
        if num_truncated > 0:
            print(f'{num_truncated} episode(s) truncated')

        # Aggregate score statistics
        score_mean = np.mean(scores)
        score_metrics = {
            'score_mean': score_mean,
            'score_std': np.std(scores),
            'score_median': np.median(scores),
            'score_min': np.min(scores),
            'score_max': np.max(scores),
        }
        metrics.update({f'eval/{key}': value for key, value in score_metrics.items()})
        if is_final:
            metrics.update({f'eval/final_{key}': value for key, value in score_metrics.items()})

        end_time = time.time()
        eval_time = end_time - start_time

        self.total_eval_time += eval_time
        metrics['eval/total_time'] = self.total_eval_time

        self.last_eval = replay_buffer.size
        return metrics
    

    def print_stats(self):
        """Print trainable-parameter counts for each sub-module of the agent."""
        def count_params(module):
            return sum(p.numel() for p in module.parameters() if p.requires_grad)

        agent = self.agent
        wm, ac = agent.wm, agent.ac
        print('# Parameters')
        print('Observation model:', count_params(wm.obs_model))
        print('Dynamics model:', count_params(wm.dyn_model))
        print('Actor:', count_params(ac.actor_model))
        print('Critic:', count_params(ac.critic_model))
        print('World model:', count_params(wm))
        print('Actor-critic:', count_params(ac))
        print('Observation encoder + actor:', count_params(wm.obs_model.encoder) + count_params(ac.actor_model))
        print('Total:', count_params(agent))


    @torch.no_grad()
    def _create_eval_images(self, is_final=False):
        """Build two diagnostic images for logging.

        Args:
            is_final: when True, use a long (100-step) imagination horizon.

        Returns:
            tuple: (recon_img, imagine_img) PIL images — observation
            reconstructions, and an imagined rollout annotated per frame with
            the action, reward and discount.
        """
        # Bug fix: the configuration lives in self.params; self.config is
        # never assigned anywhere in this class (AttributeError).
        config = self.params
        agent = self.agent
        replay_buffer = self.replayBuffer
        obs_model = agent.wm.obs_model.eval()

        # recon_img: reconstruct randomly sampled buffer observations
        idx = common.random_choice(replay_buffer.size, 10, device=replay_buffer.device).unsqueeze(1)
        o = replay_buffer.get_obs(idx)
        z = obs_model.encode_sample(o, temperature=0)
        recons = obs_model.decode(z)
        # use last frame of frame stack
        o = o[:, :, -1:]
        recons = recons[:, :, -1:]
        # Handle grayscale vs color observation layouts
        if config['env_grayscale']:
            recon_img = [o.unsqueeze(-3), recons.unsqueeze(-3)]  # unsqueeze channel
        else:
            recon_img = [o.permute(0, 1, 2, 5, 3, 4), recons.permute(0, 1, 2, 5, 3, 4)]
        recon_img = torch.cat(recon_img, dim=0).squeeze(1).transpose(0, 1).flatten(0, 1)
        # Bug fix: torchvision has no `common` submodule; the grid helper is
        # torchvision.utils.make_grid.
        recon_img = torchvision.utils.make_grid(recon_img, nrow=o.shape[0], padding=2)
        recon_img = common.to_image(recon_img)

        # imagine_img: imagined rollout from the first 5 sampled start states
        idx = idx[:5]
        o, a, r, terminated, truncated, _ = replay_buffer.get_data(idx, device=self.device, prefix=1)
        start_o = torch.tensor(o, device=self.device)  # 1 for context
        start_a = torch.tensor(a, device=self.device)[:, :-1]
        start_r = torch.tensor(r, device=self.device)[:, :-1]
        start_terminated = torch.tensor(terminated, device=self.device)[:, :-1]
        start_truncated = torch.tensor(truncated, device=self.device)[:, :-1]
        start_z = obs_model.encode_sample(start_o, temperature=0)

        # Imagination horizon: long for the final evaluation, short otherwise
        horizon = 100 if is_final else config['wm_sequence_length']
        dreamer = model.Dreamer(config, agent.wm, mode='imagine', ac=agent.ac, store_data=True,
                          start_z_sampler=self._create_start_z_sampler(temperature=0), always_compute_obs=True)
        dreamer.imagine_reset(start_z, start_a, start_r, start_terminated, start_truncated, keep_start_data=True)
        for _ in range(horizon):
            a = dreamer.act()
            dreamer.imagine_step(a, temperature=1)
        z, o, _, a, r, g, d, weights = dreamer.get_data()

        o = o[:, :-1, -1:]  # remove last time step and use last frame of frame stack
        a, r, g, weights = [x.cpu().numpy() for x in (a, r, g, weights)]

        imagine_img = o
        if config['env_grayscale']:
            imagine_img = imagine_img.unsqueeze(3)
        else:
            imagine_img = imagine_img.permute(0, 1, 2, 5, 3, 4)
        imagine_img = imagine_img.unsqueeze(1)
        imagine_img = imagine_img.transpose(2, 3).flatten(0, 3)
        pad = 2
        extra_pad = 38  # vertical space under each row for the text overlay
        imagine_img = common.make_grid(imagine_img, nrow=o.shape[1], padding=(pad + extra_pad, pad))
        imagine_img = common.to_image(imagine_img[:, extra_pad:])

        # Annotate each imagined frame with action, reward and discount;
        # color encodes reward sign, brightness the dreamer's sample weight.
        draw = ImageDraw.Draw(imagine_img)
        h, w = o.shape[3:5]
        for t in range(r.shape[1]):
            for i in range(r.shape[0]):
                x = pad + t * (w + pad)
                y = pad + i * (h + extra_pad + pad) + h
                weight = weights[i, t]
                reward = r[i, t]

                if abs(reward) < 1e-4:
                    color_rgb = int(weight * 255)
                    color = (color_rgb, color_rgb, color_rgb)  # white
                elif reward > 0:
                    color_rb = int(weight * 100)
                    color_g = int(weight * (255 + reward * 255) / 2)
                    color = (color_rb, color_g, color_rb)  # green
                else:
                    color_gb = int(weight * 80)
                    color_r = int(weight * (255 + (-reward) * 255) / 2)
                    color = (color_r, color_gb, color_gb)  # red
                draw.text((x + 2, y + 2), f'a: {self.action_meanings[a[i, t]][:7]: >7}', fill=color)
                draw.text((x + 2, y + 2 + 10), f'r: {r[i, t]: .4f}', fill=color)
                draw.text((x + 2, y + 2 + 20), f'g: {g[i, t]: .4f}', fill=color)
        return recon_img, imagine_img