#!/usr/bin/env python3
'''
Completed adaptation: pure-convolution, discrete-action version.

Reference implementations:
1. https://github.com/danijar/dreamerv3
2. https://github.com/NM512/dreamerv3-torch?tab=readme-ov-file (recommended reference)
3. https://github.com/nyuolab/VIPER-torch
4. https://github.com/sven1977/dreamer_v3
5. https://github.com/burchim/DreamerV3-PyTorch
6. https://github.com/ray-project/ray/blob/master/rllib/algorithms/dreamerv3/dreamerv3.py (contains other algorithms too; check whether they are usable)


TODO: keep decomposing the DreamerV3 algorithm; different combinations of its
components may suit different games.

Training log (trained on Tencent Cloud):
20250512: training score -7.556, test score not generated
20250513: training score -7.4, test score not generated
Training was too slow; paused to improve training efficiency.
'''
import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm

from lib import model, common, config

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

# Register the ALE (Atari) environments with gymnasium.
gym.register_envs(ale_py)

# Global run configuration and the per-head loss scales applied to the
# world-model losses (see train_world_model).
dreamer_config = config.Config()
scales_config = dict(
        reward=dreamer_config.reward_head["loss_scale"],
        cont=dreamer_config.cont_head["loss_scale"],
)

def preprocess_obs(obs, device):
    """Normalize the image observation for the encoder.

    Replaces ``obs['image']`` in place with a float tensor of shape
    (1, H, W, C) on ``device``, scaled from [0, 255] to [0, 1], and
    returns the (mutated) observation dict.
    """
    image = torch.tensor(obs['image'], device=device)
    obs['image'] = image.float().unsqueeze(0) / 255.0
    return obs

@torch.no_grad()
def test_net(rssm_model, obs_encoder_model, action_model, env, dreamer_config, count=10, device="cpu"):
    '''
    Evaluate the current policy by playing full episodes in the real env.

    count: number of episodes to play (each runs until termination)

    return: (mean episode reward, mean episode steps)
    '''
    rewards = 0.0
    steps = 0
    for _ in range(count):
        latent = None
        action = None
        obs, _ = env.reset()
        while True:
            obs = preprocess_obs(obs, device)
            embed = obs_encoder_model(obs)
            # Update the posterior latent from the latest observation.
            latent, _ = rssm_model.obs_step(latent, action, embed, obs["is_first"])
            if dreamer_config.eval_state_mean:
                latent["stoch"] = latent["mean"]
            feat = rssm_model.get_feat(latent)
            actor, _ = action_model(feat)
            # NOTE(review): `actor.model()` looks like a typo for `actor.mode()`
            # (deterministic eval action) -- confirm against DreamerActorModel.
            action = actor.model()
            latent = {k: v.detach() for k, v in latent.items()}
            action = action.detach()
            if dreamer_config.actor["dist"] == "onehot_gumble":
                # BUGFIX: torch.one_hot does not exist (AttributeError at
                # runtime); use torch.nn.functional.one_hot instead. Cast to
                # float to match the dtype of a sampled one-hot action.
                action = F.one_hot(
                    torch.argmax(action, dim=-1), dreamer_config.num_actions
                ).float()

            # Execute the action and observe the next transition.
            obs, reward, done, truncated, _ = env.step(action.squeeze(0).cpu().numpy())
            rewards += reward
            steps += 1
            if done or truncated:
                break
    return rewards / count, steps / count


@torch.no_grad()
def save_every(rssm_model, obs_encoder_model, obs_decoder_model, action_model, device, save_path, test_env, checkpoints, best_reward, frame_idx, writer):
    '''
    Evaluate the current policy, log the result and persist a checkpoint.

    checkpoints: state-dict payload to save alongside the evaluation
    best_reward: best mean test reward seen so far (or None)

    return: the (possibly updated) best mean test reward
    '''
    ts = time.time()
    # Switch to eval mode for the rollout, restore train mode afterwards.
    rssm_model.eval()
    obs_encoder_model.eval()
    obs_decoder_model.eval()
    action_model.eval()
    # BUGFIX: test_net requires dreamer_config as its fifth parameter; the
    # previous call omitted it and raised TypeError. Use the module-level
    # config object defined at import time.
    rewards, steps = test_net(rssm_model, obs_encoder_model, action_model, env=test_env, dreamer_config=dreamer_config, count=10, device=device)
    rssm_model.train()
    obs_encoder_model.train()
    obs_decoder_model.train()
    action_model.train()
    print("Test done in %.2f sec, reward %.3f, steps %d" % (
        time.time() - ts, rewards, steps))
    writer.add_scalar("test_reward", rewards, frame_idx)
    writer.add_scalar("test_steps", steps, frame_idx)
    if best_reward is None or best_reward < rewards:
        if best_reward is not None:
            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
        best_reward = rewards
    # NOTE(review): called unconditionally -- presumably save_best_model
    # compares `rewards` against the stored best internally; confirm.
    common.save_best_model(rewards, checkpoints, save_path, 'dreamer_v3_best')

    return best_reward



def train_world_model(buffer, obs_encoder, obs_decoder, rssm_model, reward_model, continue_model, dreamer_config, world_opt, device):
    '''
    Run one gradient update of the world model (encoder, RSSM and heads).

    buffer: tuple (obs, actions, nonterms, discount, firsts) of batched
        tensors; nonterms/discount/firsts are accepted for interface
        compatibility but not used here.
    world_opt: optimizer wrapper invoked with the scalar model loss.

    return: (post, context) -- detached posterior states, plus a dict with
        the embeddings, posterior features, KL values and posterior entropy.
    '''
    obs, actions, nonterms, discount, firsts = buffer

    obs = common.preprocess(obs, device)
    # Mixed precision (torch.amp.autocast) is deliberately not enabled here.
    # embed shape = (batch_size, batch_length, embed_size)
    embed = obs_encoder(obs)
    # Posterior (uses the observation) and prior (imagination only) states.
    post, prior = rssm_model.observe(
        embed, actions['action'], obs['is_first']
    )
    kl_free = dreamer_config.kl_free
    dyn_scale = dreamer_config.dyn_scale
    rep_scale = dreamer_config.rep_scale
    # KL divergence between posterior and prior (free-bits + two-sided scaling).
    kl_loss, kl_value, dyn_loss, rep_loss = rssm_model.kl_loss(
        post, prior, kl_free, dyn_scale, rep_scale
    )
    assert kl_loss.shape == embed.shape[:2], kl_loss.shape
    # Posterior feature (stochastic + deterministic state concatenated).
    # Hoisted out of the head loop: it does not depend on the head.
    post_feat = rssm_model.get_feat(post)
    preds = {}
    # Run every prediction head on the posterior features.
    for name, head in {"decoder": obs_decoder, "reward": reward_model, "cont": continue_model}.items():
        # Heads listed in grad_heads backpropagate into the RSSM; others are
        # trained on detached features only.
        grad_head = name in dreamer_config.grad_heads
        feat = post_feat if grad_head else post_feat.detach()
        pred = head(feat)
        if type(pred) is dict:
            preds.update(pred)
        else:
            preds[name] = pred
    losses = {}
    for name, pred in preds.items():
        # Negative log-likelihood of the observed target under each head.
        loss = -pred.log_prob(obs[name])
        assert loss.shape == embed.shape[:2], (name, loss.shape)
        losses[name] = loss
    # Apply the per-head loss scales (reward/cont); default scale is 1.0.
    scaled = {
        key: value * scales_config.get(key, 1.0)
        for key, value in losses.items()
    }
    model_loss = sum(scaled.values()) + kl_loss
    world_opt(torch.mean(model_loss))

    context = dict(
        embed=embed,
        feat=post_feat,
        kl=kl_value,
        postent=rssm_model.get_dist(post).entropy(),
    )
    # Detach so actor/critic training does not backprop into the world model.
    post = {k: v.detach() for k, v in post.items()}
    return post, context


def _imagine(start, policy, horizon, rssm_model):
    '''
    Roll imagined trajectories forward through the RSSM.

    param start: posterior states (dict of tensors, batch x time leading dims)
    param policy: actor called on features; its sample is the imagined action
    param horizon: number of imagination steps to unroll
    param rssm_model: recurrent state-space model used for the rollout

    return: (feats, states, actions) for every imagined step; states are the
        input posteriors concatenated with all-but-the-last predicted priors.
    '''
    # Collapse (batch, time) into a single leading dimension.
    merge_dims = lambda t: t.reshape([-1] + list(t.shape[2:]))
    start = {key: merge_dims(val) for key, val in start.items()}

    def step(carry, _):
        '''One imagination step: state -> feature -> action -> next state.'''
        state, _, _ = carry
        feat = rssm_model.get_feat(state)
        # The policy must not backprop into the features here.
        action = policy(feat.detach()).sample()
        # Predict the next prior state from the current state and action.
        next_state = rssm_model.img_step(state, action)
        return next_state, feat, action

    # Unroll `horizon` steps, collecting states, features and actions.
    succ, feats, actions = common.static_scan(
        step, [torch.arange(horizon)], (start, None, None)
    )
    # Prepend the starting posterior and drop the final prior so that
    # states[t] is the state the action at step t was taken from.
    states = {
        key: torch.cat([start[key][None], val[:-1]], 0)
        for key, val in succ.items()
    }

    return feats, states, actions


def _compute_target(imag_feat, imag_state, reward, rssm_model, continue_model, value_model, dreamer_config):
    '''
    Compute lambda-returns, cumulative discount weights and the value baseline
    over an imagined trajectory.

    return: (target, weights, base) where base is value[:-1].
    '''
    # Feature = stochastic + deterministic state concatenated.
    feat = rssm_model.get_feat(imag_state)
    # Per-step discount, gated by the predicted continue probability.
    discount = dreamer_config.discount * continue_model(feat).mean
    # Critic value estimates along the imagined trajectory.
    value = value_model(imag_feat).mode()
    target = common.lambda_return(
        reward[1:],
        value[:-1],
        discount[1:],
        bootstrap=value[-1],
        lambda_=dreamer_config.discount_lambda,
        axis=0,
    )
    # Cumulative product of discounts, shifted so the first step has weight 1.
    leading_ones = torch.ones_like(discount[:1])
    weights = torch.cumprod(
        torch.cat([leading_ones, discount[:-1]], 0), 0
    ).detach()
    return target, weights, value[:-1]


def _compute_actor_loss(
    imag_feat,
    imag_action,
    target,
    weights,
    base,
    actor_model,
    value_model,
    dreamer_config,
    reward_ema,
    ema_vals
):
    '''
    Compute the actor loss on an imagined trajectory.

    target: lambda-returns from _compute_target
    weights: cumulative discount weights
    base: value baseline (value[:-1]) used to form the advantage
    reward_ema: running percentile normalizer updated in place via ema_vals

    return: (actor_loss, metrics)
    '''
    metrics = {}
    # The policy gradient must not flow back into the world-model features.
    inp = imag_feat.detach()
    policy = actor_model(inp)
    # Q-val for actor is not transformed using symlog.
    if dreamer_config.reward_EMA:
        # Normalize returns by the running 5th/95th percentile EMA.
        offset, scale = reward_ema(target, ema_vals)
        normed_target = (target - offset) / scale
        normed_base = (base - offset) / scale
        adv = normed_target - normed_base
        metrics.update(common.tensorstats(normed_target, "normed_target"))
        metrics["EMA_005"] = common.to_np(ema_vals[0])
        metrics["EMA_095"] = common.to_np(ema_vals[1])
    else:
        # BUGFIX: `adv` was previously defined only inside the reward_EMA
        # branch, so imag_gradient == "dynamics" raised NameError whenever
        # EMA normalization was disabled. Fall back to the raw advantage.
        adv = target - base

    if dreamer_config.imag_gradient == "dynamics":
        # Backprop straight through the imagined dynamics.
        actor_target = adv
    elif dreamer_config.imag_gradient == "reinforce":
        # REINFORCE with the learned value as a baseline.
        actor_target = (
            policy.log_prob(imag_action)[:-1][:, :, None]
            * (target - value_model(imag_feat[:-1]).mode()).detach()
        )
    elif dreamer_config.imag_gradient == "both":
        # Mix of straight-through and REINFORCE gradients.
        actor_target = (
            policy.log_prob(imag_action)[:-1][:, :, None]
            * (target - value_model(imag_feat[:-1]).mode()).detach()
        )
        mix = dreamer_config.imag_gradient_mix
        actor_target = mix * target + (1 - mix) * actor_target
        metrics["imag_gradient_mix"] = mix
    else:
        raise NotImplementedError(dreamer_config.imag_gradient)
    actor_loss = -weights[:-1] * actor_target
    return actor_loss, metrics


def train_actor_critc(start, objective, world_list, actor_list, value_list, target_value_model, dreamer_config, actor_opt, value_opt, use_amp, ema_vals, device):
    '''
    Train the actor and critic on trajectories imagined from posterior states.

    start: posterior states from the world-model update (dict of tensors)
    objective: reward prediction function, called as objective(feat, state, action)
    world_list: [obs_encoder, rssm, reward head, obs_decoder, continue head]
    target_value_model: slow-moving copy of the critic (ptan TargetNet)
    use_amp: enable torch.amp.autocast for the forward passes

    return: (imag_feat, imag_state, imag_action, weights)
    '''

    obs_encoder, rssm_model, reward_model, obs_decoder, continue_model =  world_list 
    actor_model =  actor_list[0]
    value_model = value_list[0]
    # Freeze world model + critic: only the actor receives gradients here.
    with common.FreezeParameters(world_list + value_list):
        with torch.amp.autocast(device.type, enabled=use_amp):
            # Unroll imag_horizon imagination steps with the current policy.
            imag_feat, imag_state, imag_action = _imagine(
                start, actor_model, dreamer_config.imag_horizon, rssm_model=rssm_model
            )
            # Predicted rewards along the imagined trajectory.
            reward = objective(imag_feat, imag_state, imag_action)
            # Policy entropy, used as a regularizer in the actor loss below.
            actor_ent = actor_model(imag_feat).entropy()
            # NOTE(review): state_ent is computed but never used afterwards.
            state_ent = rssm_model.get_dist(imag_state).entropy()
            # this target is not scaled by ema or sym_log.
            target, weights, base = _compute_target(
                imag_feat, imag_state, reward, rssm_model, continue_model, value_model, dreamer_config
            )
            # NOTE(review): `reward_ema` is a module-level global created only
            # under __main__ -- consider passing it in explicitly; confirm.
            actor_loss, mets = _compute_actor_loss(
                imag_feat, imag_action, target, weights, base, actor_model, value_model, dreamer_config, reward_ema, ema_vals
            )
            actor_loss -= dreamer_config.actor["entropy"] * actor_ent[:-1, ..., None]
            actor_loss = torch.mean(actor_loss)
            value_input = imag_feat

    # Freeze world model + actor: only the critic receives gradients here.
    with common.FreezeParameters(world_list + actor_list):
        with torch.amp.autocast(device.type, enabled=use_amp):
            value = value_model(value_input[:-1].detach())
            # (time, batch, 1), (time, batch, 1) -> (time, batch)
            value_loss = -value.log_prob(target.detach())
            # NOTE(review): target_value_model is called directly although it
            # is a ptan TargetNet wrapper -- confirm it is callable.
            slow_target = target_value_model(value_input[:-1].detach())
            if dreamer_config.critic["slow_target"]:
                # Regularize the critic towards its slow-moving copy.
                value_loss -= value.log_prob(slow_target.mode().detach())
            # (time, batch, 1), (time, batch, 1) -> (1,)
            value_loss = torch.mean(weights[:-1] * value_loss[:, :, None])

    # Apply both optimizer steps with the world model still frozen.
    with common.FreezeParameters(world_list):
        actor_opt(actor_loss)
        value_opt(value_loss)
    return imag_feat, imag_state, imag_action, weights#, metrics



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="Jamesbound", help="Name of the run")
    args = parser.parse_args()
    device = common.select_device(args=args)

    # Directories for model checkpoints and the replay-buffer snapshot.
    save_path = os.path.join("saves", "dreamer-v3-" + args.name)
    os.makedirs(save_path, exist_ok=True)
    save_path_buffer = os.path.join("saves", "dreamer-v3-" + args.name + "-buffer")
    os.makedirs(save_path_buffer, exist_ok=True)

    # TODO compare the effect of frameskip=4 against frameskip=1
    env = common.wrap_dqn(gym.make('ALE/IceHockey-v5', render_mode='rgb_array', frameskip=1, repeat_action_probability=0.0), gray=dreamer_config.gray)
    test_env = common.wrap_dqn(gym.make('ALE/IceHockey-v5', render_mode='rgb_array', frameskip=1, repeat_action_probability=0.0))

    # NOTE(review): a raw gymnasium Discrete space has shape (), so
    # action_space.shape[0] would raise IndexError; presumably
    # common.wrap_dqn exposes a vector-shaped action space here -- confirm.
    dreamer_config.action_size = env.action_space.shape[0]
    dreamer_config.obs_shape = env.observation_space.shape
    dreamer_config.device = device

    # Latent feature size: flattened discrete stochastic state (if enabled)
    # plus the deterministic recurrent state.
    if dreamer_config.dyn_discrete:
        feat_size = dreamer_config.dyn_stoch * dreamer_config.dyn_discrete + dreamer_config.dyn_deter
    else:
        feat_size = dreamer_config.dyn_stoch + dreamer_config.dyn_deter

    # Build the prediction heads, actor and critic networks.

    reward_model = model.DenseModel(
            feat_size,
            (255,) if dreamer_config.reward_head["dist"] == "symlog_disc" else (),
            dreamer_config.reward_head["layers"],
            dreamer_config.units,
            dreamer_config.act,
            dreamer_config.norm,
            dist=dreamer_config.reward_head["dist"],
            outscale=dreamer_config.reward_head["outscale"],
            device=dreamer_config.device,
            name="Reward",).to(device=device)
    continue_model = model.DenseModel(
            feat_size,
            (),
            dreamer_config.cont_head["layers"],
            dreamer_config.units,
            dreamer_config.act,
            dreamer_config.norm,
            dist="binary",
            outscale=dreamer_config.cont_head["outscale"],
            device=dreamer_config.device,
            name="Cont",).to(device=device)

    actor_model = model.DreamerActorModel(
        feat_size,
        (dreamer_config.action_size,),
        dreamer_config.actor["layers"],
        dreamer_config.units,
        dreamer_config.act,
        dreamer_config.norm,
        dreamer_config.actor["dist"],
        dreamer_config.actor["std"],
        dreamer_config.actor["min_std"],
        dreamer_config.actor["max_std"],
        absmax=1.0,
        temp=dreamer_config.actor["temp"],
        unimix_ratio=dreamer_config.actor["unimix_ratio"],
        outscale=dreamer_config.actor["outscale"],
        name="Actor").to(device=device)
    value_model = model.DreamerValueModel(
        feat_size,
        (255,) if dreamer_config.critic["dist"] == "symlog_disc" else (),
        dreamer_config.critic["layers"],
        dreamer_config.units,
        dreamer_config.act,
        dreamer_config.norm,
        dreamer_config.critic["dist"],
        outscale=dreamer_config.critic["outscale"],
        device=dreamer_config.device,
        name="Value",).to(device=device)
    # Slow-moving copy of the critic used as a regularization target.
    target_value_model = ptan.agent.TargetNet(value_model)
    reward_ema = model.RewardEMA(device=dreamer_config.device)

    # Build the observation encoder and decoder (pixel space).
    obs_encoder = model.ObsEncoder(
        shapes=dreamer_config.obs_shape,
        act=torch.nn.SiLU,
        norm=True,
        cnn_depth=32,
        kernel_size=4,
        minres=4,
        ).to(device=device)
    obs_decoder = model.ObsDecoder(
        feat_size = feat_size,
        shapes = dreamer_config.obs_shape,
        act=torch.nn.SiLU,
        norm=True,
        cnn_depth=32,
        kernel_size=4,
        minres=4,
        cnn_sigmoid=dreamer_config.decoder['cnn_sigmoid'],
        image_dist=dreamer_config.decoder['image_dist'],
        outscale=dreamer_config.decoder['outscale'],
       ).to(device=device)

    rssm_model = model.RssmModel(
            stoch=dreamer_config.dyn_stoch,
            deter=dreamer_config.dyn_deter,
            hidden=dreamer_config.dyn_hidden,
            rec_depth=dreamer_config.dyn_rec_depth,
            discrete=dreamer_config.dyn_discrete,
            act=dreamer_config.act,
            norm=dreamer_config.norm,
            mean_act=dreamer_config.dyn_mean_act, 
            std_act=dreamer_config.dyn_std_act,
            min_std=dreamer_config.dyn_min_std,
            unimix_ratio=dreamer_config.unimix_ratio,
            initial=dreamer_config.initial,
            num_actions=dreamer_config.action_size,
            embed=obs_encoder.outdim,
            device=dreamer_config.device).to(device=device)


    print(rssm_model)
    print(reward_model)
    print(actor_model)
    print(value_model)
    print(obs_encoder)
    print(obs_decoder)

    # Running percentile EMA values used for return normalization.
    ema_vals = torch.zeros((2,), device=device)


    # World model components: encoder, RSSM, reward head, decoder, continue head.
    world_list = [obs_encoder, rssm_model, reward_model, obs_decoder, continue_model]
    actor_list = [actor_model]
    value_list = [value_model]
    # One optimizer per component group (world model / actor / critic).
    model_optimizer = common.Optimizer("model", 
                                       common.get_parameters(world_list),
                                       dreamer_config.model_learning_rate,
                                        dreamer_config.model_lr_eps,
                                        dreamer_config.grad_clip,
                                        dreamer_config.weight_decay,
                                        opt='adam',
                                        use_amp=False)
    actor_optimizer = common.Optimizer("actor", 
                                        common.get_parameters(actor_list),
                                        dreamer_config.actor['lr'],
                                        dreamer_config.actor['eps'],
                                        dreamer_config.actor['grad_clip'],
                                        dreamer_config.weight_decay,
                                        opt='adam',
                                        use_amp=False)
    value_optimizer = common.Optimizer("value", 
                                        common.get_parameters(value_list),
                                        dreamer_config.critic['lr'],
                                        dreamer_config.critic['eps'],
                                        dreamer_config.critic['grad_clip'],
                                        dreamer_config.weight_decay,
                                        opt='adam',
                                        use_amp=False)

    # model_opt_scheduler = optim.lr_scheduler.StepLR(model_optimizer, step_size=50000, gamma=0.9)
    # act_opt_scheduler = optim.lr_scheduler.StepLR(actor_optimizer, step_size=50000, gamma=0.9)
    # value_opt_scheduler = optim.lr_scheduler.StepLR(value_optimizer, step_size=50000, gamma=0.9)


    writer = SummaryWriter(comment="-dreamer-v3_" + args.name)
    # Agents: a random-sampling agent to prefill the replay buffer, and the
    # Dreamer policy agent used once training starts.
    preheat_agent = common.EnvSampleAgent(env=test_env, device=device, num_actions=dreamer_config.action_size, num_envs=1)
    dreamerv3_agent = common.EnvDreamerPredictAgent(actor_model=actor_model, rssm=rssm_model, obs_encoder=obs_encoder, action_size=dreamer_config.action_size, device=device, eval_state_mean=dreamer_config.eval_state_mean, dist=dreamer_config.actor["dist"])
    exp_source = ptan.experience.ExperienceSourceRAW(env, preheat_agent, steps_count=dreamer_config.reward_step)
    buffer = ptan.experience.ExperienceReplayChunkBuffer(exp_source, buffer_size=dreamer_config.replay_size)

    frame_idx = 1
    train_count = 1
    # Resume from the most recent checkpoint when one exists.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Pick the checkpoint file with the highest epoch number.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            rssm_model.load_state_dict(checkpoint['rssm_model'])
            reward_model.load_state_dict(checkpoint['reward_model'])
            actor_model.load_state_dict(checkpoint['actor_model'])
            value_model.load_state_dict(checkpoint['value_model'])
            obs_encoder.load_state_dict(checkpoint['obs_encoder'])
            obs_decoder.load_state_dict(checkpoint['obs_decoder'])
            continue_model.load_state_dict(checkpoint['continue_model'])
            model_optimizer.load_state_dict(checkpoint['model_optimizer'])
            actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
            value_optimizer.load_state_dict(checkpoint['value_optimizer'])
            # model_opt_scheduler.load_state_dict(checkpoint['model_opt_scheduler'])
            # act_opt_scheduler.load_state_dict(checkpoint['act_opt_scheduler'])
            # value_opt_scheduler.load_state_dict(checkpoint['value_opt_scheduler'])
            ema_vals = checkpoint['ema_vals']

            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']

            # Restore the latest replay-buffer snapshot, if present.
            checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path_buffer)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
            if len(checkpoints) > 0:
                buffer = torch.load(os.path.join(save_path_buffer, checkpoints[-1]))
                print("加载buffer成功")

        print("加载模型成功")

    # Prefill the replay buffer with random-agent experience up to
    # replay_initial transitions.
    print("模型预热")
    if len(buffer) < dreamer_config.replay_initial:
        prefilee = dreamer_config.replay_initial - len(buffer)
        for _ in tqdm(range(prefilee)):
            buffer.populate(1)
        print("模型预热完成")

    # Switch the experience source to the learned Dreamer policy.
    exp_source = ptan.experience.ExperienceSourceRAW(env, dreamerv3_agent, steps_count=dreamer_config.reward_step)
    buffer.set_exp_source(exp_source)
    best_reward = None
    prev_action = torch.zeros(1, dreamer_config.action_size).to(device=device)

    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
            while True:
                frame_idx += 1
                buffer.populate(1)
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log training progress for finished episodes.
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)


                # Train intensively for the first 100 updates, then only
                # every train_every environment frames.
                if train_count < 100 or frame_idx % dreamer_config.train_every == 0:
                    metrics = {}
                    # Sample a batch of chunked sequences from the buffer and
                    # repack it into dense (batch, chunk, ...) arrays.
                    observations = {
                        "image":np.empty((dreamer_config.batch_size, dreamer_config.chunk_size) + env.observation_space.shape, dtype=np.uint8),
                        "is_terminal":np.empty((dreamer_config.batch_size, dreamer_config.chunk_size), dtype=np.bool_),
                        "is_first":np.empty((dreamer_config.batch_size, dreamer_config.chunk_size), dtype=np.bool_),
                        "reward":np.empty((dreamer_config.batch_size, dreamer_config.chunk_size), dtype=np.float32)
                    }
                    actions = {
                        "action": np.empty((dreamer_config.batch_size, dreamer_config.chunk_size, env.action_space.shape[0]), dtype=np.float32),
                        "logprob": np.empty((dreamer_config.batch_size, dreamer_config.chunk_size), dtype=np.float32),
                    }
                    discounts = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size), dtype=np.float32)
                    non_dones = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size, 1), dtype=np.bool_)
                    is_first = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size, 1), dtype=np.bool_)
                    batch = buffer.sample(dreamer_config.batch_size, dreamer_config.chunk_size)
                    for batch_idx in range(0, dreamer_config.batch_size):
                        cur_batch = batch[batch_idx]
                        for step_idx in range(dreamer_config.chunk_size):
                            # Experience tuple layout: [0][0]=obs dict,
                            # [0][1]=action dict, [0][2]=reward, [0][3]=done.
                            observations['image'][batch_idx][step_idx] = cur_batch[step_idx][0][0]['image']
                            observations['is_terminal'][batch_idx][step_idx] = cur_batch[step_idx][0][0]['is_terminal']
                            observations['is_first'][batch_idx][step_idx] = cur_batch[step_idx][0][0]['is_first']
                            actions['action'][batch_idx][step_idx] = cur_batch[step_idx][0][1]['action']
                            actions['logprob'][batch_idx][step_idx] = cur_batch[step_idx][0][1]['logprob']
                            observations['reward'][batch_idx][step_idx] = cur_batch[step_idx][0][2]
                            non_dones[batch_idx][step_idx] = not cur_batch[step_idx][0][3]
                            # NOTE(review): [0][5][2] presumably selects the
                            # is_first flag from the extra info of the
                            # experience tuple -- confirm against
                            # ExperienceSourceRAW.
                            is_first[batch_idx][step_idx] = cur_batch[step_idx][0][5][2]
                            # Terminal steps get zero discount.
                            if cur_batch[step_idx][0][3]:
                                discounts[batch_idx][step_idx] = 0
                            else:
                                discounts[batch_idx][step_idx] = 1 * dreamer_config.discount

                    obs = {key: torch.tensor(observations[key]).to(device=device) for key in observations}
                    actions = {key: torch.tensor(actions[key]).to(device=device) for key in actions}
                    nonterms = torch.tensor(non_dones, dtype=torch.float32).to(device) #t-1 to t+seq_len-1
                    discount = torch.tensor(discounts, dtype=torch.float32).to(device) #t-1 to t+seq_len-1
                    firsts = torch.tensor(is_first, dtype=torch.float32).to(device) #t-1 to t+seq_len-1

                    # One world-model gradient update.
                    post, context = train_world_model(
                        (obs, actions, nonterms, discount, firsts), 
                        obs_encoder, 
                        obs_decoder, 
                        rssm_model, 
                        reward_model, 
                        continue_model, 
                        dreamer_config,
                        model_optimizer, 
                        device)
                    start = post
                    # Reward objective for imagination: predicted reward mode.
                    reward = lambda f, s, a: reward_model(
                        rssm_model.get_feat(s)
                    ).mode()
                    # One actor/critic gradient update on imagined rollouts.
                    train_actor_critc(start, reward, world_list, actor_list, value_list, target_value_model, dreamer_config, actor_optimizer, value_optimizer, False, ema_vals, device)

                    train_count += 1
                    if train_count % dreamer_config.save_iters == 0:
                        checkpoints = {
                            'rssm_model': rssm_model.state_dict(),
                            'reward_model': reward_model.state_dict(),
                            'actor_model': actor_model.state_dict(),
                            'value_model': value_model.state_dict(),
                            'obs_encoder': obs_encoder.state_dict(),
                            'obs_decoder': obs_decoder.state_dict(),
                            'continue_model': continue_model.state_dict(),
                            'model_optimizer': model_optimizer.state_dict(),
                            'actor_optimizer': actor_optimizer.state_dict(),
                            'value_optimizer': value_optimizer.state_dict(),
                            # 'model_opt_scheduler': model_opt_scheduler.state_dict(),
                            # 'act_opt_scheduler': act_opt_scheduler.state_dict(),
                            # 'value_opt_scheduler': value_opt_scheduler.state_dict(),
                            'frame_idx': frame_idx,
                            'train_count': train_count,
                            'ema_vals': ema_vals
                        }
                        common.save_checkpoints(train_count, checkpoints, save_path, 'dreamer_v3')
                # NOTE(review): because these are elif branches, the slow
                # target sync and the periodic test can never run on a frame
                # where the training branch was taken; also train_count only
                # changes inside the training branch, so the test condition
                # stays true for many consecutive frames -- confirm intended.
                elif frame_idx % dreamer_config.slow_target_update == 0:
                    target_value_model.alpha_sync(0.98)
                elif train_count % dreamer_config.test_iters == 0:
                    checkpoints = {
                        'rssm_model': rssm_model.state_dict(),
                        'reward_model': reward_model.state_dict(),
                        'actor_model': actor_model.state_dict(),
                        'value_model': value_model.state_dict(),
                        'obs_encoder': obs_encoder.state_dict(),
                        'obs_decoder': obs_decoder.state_dict(),
                        'continue_model': continue_model.state_dict(),
                        'frame_idx': frame_idx,
                        'train_count': train_count
                    }
                    best_reward = save_every(
                        rssm_model=rssm_model, 
                        obs_encoder_model=obs_encoder, 
                        obs_decoder_model=obs_decoder, 
                        action_model=actor_model, 
                        device=device, 
                        save_path=save_path, 
                        test_env=test_env,
                        checkpoints=checkpoints,
                        best_reward=best_reward,
                        frame_idx=frame_idx,
                        writer=writer)
    pass
