#!/usr/bin/env python3
'''
完成适配

参考链接：
1. https://github.com/RajGhugare19/dreamerv2?tab=readme-ov-file（推荐参考这个）
2. https://github.com/jurgisp/pydreamer（对比这个）
3. https://github.com/vincent-thevenin/DreamerV2-Pytorch
4. https://github.com/esteveste/dreamerV2-pytorch
5. https://github.com/ucd-dare/CarDreamer


训练记录
在2号机上训练
20250331:测试分数-5.7，训练分数没有记录，继续训练，太吃显存了
20250401:继续训练，学习率未变化，测试分数-5.7，训练分数无训练，吃显存+训练太慢，调整为灰度图试试，然后重新训练
20250402：重新调整代码，采用帧堆叠，灰度图,归一化到0~1
20250402:测试分数-6.5，训练分数没有记录，继续训练
20250403：继续训练，测试分数-6.4，继续训练
20250404：加载模型成功，测试分数-6.3
20250408：加载模型成功，学习率未调整，测试分数-5.8，继续训练
20250409：加载模型成功
20250410:加载模型成功
20250411:暂停训练一天
20250412:加载模型成功，学习率未调整，测试分数-6.3，继续训练，感觉训练太慢了，继续训练
20250414:加载模型成功,测试分数没有提高，放入Carracing进行验证
'''
import os
import ptan
import time
import gymnasium as gym
import argparse
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm

from lib import model, common, config

import torch
import torch.optim as optim
import torch.nn.functional as F
import ale_py

# Register the ALE (Atari) environments with gymnasium so gym.make('ALE/...') resolves.
gym.register_envs(ale_py)

# Global DreamerV2 hyper-parameter/configuration object shared across this script.
dreamer_config = config.Config()


@torch.no_grad()
def test_net(rssm_model, obs_encoder_model, action_model, env, count=10, device="cpu"):
    '''
    Play `count` complete episodes with the current policy and average the results.

    param count: number of episodes to run (each one plays until termination)

    return: (mean reward, mean step count) over the `count` episodes
    '''
    total_reward = 0.0
    total_steps = 0
    for _ in range(count):
        rssm_state = rssm_model._init_rssm_state(1)
        done = False
        # Zero action as the "previous action" for the very first observation.
        action = torch.zeros(1, env.action_space.shape[0]).to(device)
        obs, _ = env.reset()
        while True:
            # Scale raw pixels from [0, 255] to roughly [-1, 1) before encoding.
            obs_t = torch.tensor(obs, dtype=torch.float32).unsqueeze(0).to(device=device) / 128.0 - 1
            embed = obs_encoder_model(obs_t)
            # Filter the new observation through the RSSM to get the posterior state.
            _, posterior = rssm_model.rssm_observe(embed, action, not done, rssm_state)
            model_state = rssm_model.get_model_state(posterior)
            action, _ = action_model(model_state)
            rssm_state = posterior

            # Execute the chosen action in the environment.
            obs, reward, done, truncated, _ = env.step(action.squeeze(0).cpu().numpy())
            total_reward += reward
            total_steps += 1
            if done or truncated:
                break
    return total_reward / count, total_steps / count


@torch.no_grad()
def save_every(rssm_model, obs_encoder_model, obs_decoder_model, action_model, device, save_path, test_env, checkpoints, best_reward, frame_idx, writer):
    '''
    Evaluate the current models on the test environment, log the scores and
    save the best-scoring checkpoint.

    return: the (possibly updated) best test reward seen so far
    '''
    started = time.time()
    models = (rssm_model, obs_encoder_model, obs_decoder_model, action_model)
    # Switch every network to eval mode for the evaluation run.
    for m in models:
        m.eval()
    rewards, steps = test_net(rssm_model, obs_encoder_model, action_model, env=test_env, count=10, device=device)
    # Restore training mode afterwards.
    for m in models:
        m.train()
    print("Test done in %.2f sec, reward %.3f, steps %d" % (
        time.time() - started, rewards, steps))
    writer.add_scalar("test_reward", rewards, frame_idx)
    writer.add_scalar("test_steps", steps, frame_idx)
    if best_reward is None or best_reward < rewards:
        if best_reward is not None:
            print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
        best_reward = rewards
    common.save_best_model(rewards, checkpoints, save_path, 'dreamer_v2_best')

    return best_reward


def _obs_loss(obs_dist, obs):
    """Negative log-likelihood of the real observations under the decoder's
    predicted observation distribution.

    Minimizing this loss maximizes ``obs_dist.log_prob(obs)``, i.e. it pushes
    the predicted distribution's density toward the actual observations.
    """
    return -obs_dist.log_prob(obs).mean()


def _reward_loss(reward_dist, rewards):
    """Negative log-likelihood of the observed rewards under the predicted
    reward distribution (same principle as ``_obs_loss``)."""
    return -reward_dist.log_prob(rewards).mean()


def _pcont_loss(pcont_dist, nonterms):
    """Negative log-likelihood of the non-terminal flags under the predicted
    discount/continuation distribution.

    Boolean flags are cast to float (True -> 1.0, False -> 0.0) so they can
    serve as targets for the distribution's ``log_prob``.
    """
    target = nonterms.float()
    return -pcont_dist.log_prob(target).mean()


def _kl_loss(rssm_model, prior, posterior, dreamer_config):
    '''
    Compute the KL regularizer between the RSSM prior and posterior states.

    return: (prior_dist, post_dist, kl_loss)

    With ``use_kl_balance`` the loss mixes both KL directions:
        alpha * KL(sg(post) || prior) + (1 - alpha) * KL(post || sg(prior))
    where ``sg`` is a stop-gradient (detach). KL divergence is asymmetric, so
    blending both directions combines their optimization characteristics
    (mode-covering vs. mode-seeking). Without balancing, a single
    KL(post || prior) term is used.

    ``use_free_nats`` clips the KL from below: values under ``free_nats`` are
    replaced by ``free_nats`` so the regularizer never collapses to zero,
    while the upper range is left untouched.
    '''
    prior_dist = rssm_model.get_dist(prior)
    post_dist = rssm_model.get_dist(posterior)
    kl_cfg = dreamer_config.kl

    if kl_cfg['use_kl_balance']:
        alpha = kl_cfg['kl_balance_scale']
        # Two KL directions; the detached side receives no gradient, so each
        # term only trains the distribution on its non-detached side.
        kl_lhs = torch.distributions.kl.kl_divergence(
            rssm_model.get_dist(rssm_model.rssm_detach(posterior)), prior_dist).mean()
        kl_rhs = torch.distributions.kl.kl_divergence(
            post_dist, rssm_model.get_dist(rssm_model.rssm_detach(prior))).mean()
        if kl_cfg['use_free_nats']:
            # Lower-bound each direction at free_nats.
            free_nats = kl_cfg['free_nats']
            kl_lhs = kl_lhs.clamp(min=free_nats)
            kl_rhs = kl_rhs.clamp(min=free_nats)
        # Weighted mix of the two directions.
        kl_loss = alpha * kl_lhs + (1 - alpha) * kl_rhs
    else:
        # Single-direction KL(posterior || prior).
        kl_loss = torch.distributions.kl.kl_divergence(post_dist, prior_dist).mean()
        if kl_cfg['use_free_nats']:
            kl_loss = kl_loss.clamp(min=kl_cfg['free_nats'])
    return prior_dist, post_dist, kl_loss


def representation_loss(obs, actions, rewards, nonterms, obs_encoder, rssm_model, reward_decoder_model, discount_model, dreamer_config, obs_decoder_model=None):
    '''
    World-model (representation) loss for one batch of trajectory chunks.

    param obs: observations, time-major (t .. t+seq_len)
    param actions: actions (t-1 .. t+seq_len-1)
    param rewards: rewards (t-1 .. t+seq_len-1)
    param nonterms: non-terminal flags (t-1 .. t+seq_len-1)
    param obs_encoder: observation encoder network
    param rssm_model: recurrent state-space model
    param reward_decoder_model: reward head
    param discount_model: discount/continuation head
    param dreamer_config: hyper-parameter configuration
    param obs_decoder_model: observation decoder head; defaults to the
        module-level `obs_decoder` for backward compatibility (the previous
        implementation read that global directly even though every other
        model was passed in as a parameter)

    return model_loss, kl_loss, obs_loss, reward_loss, pcont_loss, prior_dist, post_dist, posterior
    '''
    if obs_decoder_model is None:
        # Fall back to the module-level decoder, matching the old behavior.
        obs_decoder_model = obs_decoder

    # Embed the raw observations.
    embed = obs_encoder(obs)                                         #t to t+seq_len
    # Fresh RSSM state for every training chunk.
    prev_rssm_state = rssm_model._init_rssm_state(dreamer_config.batch_size)
    # Roll the RSSM over the chunk to obtain prior/posterior state sequences.
    prior, posterior = rssm_model.rollout_observation(dreamer_config.chunk_size, embed, actions, nonterms, prev_rssm_state)
    # Model state = deterministic + stochastic parts of the posterior.
    post_modelstate = rssm_model.get_model_state(posterior)               #t to t+seq_len
    # The heads predict distributions (not point estimates), so each loss
    # below is a negative log-likelihood. The final time step is dropped
    # because its targets fall outside the sampled chunk.
    obs_dist = obs_decoder_model(post_modelstate[:-1])               #t to t+seq_len-1
    reward_dist = reward_decoder_model(post_modelstate[:-1])         #t to t+seq_len-1
    pcont_dist = discount_model(post_modelstate[:-1])                #t to t+seq_len-1

    # Observations are reconstructed for the same time step...
    obs_loss = _obs_loss(obs_dist, obs[:-1])
    # ...while reward/continuation targets are shifted by one step: the model
    # state at time t predicts the reward and non-termination at t+1.
    reward_loss = _reward_loss(reward_dist, rewards[1:])
    pcont_loss = _pcont_loss(pcont_dist, nonterms[1:])
    # KL regularizer pulling the prior and posterior state distributions together.
    prior_dist, post_dist, div = _kl_loss(rssm_model, prior, posterior, dreamer_config=dreamer_config)

    # Weighted sum of all world-model losses.
    model_loss = dreamer_config.loss_scale['kl'] * div + reward_loss + obs_loss + dreamer_config.loss_scale['discount'] * pcont_loss
    return model_loss, div, obs_loss, reward_loss, pcont_loss, prior_dist, post_dist, posterior


def _actor_loss(imag_reward, imag_value, discount_arr, imag_log_prob, policy_entropy, dreamer_config):
    '''
    param imag_reward: dreamerv2预测的奖励
    param imag_value: dreamerv2预测的价值
    param discount_arr: discount因子
    param imag_log_prob: dreamerv2的预测动作的对数概率 ，通过预测一个动作的对数概率，然后通过这个对数概率知道该往哪个方向走
    param policy_entropy: dreamerv2的预测动作的熵

    return 动作损失，折扣，lambda_returns（ppo的长期回报序列）
    '''

    # 计算预测的动作回报
    lambda_returns = common.compute_return(imag_reward[:-1], imag_value[:-1], discount_arr[:-1], bootstrap=imag_value[-1], lambda_=dreamer_config.lambda_)
    
    if dreamer_config.actor_grad == 'reinforce':
        # 基于策略梯度：传统的REINFORCE算法风格
        advantage = (lambda_returns-imag_value[:-1]).detach() # 使用优势函数：计算返回值与基线（价值估计）之间的差值作为优势
        objective = imag_log_prob[1:].unsqueeze(-1) * advantage # 通过动作的对数概率乘以优势函数来更新策略
        '''
        更高的方差
        更慢的收敛
        但在某些情况下可能探索性更好
        '''

    elif dreamer_config.actor_grad == 'dynamics':
        '''
        基于路径导数：直接通过世界模型的动态梯度进行反向传播
        使用返回值：直接最大化预期返回值
        梯度更新方式：梯度通过想象的状态-动作轨迹直接反向传播
        '''
        objective = lambda_returns

        '''
        在实践中，dynamics 策略是 DreamerV2 的默认选择，因为它通常提供更好的性能和更快的收敛。然而，提供 reinforce 选项可能是为了在特定环境或情况下提供替代方案。
        '''
    else:
        raise NotImplementedError

    discount_arr = torch.cat([torch.ones_like(discount_arr[:1]), discount_arr[1:]]) # 首先，将第一个折扣因子强制设为 1.0，确保从当前时间步开始的奖励权重是 100%
    discount = torch.cumprod(discount_arr[:-1], 0) # 然后，计算累积折扣乘积，表示未来奖励在当前决策中的重要性逐渐降低
    policy_entropy = policy_entropy[1:].unsqueeze(-1) # 选取策略熵（不包括第一个时间步
    '''
    策略熵是衡量行动"随机性"的指标。想象一个掷骰子的游戏：

    低熵：总是倾向于选择某个数字
    高熵：完全随机地选择任何数字
    '''

    # objective:主要目标（返回值或优势函数）
    # actor_entropy_scale * policy_entropy 加上策略熵（乘以一个系数来调整探索程度）
    # discount 加权平均,用折扣因子对每个时间步的目标进行加权,近期回报权重高，远期回报权重低
    # torch.mean 先对批次维度求均值 l n中的n
    # torch.sum 再对时间维度求和l n中的l
    actor_loss = -torch.sum(torch.mean(discount * (objective + dreamer_config.actor_entropy_scale * policy_entropy), dim=1)) 
    return actor_loss, discount, lambda_returns


def _value_loss(value_model, imag_modelstates, discount, lambda_returns):
    """Discount-weighted negative log-likelihood loss for the value head.

    The model states, the cumulative discount weights and the lambda-return
    targets are all detached: only the value network itself receives
    gradients. Multiplying by the cumulative discount down-weights far-horizon
    predictions — they are noisier and may follow a (predicted) episode
    termination — and mirrors the weighting used in the actor loss, so critic
    and actor training stay consistent. Because the value head outputs a
    distribution, the loss is the (negative) log-probability of the detached
    targets under that distribution: targets far from the predicted
    distribution are penalized, targets near it are reinforced.
    """
    with torch.no_grad():
        states = imag_modelstates[:-1].detach()
        weights = discount.detach()
        targets = lambda_returns.detach()

    # Predict the value distribution from the imagined model states.
    dist = value_model(states)
    log_likelihood = dist.log_prob(targets).unsqueeze(-1)
    return -torch.mean(weights * log_likelihood)



def actorcritc_loss(posterior, rssm_model, actor_model, value_model, target_value_model, discount_model, reward_model, dreamer_config):
    '''
    Actor-critic losses computed on trajectories imagined from the posterior.

    param posterior: posterior RSSM states from the world-model update

    return: (actor_loss, value_loss, target_info logging statistics)

    NOTE(review): this function reads the module-level globals `world_list`
    and `value_list` (defined in the __main__ section) instead of receiving
    them as parameters, so it can only be called from within this script.
    '''
    with torch.no_grad():
        # rssm_seq_to_batch flattens the (seq, batch) dimensions into one batch
        # dimension; the posterior is then detached so imagination does not
        # back-propagate into the world model.
        batched_posterior = rssm_model.rssm_detach(rssm_model.rssm_seq_to_batch(posterior, dreamer_config.batch_size, dreamer_config.chunk_size-1))
    
    # Freeze the world-model parameters while the actor imagines rollouts.
    with common.FreezeParameters(world_list):
        imag_rssm_states, imag_log_prob, policy_entropy = rssm_model.rollout_imagination(dreamer_config.planning_horizon, actor_model, batched_posterior)
    
    # Model state = deterministic + stochastic parts of the imagined states.
    imag_modelstates = rssm_model.get_model_state(imag_rssm_states)
    # Freeze everything except the actor while evaluating the prediction heads.
    with common.FreezeParameters(world_list+value_list+[target_value_model.target_model]+[discount_model]):
        # Reward distribution over imagined states (similar in spirit to C51).
        imag_reward_dist = reward_model(imag_modelstates)
        imag_reward = imag_reward_dist.mean
        # Value distribution from the slow-moving target value network.
        imag_value_dist = target_value_model.target_model(imag_modelstates)
        imag_value = imag_value_dist.mean
        # Continuation (non-termination) distribution; rounding turns the
        # continuation probability into a hard 0/1 before applying gamma.
        discount_dist = discount_model(imag_modelstates)
        discount_arr = dreamer_config.discount_*torch.round(discount_dist.base_dist.probs)              #mean = prob(disc==1)

    actor_loss, discount, lambda_returns = _actor_loss(imag_reward, imag_value, discount_arr, imag_log_prob, policy_entropy, dreamer_config)
    value_loss = _value_loss(value_model, imag_modelstates, discount, lambda_returns)     
    
    # Statistics of the lambda-return targets, used only for logging.
    mean_target = torch.mean(lambda_returns, dim=1)
    max_targ = torch.max(mean_target).item()
    min_targ = torch.min(mean_target).item() 
    std_targ = torch.std(mean_target).item()
    mean_targ = torch.mean(mean_target).item()
    target_info = {
        'min_targ':min_targ,
        'max_targ':max_targ,
        'std_targ':std_targ,
        'mean_targ':mean_targ,
    }

    # Return the actor loss, the critic loss and the logging statistics.
    return actor_loss, value_loss, target_info



def train_model(dreamer_config, buffer, obs_encoder, rssm_model, reward_decoder_model, discount_model, model_optimizer, actor_optimizer, value_optimizer, world_list, device):
    """Run `collect_intervals` optimization steps: one world-model update and
    one actor/critic update per sampled batch of trajectory chunks.

    NOTE(review): besides its parameters this function reads module-level
    globals defined in the __main__ section (`env`, `actor_model`,
    `value_model`, `target_value_model`, `reward_model`, `actor_list`,
    `value_list`, and `obs_decoder` via `representation_loss`), so it can
    only be called from this script. It currently returns None because the
    metric aggregation at the bottom is commented out.
    """
    # Per-interval statistics, collected for the (disabled) metric summary below.
    actor_l = []
    value_l = []
    obs_l = []
    model_l = []
    reward_l = []
    prior_ent_l = []
    post_ent_l = []
    kl_l = []
    pcont_l = []
    mean_targ = []
    min_targ = []
    max_targ = []
    std_targ = []

    for i in range(dreamer_config.collect_intervals):
        # Sample a batch of trajectory chunks from the replay buffer.
        # NOTE(review): these buffers hold chunk_size-1 steps per sequence but
        # are reshaped below to chunk_size along the time axis — the element
        # counts only match if chunk_size is interpreted accordingly; verify
        # against buffer.sample / the config's chunk_size semantics.
        observations = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size - 1) + env.observation_space.shape, dtype=np.uint8)
        actions = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size - 1, env.action_space.shape[0]), dtype=np.float32)
        rewards = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size - 1), dtype=np.float32)
        non_dones = np.empty((dreamer_config.batch_size, dreamer_config.chunk_size - 1, 1), dtype=np.bool_)
        batch = buffer.sample(dreamer_config.batch_size, dreamer_config.chunk_size)
        for batch_idx in range(0, dreamer_config.batch_size):
            cur_batch = batch[batch_idx]
            for step_idx in range(dreamer_config.chunk_size - 1):
                # cur_batch[step_idx][0] presumably holds (obs, action, reward,
                # done); the observation is taken from the NEXT step — verify
                # against the ptan chunk-buffer experience layout.
                observations[batch_idx][step_idx] = cur_batch[step_idx + 1][0][0]
                actions[batch_idx][step_idx] = cur_batch[step_idx][0][1]
                rewards[batch_idx][step_idx] = cur_batch[step_idx][0][2]
                non_dones[batch_idx][step_idx] = not cur_batch[step_idx][0][3]

        # Reorder to time-major and scale pixels from [0, 255] to roughly [-1, 1).
        observations_n = np.array(observations).transpose(1, 0, 2, 3, 4).reshape((dreamer_config.chunk_size, dreamer_config.batch_size) + env.observation_space.shape) / 128.0 - 1.0
        actions_n = np.array(actions).transpose(1, 0, 2).reshape(dreamer_config.chunk_size, dreamer_config.batch_size, -1)
        rewards_n = np.array(rewards).transpose(1, 0).reshape(dreamer_config.chunk_size, dreamer_config.batch_size)
        non_dones_n = np.array(non_dones).transpose(1, 0, 2).reshape(dreamer_config.chunk_size, dreamer_config.batch_size, 1)

        obs = torch.tensor(observations_n, dtype=torch.float32).to(device)                         #t, t+seq_len 
        actions = torch.tensor(actions_n, dtype=torch.float32).to(device)                 #t-1, t+seq_len-1
        rewards = torch.tensor(rewards_n, dtype=torch.float32).unsqueeze(-1).to(device)   #t-1 to t+seq_len-1
        nonterms = torch.tensor(non_dones_n, dtype=torch.float32).to(device) #t-1 to t+seq_len-1

        # World-model (representation) loss and update.
        model_loss, kl_loss, obs_loss, reward_loss, pcont_loss, prior_dist, post_dist, posterior = representation_loss(obs, actions, rewards, nonterms, obs_encoder, rssm_model, reward_decoder_model, discount_model, dreamer_config)
        
        model_optimizer.zero_grad()
        model_loss.backward()
        grad_norm_model = torch.nn.utils.clip_grad_norm_(common.get_parameters(world_list), dreamer_config.grad_clip_norm)
        model_optimizer.step()

        # Actor/critic losses from trajectories imagined off the posterior.
        actor_loss, value_loss, target_info = actorcritc_loss(
            posterior=posterior,
            rssm_model=rssm_model,
            actor_model=actor_model,
            value_model=value_model,
            target_value_model=target_value_model,
            discount_model=discount_model,
            reward_model=reward_model,
            dreamer_config=dreamer_config)

        actor_optimizer.zero_grad()
        value_optimizer.zero_grad()

        actor_loss.backward()
        value_loss.backward()

        grad_norm_actor = torch.nn.utils.clip_grad_norm_(common.get_parameters(actor_list), dreamer_config.grad_clip_norm)
        grad_norm_value = torch.nn.utils.clip_grad_norm_(common.get_parameters(value_list), dreamer_config.grad_clip_norm)

        actor_optimizer.step()
        value_optimizer.step()
        # Training step done; the rest only records statistics.

        with torch.no_grad():
            prior_ent = torch.mean(prior_dist.entropy())
            post_ent = torch.mean(post_dist.entropy())

        prior_ent_l.append(prior_ent.item())
        post_ent_l.append(post_ent.item())
        actor_l.append(actor_loss.item())
        value_l.append(value_loss.item())
        obs_l.append(obs_loss.item())
        model_l.append(model_loss.item())
        reward_l.append(reward_loss.item())
        kl_l.append(kl_loss.item())
        pcont_l.append(pcont_loss.item())
        mean_targ.append(target_info['mean_targ'])
        min_targ.append(target_info['min_targ'])
        max_targ.append(target_info['max_targ'])
        std_targ.append(target_info['std_targ'])

    # train_metrics['model_loss'] = np.mean(model_l)
    # train_metrics['kl_loss']=np.mean(kl_l)
    # train_metrics['reward_loss']=np.mean(reward_l)
    # train_metrics['obs_loss']=np.mean(obs_l)
    # train_metrics['value_loss']=np.mean(value_l)
    # train_metrics['actor_loss']=np.mean(actor_l)
    # train_metrics['prior_entropy']=np.mean(prior_ent_l)
    # train_metrics['posterior_entropy']=np.mean(post_ent_l)
    # train_metrics['pcont_loss']=np.mean(pcont_l)
    # train_metrics['mean_targ']=np.mean(mean_targ)
    # train_metrics['min_targ']=np.mean(min_targ)
    # train_metrics['max_targ']=np.mean(max_targ)
    # train_metrics['std_targ']=np.mean(std_targ)

    # return train_metrics



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): default=True combined with action='store_true' means this
    # flag is always True and cannot be switched off from the command line.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default="icehockey", help="Name of the run")
    args = parser.parse_args()
    device = common.select_device(args=args)

    save_path = os.path.join("saves", "dreamer-v2-" + args.name)
    os.makedirs(save_path, exist_ok=True)

    # TODO: compare training with frameskip=4 against no frame skipping.
    env = common.wrap_dqn(gym.make('ALE/IceHockey-v5', render_mode='rgb_array', frameskip=4, repeat_action_probability=0.0))
    test_env = common.wrap_dqn(gym.make('ALE/IceHockey-v5', render_mode='rgb_array', frameskip=4, repeat_action_probability=0.0))

    dreamer_config.action_size = env.action_space.shape[0]
    dreamer_config.obs_shape = env.observation_space.shape

    # Stochastic-state width depends on the RSSM flavour: a plain vector for
    # continuous, categories * classes for discrete.
    if dreamer_config.rssm_type == 'continuous':
        stoch_size = dreamer_config.rssm_info['stoch_size']
    elif dreamer_config.rssm_type == 'discrete':
        category_size = dreamer_config.rssm_info['category_size']
        class_size = dreamer_config.rssm_info['class_size']
        stoch_size = category_size*class_size

    dreamer_config.modelstate_size = stoch_size + dreamer_config.rssm_info['deter_size']

    # Build the RSSM world model plus the reward, actor, value and discount heads.
    rssm_model = model.RssmModel(
        action_size=dreamer_config.action_size, 
        rssm_node_size=dreamer_config.rssm_node_size, 
        embedding_size=dreamer_config.embedding_size, 
        rssm_type=dreamer_config.rssm_type, 
        info=dreamer_config.rssm_info, device=device).to(device=device)
    reward_model = model.DenseModel(
        output_shape=(1,), 
        input_size=dreamer_config.modelstate_size, 
        info=dreamer_config.reward).to(device=device)
    actor_model = model.DreamerActorModel(
        action_size=dreamer_config.action_size, 
        deter_size=dreamer_config.rssm_info['deter_size'], 
        stoch_size=stoch_size, 
        embedding_size=dreamer_config.embedding_size, 
        actor_info=dreamer_config.actor, 
        expl_info=dreamer_config.expl).to(device=device)
    value_model = model.DenseModel(
        output_shape=(1,), 
        input_size=dreamer_config.modelstate_size, 
        info=dreamer_config.critic).to(device=device)
    target_value_model = ptan.agent.TargetNet(value_model)

    discount_model = model.DenseModel(
        output_shape=(1,), 
        input_size=dreamer_config.modelstate_size, 
        info=dreamer_config.discount).to(device=device)
    
    # Build the observation encoder/decoder: convolutional for pixel input,
    # dense networks otherwise.
    if dreamer_config.pixel:
        obs_encoder = model.ObsEncoder(
            input_shape=dreamer_config.obs_shape, 
            embedding_size=dreamer_config.embedding_size, 
            info=dreamer_config.obs_encoder).to(device=device)
        obs_decoder = model.ObsDecoder(
            output_shape=dreamer_config.obs_shape, 
            embed_size=dreamer_config.modelstate_size, 
            info=dreamer_config.obs_decoder).to(device=device)
    else:
        obs_encoder = model.DenseModel(
            output_shape=(dreamer_config.embedding_size,), 
            input_size=int(np.prod(dreamer_config.obs_shape)), 
            info=dreamer_config.obs_encoder).to(device=device)
        obs_decoder = model.DenseModel(
            output_shape=dreamer_config.obs_shape, 
            input_size=dreamer_config.modelstate_size, 
            info=dreamer_config.obs_decoder).to(device=device)


    print(rssm_model)
    print(reward_model)
    print(actor_model)
    print(value_model)
    print(discount_model)
    print(obs_encoder)
    print(obs_decoder)


    # Parameter groups: the world model (encoder, RSSM, reward decoder,
    # observation decoder, discount head) vs. the actor and the critic.
    world_list = [obs_encoder, rssm_model, reward_model, obs_decoder, discount_model]
    actor_list = [actor_model]
    value_list = [value_model]
    # Policy and value networks together.
    actorcritic_list = [actor_model, value_model]
    model_optimizer = optim.Adam(common.get_parameters(world_list), dreamer_config.model_learning_rate)
    actor_optimizer = optim.Adam(common.get_parameters(actor_list), dreamer_config.actor_learning_rate)
    value_optimizer = optim.Adam(common.get_parameters(value_list), dreamer_config.value_learning_rate)

    model_opt_scheduler = optim.lr_scheduler.StepLR(model_optimizer, step_size=50000, gamma=0.9)
    act_opt_scheduler = optim.lr_scheduler.StepLR(actor_optimizer, step_size=50000, gamma=0.9)
    value_opt_scheduler = optim.lr_scheduler.StepLR(value_optimizer, step_size=50000, gamma=0.9)


    writer = SummaryWriter(comment="-dreamer_" + args.name)
    # Experience-collection agents: a warm-up sampler and the Dreamer policy
    # agent. (The original comment said "DDPG agent" — these are Dreamer agents.)
    # NOTE(review): the warm-up agent is built around test_env while the
    # experience source below steps env — confirm this is intended.
    preheat_agent = common.EnvSampleAgent(env=test_env, device=device)
    dreamerv2_agent = common.EnvDreamerPredictAgent(actor_model=actor_model, rssm=rssm_model, obs_encoder=obs_encoder, action_size=dreamer_config.action_size, device=device)
    exp_source = ptan.experience.ExperienceSourceRAW(env, preheat_agent, steps_count=dreamer_config.reward_step)
    buffer = ptan.experience.ExperienceReplayChunkBuffer(exp_source, buffer_size=dreamer_config.replay_size)

    frame_idx = 1
    train_count = 1
    # Resume from the newest checkpoint if any exist in save_path.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Keep only "epoch" files and sort them by their trailing step number.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[-1].split('.')[0]))
        if len(checkpoints) > 0:
            checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
            rssm_model.load_state_dict(checkpoint['rssm_model'])
            reward_model.load_state_dict(checkpoint['reward_model'])
            actor_model.load_state_dict(checkpoint['actor_model'])
            value_model.load_state_dict(checkpoint['value_model'])
            obs_encoder.load_state_dict(checkpoint['obs_encoder'])
            obs_decoder.load_state_dict(checkpoint['obs_decoder'])
            discount_model.load_state_dict(checkpoint['discount_model'])
            model_optimizer.load_state_dict(checkpoint['model_optimizer'])
            actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
            value_optimizer.load_state_dict(checkpoint['value_optimizer'])
            model_opt_scheduler.load_state_dict(checkpoint['model_opt_scheduler'])
            act_opt_scheduler.load_state_dict(checkpoint['act_opt_scheduler'])
            value_opt_scheduler.load_state_dict(checkpoint['value_opt_scheduler'])

            frame_idx = checkpoint['frame_idx']
            train_count = checkpoint['train_count']

        print("加载模型成功")

    print("模型预热")
    # Warm-up: pre-fill the replay buffer using the preheat agent.
    for _ in tqdm(range(dreamer_config.replay_initial)):
        buffer.populate(1)
    print("模型预热完成")

    # Switch the experience source over to the Dreamer policy agent.
    dreamerv2_agent.set_iter(frame_idx)
    exp_source = ptan.experience.ExperienceSourceRAW(env, dreamerv2_agent, steps_count=dreamer_config.reward_step)
    buffer.set_exp_source(exp_source)
    best_reward = None
    # NOTE(review): these two locals appear unused below — the agent seems to
    # keep its own recurrent state; confirm and remove if so.
    prev_rssmstate = rssm_model._init_rssm_state(1)
    prev_action = torch.zeros(1, dreamer_config.action_size).to(device=device)

    with ptan.common.utils.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=100) as tb_tracker:
            while True:
                frame_idx += 1
                buffer.populate(1)
                dreamerv2_agent.set_iter(frame_idx)
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    # Log progress for any episodes that finished this frame.
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", steps[0], frame_idx)
                    tracker.reward(rewards[0], frame_idx)
                
                # NOTE(review): because these are elif branches, the target-net
                # sync and the periodic test never run on a frame where a
                # training step happened — confirm this is intended.
                if frame_idx % dreamer_config.train_every == 0:
                    train_model(
                        dreamer_config=dreamer_config,
                        buffer=buffer,
                        obs_encoder=obs_encoder,
                        rssm_model=rssm_model,
                        reward_decoder_model=reward_model,
                        discount_model=discount_model,
                        model_optimizer=model_optimizer,
                        actor_optimizer=actor_optimizer,
                        value_optimizer=value_optimizer,
                        world_list=world_list,
                        device=device)
                    train_count += 1
                    checkpoints = {
                        'rssm_model': rssm_model.state_dict(),
                        'reward_model': reward_model.state_dict(),
                        'actor_model': actor_model.state_dict(),
                        'value_model': value_model.state_dict(),
                        'obs_encoder': obs_encoder.state_dict(),
                        'obs_decoder': obs_decoder.state_dict(),
                        'discount_model': discount_model.state_dict(),
                        'model_optimizer': model_optimizer.state_dict(),
                        'actor_optimizer': actor_optimizer.state_dict(),
                        'value_optimizer': value_optimizer.state_dict(),
                        'model_opt_scheduler': model_opt_scheduler.state_dict(),
                        'act_opt_scheduler': act_opt_scheduler.state_dict(),
                        'value_opt_scheduler': value_opt_scheduler.state_dict(),
                        'frame_idx': frame_idx,
                        'train_count': train_count
                    }
                    common.save_checkpoints(train_count, checkpoints, save_path, 'dreamer_v2')
                elif frame_idx % dreamer_config.slow_target_update == 0:
                    # alpha_sync(0): presumably a hard copy of the online value
                    # net into the target net — verify ptan TargetNet semantics.
                    target_value_model.alpha_sync(0)
                elif train_count % dreamer_config.test_iters == 0:
                    checkpoints = {
                        'rssm_model': rssm_model.state_dict(),
                        'reward_model': reward_model.state_dict(),
                        'actor_model': actor_model.state_dict(),
                        'value_model': value_model.state_dict(),
                        'obs_encoder': obs_encoder.state_dict(),
                        'obs_decoder': obs_decoder.state_dict(),
                        'discount_model': discount_model.state_dict(),
                        'frame_idx': frame_idx,
                        'train_count': train_count
                    }
                    best_reward = save_every(
                        rssm_model=rssm_model, 
                        obs_encoder_model=obs_encoder, 
                        obs_decoder_model=obs_decoder, 
                        action_model=actor_model, 
                        device=device, 
                        save_path=save_path, 
                        test_env=test_env,
                        checkpoints=checkpoints,
                        best_reward=best_reward,
                        frame_idx=frame_idx,
                        writer=writer)
    pass
