import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch.distributions.utils import logits_to_probs
from dataclasses import dataclass, field
from torch.distributions import Categorical
import copy
import math

import torch
import torch.distributions as D
from torch import nn

import numpy as np
import math
import lib.common as common

from functools import lru_cache


def get_activation(act_name: str) -> nn.Module:
    """Return a freshly constructed activation module for `act_name`.

    Args:
        act_name: one of "relu", "silu", "gelu", "tanh", "elu",
            "leaky_relu", or "none" (identity).

    Returns:
        A new nn.Module instance of the requested activation.

    Raises:
        ValueError: if `act_name` is not a supported activation name.
    """
    # Map names to classes (not instances) so only the requested activation
    # is constructed — the original built all seven modules per call.
    activation_functions = {
        "relu": nn.ReLU,
        "silu": nn.SiLU,
        "gelu": nn.GELU,
        "tanh": nn.Tanh,
        "elu": nn.ELU,
        "leaky_relu": nn.LeakyReLU,
        'none': nn.Identity,
    }

    if act_name not in activation_functions:
        raise ValueError(f"Unsupported activation function: {act_name}. Supported functions are: {list(activation_functions.keys())}")

    return activation_functions[act_name]()


class TWMAgent(ptan.agent.BaseAgent):
    """ptan agent that selects actions via a world-model 'dreamer'.

    Observations/actions/rewards are read back from `replay_buffer` and fed
    to the dreamer; `__call__` returns the dreamer's chosen actions.
    """
    # TODO: unify code with DQNAgent, as only the action selector differs.
    def __init__(self, params, wm, ac, replay_buffer, action_selector=ptan.actions.ProbabilityActionSelector(), device="cpu",
                 apply_softmax=False, preprocessor=ptan.agent.default_states_preprocessor):
        self.action_selector = action_selector
        self.device = device
        self.apply_softmax = apply_softmax
        self.preprocessor = preprocessor
        self.dreamer = None
        self.params = params
        self.wm = wm
        self.ac = ac
        self.replay_buffer = replay_buffer

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """Return (actions ndarray, None) for the given environment states."""
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)

        end = len(self.replay_buffer)
        if self.dreamer is None:
            # NOTE(review): this branch dereferences self.dreamer while it is
            # still None, which would raise AttributeError; presumably a
            # dreamer is meant to be constructed before observe_reset (or the
            # condition is inverted) — confirm against the caller.
            prefix = self.params['wm_memory_length'] - 1
            # Bug fix: all five lines below used the misspelled `self.deivce`.
            start_o = torch.tensor(self.replay_buffer.get_obs([[end]], prefix=prefix + 1), device=self.device)
            start_a = torch.tensor(self.replay_buffer.get_actions([[end - 1]], prefix=prefix), device=self.device)
            start_r = torch.tensor(self.replay_buffer.get_rewards([[end - 1]], prefix=prefix), device=self.device)
            start_terminated = torch.tensor(self.replay_buffer.get_terminated([[end - 1]], prefix=prefix), device=self.device)
            start_truncated = torch.tensor(self.replay_buffer.get_truncated([[end - 1]], prefix=prefix), device=self.device)

            self.dreamer.observe_reset(start_o, start_a, start_r, start_terminated, start_truncated)
            actions = self.dreamer.act()
        else:
            # Bug fix: `device=` was previously passed to the replay-buffer
            # getters (which take no such argument) instead of torch.tensor,
            # and was also misspelled `self.deivce`.
            o = torch.tensor(self.replay_buffer.get_obs([[end]]), device=self.device)
            a = torch.tensor(self.replay_buffer.get_actions([[end - 1]]), device=self.device)
            r = torch.tensor(self.replay_buffer.get_rewards([[end - 1]]), device=self.device)
            terminated = torch.tensor(self.replay_buffer.get_terminated([[end - 1]]), device=self.device)
            truncated = torch.tensor(self.replay_buffer.get_truncated([[end - 1]]), device=self.device)
            self.dreamer.observe_step(a, o, r, terminated, truncated)
            actions = self.dreamer.act()

        return np.array(actions), None


class Agent(nn.Module):
    """Bundle of the world model and the actor-critic for one agent."""

    def __init__(self, config, num_actions):
        super().__init__()
        self.config = config
        self.wm = WorldModel(config, num_actions)
        self.ac = ActorCritic(config, num_actions, self.wm.z_dim, self.wm.h_dim)

    def state_dict(self):
        """Return a flat checkpoint dict covering both sub-models and,
        explicitly, all four of their optimizers.

        NOTE: overrides nn.Module.state_dict with a custom layout; it is
        paired with the load_state_dict below and not interchangeable with
        the stock nn.Module format.
        """
        wm, ac = self.wm, self.ac
        return {
            'world_model': wm.state_dict(),
            'actor_critic': ac.state_dict(),
            # WorldModel optimizers, saved explicitly
            'wm_obs_optimizer': wm.obs_optimizer.state_dict(),
            'wm_dyn_optimizer': wm.dyn_optimizer.state_dict(),
            # ActorCritic optimizers, saved explicitly
            'ac_actor_optimizer': ac.actor_optimizer.state_dict(),
            'ac_critic_optimizer': ac.critic_optimizer.state_dict(),
        }

    def load_state_dict(self, state_dict):
        """Restore model weights, then every optimizer's state."""
        self.wm.load_state_dict(state_dict['world_model'])
        self.ac.load_state_dict(state_dict['actor_critic'])

        optimizer_slots = (
            (self.wm.obs_optimizer, 'wm_obs_optimizer'),
            (self.wm.dyn_optimizer, 'wm_dyn_optimizer'),
            (self.ac.actor_optimizer, 'ac_actor_optimizer'),
            (self.ac.critic_optimizer, 'ac_critic_optimizer'),
        )
        for optimizer, key in optimizer_slots:
            optimizer.load_state_dict(state_dict[key])



class WorldModel(nn.Module):

    def __init__(self, config, num_actions):
        super().__init__()
        self.config = config
        self.num_actions = num_actions

        self.obs_model = ObservationModel(config)
        self.dyn_model = DynamicsModel(config, self.obs_model.z_dim, num_actions)

        self.obs_optimizer = common.AdamOptim(
            self.obs_model.parameters(), lr=config['obs_lr'], eps=config['obs_eps'], weight_decay=config['obs_wd'],
            grad_clip=config['obs_grad_clip'])
        self.dyn_optimizer = common.AdamOptim(
            self.dyn_model.parameters(), lr=config['dyn_lr'], eps=config['dyn_eps'], weight_decay=config['dyn_wd'],
            grad_clip=config['dyn_grad_clip'])

    @property
    def z_dim(self):
        return self.obs_model.z_dim

    @property
    def h_dim(self):
        return self.dyn_model.h_dim

    def optimize_pretrain_obs(self, o):
        '''
        o: 传入环境的观察数据，数值范围是[0, 1], 形状为(batch_size, time, frame_stack, height, width, channels)  [wm_total_batch_size, 1, frame_stack, h, w, channels] 或者 [1 + prefix + wm_total_batch_size + 1, 1, frame_stack, h, w, c]
        '''

        obs_model = self.obs_model
        obs_model.train()
        
        # 将观察数据进行特征提取，获取一个潜在分布
        z_dist = obs_model.encode(o)
        # z shape is (batch_size, time, z_categoricals * z_categories)
        z = obs_model.sample_z(z_dist, reparameterized=True) # # 这里返回的是对应的潜在分布的采样结果，z shape is (batch_size, time, z_categoricals * z_categories)
        # 通过潜在分布进行重构,recons shape is (batch_size, time, frame_stack, height, width, channels)
        recons = obs_model.decode(z)

        # no consistency loss required for pretraining
        dec_loss, dec_met = obs_model.compute_decoder_loss(recons, o) # 计算重构损失
        ent_loss, ent_met = obs_model.compute_entropy_loss(z_dist) # 计算熵损失和指标，这里的熵应该是让提取的特征不会过于确定，保持一定的随机性，能够抓住更多的关键特征点

        obs_loss = dec_loss + ent_loss # 汇总全部损失
        self.obs_optimizer.step(obs_loss) # 优化

        metrics = common.combine_metrics([ent_met, dec_met]) # 合并指标
        metrics['obs_loss'] = obs_loss.detach()
        return metrics # 返回

    def optimize_pretrain_dyn(self, z, a, r, terminated, truncated, target_logits):
        '''
        z shape is （1，sequence_length + extra - 1， z_categoricals * z_categories）
        target_logits: （1，-2 + sequence_length + extra， z_categoricals * z_categories）
        a, r, terminated, truncated, shape is (1, sequence_length + extra - 2)
        '''
        assert common.same_batch_shape([z, a, r, terminated, truncated]) 
        assert common.same_batch_shape_time_offset(z, target_logits, 1)
        dyn_model = self.dyn_model
        dyn_model.train()

        d = torch.logical_or(terminated, truncated) # d shape is (1, sequence_length + extra - 2)
        g = self.to_discounts(terminated) # 获取一个折扣矩阵，并且如果terminated对应的位置为true，表示结束，折扣应为0 g shape is (batch_size(1), sequence_length + extra - 2)
        target_weights = (~d[:, 1:]).float() # target_weights shape is (1, -1 + sequence_length + extra - 2)，表示非结束状态的权重
        tgt_length = target_logits.shape[1] # 获取序列长度  tgt_length is sequence_length + extra - 2

        '''
        out/preds shape is {
            'z': (batch_size, tgt_length / num_modalities, z_dim),
            'r': (batch_size, tgt_length / num_modalities, 1),
            'g': (batch_size, tgt_length / num_modalities, 1)
        }
        hiddens/h shape is (batch_size, tgt_length / num_modalities, dim)
        mems shape is [num_layers + 1, mem_length, batch_size, dim]
        '''
        preds, h, mems = dyn_model.predict(z, a, r[:, :-1], g[:, :-1], d[:, :-1], tgt_length, compute_consistency=True)
        dyn_loss, metrics = dyn_model.compute_dynamics_loss(
            preds, h, target_logits=target_logits, target_r=r[:, 1:], target_g=g[:, 1:], target_weights=target_weights)
        # 优化动态环境模型损失，包含观察模型的潜在分布预测损失、奖励预测损失和折扣预测损失
        self.dyn_optimizer.step(dyn_loss)
        return metrics

    def optimize(self, o, a, r, terminated, truncated):
        '''
        obs shape is [wm_total_batch_size, wm_sequence_length, h, w, c]
        actions shape is [wm_total_batch_size, wm_sequence_length]
        rewards shape is [wm_total_batch_size, wm_sequence_length]
        terminated shape is [wm_total_batch_size, wm_sequence_length]
        truncated shape is [wm_total_batch_size, wm_sequence_length]
        '''
        assert common.same_batch_shape([a, r, terminated, truncated])
        assert common.same_batch_shape_time_offset(o, r, 1)

        obs_model = self.obs_model
        dyn_model = self.dyn_model

        self.eval() # 将模型切换为评估模式，避免在编码和解码过程中进行梯度计算
        with torch.no_grad():
            context_z_dist = obs_model.encode(o[:, :1]) # 这里仅编码了第一帧，为动态模型提供初始状态，不参与观察模型的重建损失计算
            context_z = obs_model.sample_z(context_z_dist) # 根据前缀观察的潜在分布进行采样，context_z shape is  (batch_size, 1, z_categoricals * z_categories)
            next_z_dist = obs_model.encode(o[:, -1:]) # 获取后续观察的潜在分布，也就是最后一个序列的观察，仅编码了最后一帧
            next_logits = next_z_dist.base_dist.logits # 获取后续观察的潜在分布的logits，next_logits shape is (batch_size, 1, z_categoricals, z_categories)

        self.train() # 将模型切换为训练模式，开始进行梯度计算和优化

        # observation model
        o = o[:, 1:-1] # 这里应该是去除了第一帧和最后一帧
        z_dist = obs_model.encode(o)
        z = obs_model.sample_z(z_dist, reparameterized=True) # 这里返回的是对应的潜在分布的采样结果，z shape is (batch_size, wm_sequence_length - 2, z_categoricals * z_categories)
        recons = obs_model.decode(z)

        dec_loss, dec_met = obs_model.compute_decoder_loss(recons, o) # 计算重构损失和指标
        ent_loss, ent_met = obs_model.compute_entropy_loss(z_dist) # 计算熵损失和指标，这里的熵应该是让提取的特征不会过于确定，保持一定的随机性，能够抓住更多的关键特征点

        # dynamics model
        z = z.detach()
        z = torch.cat([context_z, z], dim=1) # 将第一帧的潜在分布和后续帧的潜在分布拼接起来，z shape is (batch_size, wm_sequence_length - 1, z_categoricals * z_categories)
        z_logits = z_dist.base_dist.logits # z_logits shape is (batch_size, wm_sequence_length - 2, z_categoricals, z_categories)
        target_logits = torch.cat([z_logits[:, 1:].detach(), next_logits.detach()], dim=1) # 这里是将中间帧和最后一帧的潜在分布的logits拼接起来，target_logits shape is (batch_size, wm_sequence_length - 2 + 1, z_categoricals, z_categories)
        d = torch.logical_or(terminated, truncated) # d shape is (batch_size, wm_sequence_length)，表示是否结束的标志
        g = self.to_discounts(terminated) # g shape is (batch_size, wm_sequence_length)，获取一个折扣矩阵，并且如果terminated对应的位置为true，表示结束，折扣应为0
        target_weights = (~d[:, 1:]).float() # target_weights shape is (batch_size, wm_sequence_length - 2)，表示非结束状态的权重
        tgt_length = target_logits.shape[1] # wm_sequence_length - 2 + 1

        '''
        preds shape is {
            'z': (batch_size, tgt_length / num_modalities - 1, z_categoricals, z_categories),
            'r_dist': (batch_size, tgt_length / num_modalities - 1, 1),
            'r': (batch_size, tgt_length / num_modalities - 1, 1),
            'g_dist': (batch_size, tgt_length / num_modalities - 1, 1)
            'g': (batch_size, tgt_length / num_modalities - 1, 1)
        }

        h shape is (batch_size, tgt_length / num_modalities, dim)
        mems shape is [num_layers + 1, mem_length, batch_size, dim]
        attention shape is [num_layers, batch_size, num_heads, tgt_length / num_modalities, tgt_length / num_modalities]
        如果 return_attention 为True，则返回注意力权重，否则只返回预测结果和隐藏状态
        '''
        preds, h, mems = dyn_model.predict(z, a, r[:, :-1], g[:, :-1], d[:, :-1], tgt_length, compute_consistency=True)

        # 动态模型损失计算，主要是计算环境模型的潜在状态预测损失、奖励预测损失和折扣预测损失
        dyn_loss, dyn_met = dyn_model.compute_dynamics_loss(
            preds, h, target_logits=target_logits, target_r=r[:, 1:], target_g=g[:, 1:], target_weights=target_weights)
        self.dyn_optimizer.step(dyn_loss)

        z_hat_probs = preds['z_hat_probs'].detach()
        '''
        z_logits shape is (batch_size, tgt_length / num_modalities - 1, z_categoricals, z_categories)
        z_hat_probs shape is (batch_size, tgt_length / num_modalities - 1, z_categoricals, z_categories)

        简单来说，一致性损失是保证环境模型的编码接近predict模型的对环境的预测编码，两者传入的时间序列是相同的
        而compute_dynamics_loss是保证predict模型的对环境的预测编码接近环境模型的编码，时间序列是有偏差一个时间步的
        '''
        con_loss, con_met = obs_model.compute_consistency_loss(z_logits, z_hat_probs)

        # 汇总所有损失进行训练
        obs_loss = dec_loss + ent_loss + con_loss
        self.obs_optimizer.step(obs_loss)

        metrics = common.combine_metrics([dec_met, ent_met, con_met, dyn_met])
        metrics['obs_loss'] = obs_loss.detach()

        '''
        z shape is (batch_size, tgt_length / num_modalities - 1, z_categoricals * z_categories)
        h shape is (batch_size, tgt_length / num_modalities, h_dim)
        metrics shape is {
            'obs_loss': obs_loss,
            'enc_prior_ce': cross_entropy.detach().mean(),
            'enc_prior_loss': loss.detach(),
            'enc_decoder_loss': dec_loss.detach(),
            'enc_entropy_loss': ent_loss.detach(),
            'dyn_loss': dyn_loss.detach(),
            'dyn_r_loss': dyn_met['dyn_r_loss'].detach(),
            'dyn_g_loss': dyn_met['dyn_g_loss'].detach(),
            'dyn_r_ce': dyn_met['dyn_r_ce'].detach(),
            'dyn_g_ce': dyn_met['dyn_g_ce'].detach(),
            'dyn_r_mse': dyn_met['dyn_r_mse'].detach(),
            'dyn_g_mse': dyn_met['dyn_g_mse'].detach(),
            'dyn_enc_prior_ce': dyn_met['dyn_enc_prior_ce'].detach(),
            'dyn_enc_prior_loss': dyn_met['dyn_enc_prior_loss'].detach(),
            'enc_consistency_loss': con_loss.detach(),
            'enc_consistency_ce': con_met['enc_consistency_ce'].detach(),
            'enc_consistency_loss': con_met['enc_consistency_loss'].detach()
        }
        '''
        return z, h, metrics

    @torch.no_grad()
    def to_discounts(self, mask):
        assert common.check_no_grad(mask)
        discount_factor = self.config['env_discount_factor']
        g = torch.full(mask.shape, discount_factor, device=mask.device) # 获取折扣因子，创建和mask相同shape的折扣矩阵
        g = g * (~mask).float() # 如果mask对应的位置为True（表示结束），那么对应位置去反为false，float后为0，给折扣矩阵对应位置的折扣设置为0
        return g


class ObservationModel(nn.Module):
    """Encodes stacked frames into a discrete latent code and decodes latents
    back into frames.

    The latent is z_categoricals independent categorical variables with
    z_categories classes each, sampled with a straight-through estimator so
    the discrete code stays differentiable during training.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Flattened latent width: encoder output / decoder input dimension.
        self.z_dim = config['z_categoricals'] * config['z_categories']

        h = config['obs_channels']
        activation = config['obs_act']
        norm = config['obs_norm']
        dropout_p = config['obs_dropout']

        # Grayscale input has one channel per stacked frame; color has three.
        num_channels = config['env_frame_stack']
        if not config['env_grayscale']:
            num_channels *= 3

        self.encoder = nn.Sequential(
            CNN(num_channels, [h, h * 2, h * 4], h * 8,
                     [4, 4, 4, 4], [2, 2, 2, 2], [0, 0, 0, 0], activation, norm=norm, post_activation=True),
            nn.Flatten(),
            MLP((h * 8) * 2 * 2, [512, 512], self.z_dim, activation, norm=norm, dropout_p=dropout_p)
        )

        # no norm here
        self.decoder = nn.Sequential(
            MLP(self.z_dim, [], (h * 16) * 1 * 1, activation, dropout_p=dropout_p, post_activation=True),
            nn.Unflatten(1, (h * 16, 1, 1)),
            TransposeCNN(h * 16, [h * 4, h * 2, h], num_channels, [5, 5, 6, 6], [2, 2, 2, 2], [0, 0, 0, 0],
                              activation, final_bias_init=0.5)
        )

    @staticmethod
    def create_z_dist(logits, temperature=1):
        """Build the latent distribution from raw logits.

        Returns z_categoricals independent OneHotCategoricalStraightThrough
        distributions (straight-through gradients make the one-hot sample
        differentiable). `temperature` > 1 flattens, < 1 sharpens the
        categorical probabilities; it must be strictly positive.

        Independent(..., 1) folds the z_categoricals axis into the event, so
        log_prob/entropy automatically sum over the independent categoricals.
        """
        assert temperature > 0
        return D.Independent(D.OneHotCategoricalStraightThrough(logits=logits / temperature), 1)

    def encode(self, o):
        '''
        o: observations shaped (batch_size, time, frame_stack, height, width[, channels]),
           e.g. [wm_total_batch_size, 1, frame_stack, h, w, channels].

        Returns: the latent distribution whose samples have shape
        (batch_size, time, z_categoricals, z_categories).
        '''
        assert common.check_no_grad(o)
        config = self.config
        shape = o.shape[:2]  # remember (batch_size, time)
        o = o.flatten(0, 1)  # merge batch and time for the conv stack

        if not config['env_grayscale']:
            # Color input: fold the RGB channel axis into the frame-stack axis.
            o = o.permute(0, 1, 4, 2, 3)  # -> (batch*time, frame_stack, channels, height, width)
            o = o.flatten(1, 2)  # -> (batch*time, frame_stack * channels, height, width)

        z_logits = self.encoder(o)  # (batch*time, z_dim)
        z_logits = z_logits.unflatten(0, shape)  # restore (batch_size, time, z_dim)
        z_logits = z_logits.unflatten(-1, (config['z_categoricals'], config['z_categories']))
        z_dist = ObservationModel.create_z_dist(z_logits)
        return z_dist

    def sample_z(self, z_dist, reparameterized=False, temperature=1, idx=None, return_logits=False):
        '''
        Sample a flattened latent from `z_dist`.

        reparameterized: use rsample() so gradients flow through the sample
            (straight-through); sample() otherwise.
        temperature: 0 takes the deterministic argmax (never differentiable);
            any other value rescales the logits before sampling.
        idx: optional index into the leading batch dimension of the logits.
        return_logits: also return the (possibly re-normalized) logits.

        Returns: z of shape (batch_size, time, z_categoricals * z_categories),
        optionally with the logits.
        '''
        logits = z_dist.base_dist.logits  # (batch_size, time, z_categoricals, z_categories)
        # Gradient availability on the logits must agree with the flag.
        assert (not reparameterized) == common.check_no_grad(logits)
        if temperature == 0:
            # Deterministic argmax path.
            assert not reparameterized
            with torch.no_grad():
                if idx is not None:
                    logits = logits[idx]
                indices = torch.argmax(logits, dim=-1)  # (batch_size, time, z_categoricals)
                # One-hot then flatten -> (batch_size, time, z_categoricals * z_categories).
                z = F.one_hot(indices, num_classes=self.config['z_categories']).flatten(2, 3).float()
            if return_logits:
                return z, logits  # actually wrong logits for temperature = 0
            return z

        if temperature != 1 or idx is not None:
            if idx is not None:
                logits = logits[idx]
            # Rebuild the distribution around the temperature-scaled logits.
            z_dist = ObservationModel.create_z_dist(logits, temperature)
            if return_logits:
                logits = z_dist.base_dist.logits  # return new normalized logits

        # rsample() keeps the sample differentiable; sample() blocks gradients.
        z = z_dist.rsample() if reparameterized else z_dist.sample()
        z = z.flatten(2, 3)  # (batch_size, time, z_categoricals * z_categories)
        if return_logits:
            return z, logits
        return z

    def encode_sample(self, o, reparameterized=False, temperature=1, idx=None, return_logits=False):
        """Convenience wrapper: encode `o` and sample from the resulting distribution."""
        z_dist = self.encode(o)
        return self.sample_z(z_dist, reparameterized, temperature, idx, return_logits)

    def decode(self, z):
        '''
        z: (batch_size, time, z_categoricals * z_categories)

        Returns: reconstructions shaped
        (batch_size, time, frame_stack, height, width[, channels]).
        '''
        config = self.config
        shape = z.shape[:2]
        z = z.flatten(0, 1)  # merge batch and time: (batch*time, z_dim)
        recons = self.decoder(z)  # (batch*time, num_channels, height, width)
        if not config['env_grayscale']:
            # Color: split channels back out of the frame-stack axis...
            recons = recons.unflatten(1, (config['env_frame_stack'], 3))
            # ...and move them to the trailing position.
            recons = recons.permute(0, 1, 3, 4, 2)
        recons = recons.unflatten(0, shape)  # restore (batch_size, time, ...)
        return recons

    def compute_decoder_loss(self, recons, o):
        '''
        recons: reconstructions, (batch_size, time, frame_stack, height, width[, channels])
        o: ground-truth observations with the same shape (no channels axis for grayscale).

        Returns: (loss, metrics) — metrics contains 'recon_mae' always and
        'dec_loss' when the reconstruction term is enabled.
        '''
        assert common.check_no_grad(o)
        config = self.config
        metrics = {}
        # (batch*time, height, width, frame_stack[*channels])
        recon_mean = recons.flatten(0, 1).permute(0, 2, 3, 1)
        # Bug fix: reshape `o` unconditionally. Previously this happened only
        # inside the coef != 0 branch, so with obs_decoder_coef == 0 the
        # recon_mae below subtracted a 5/6-D `o` from a 4/5-D recon_mean.
        if config['env_grayscale']:
            # -> (batch*time, height, width, frame_stack)
            o = o.flatten(0, 1).permute(0, 2, 3, 1)
        else:
            # -> (batch*time, height, width, frame_stack * channels)
            o = o.flatten(0, 1).permute(0, 2, 3, 1, 4).flatten(-2, -1)
        coef = config['obs_decoder_coef']  # weight of the reconstruction term in the total loss
        if coef != 0:
            # Unit-variance Gaussian likelihood; last 3 dims form the event.
            recon_dist = D.Independent(D.Normal(recon_mean, torch.ones_like(recon_mean)), 3)
            # Negative log-likelihood of the true observations (minimized).
            loss = -coef * recon_dist.log_prob(o).mean()
            metrics['dec_loss'] = loss.detach()
        else:
            # Reconstruction term disabled by config.
            loss = torch.zeros(1, device=recons.device, requires_grad=False)
        # Mean absolute reconstruction error, tracked as a metric either way.
        metrics['recon_mae'] = torch.abs(o - recon_mean.detach()).mean()
        return loss, metrics

    def compute_entropy_loss(self, z_dist):
        '''
        z_dist: latent distribution whose samples have shape
                (batch_size, time, z_categoricals, z_categories).

        Returns: (loss, metrics) — a loss that keeps the latent code from
        collapsing to near-deterministic assignments.
        '''
        config = self.config
        metrics = {}

        entropy = z_dist.entropy().mean()
        # Maximum possible entropy: z_categoricals independent uniform
        # categoricals, each contributing log(z_categories).
        max_entropy = config['z_categoricals'] * math.log(config['z_categories'])
        normalized_entropy = entropy / max_entropy
        metrics['z_ent'] = entropy.detach()
        metrics['z_norm_ent'] = normalized_entropy.detach()

        coef = config['obs_entropy_coef']  # weight of the entropy term
        if coef != 0:
            if config['obs_entropy_threshold'] < 1:
                # Penalize only entropy BELOW the threshold (relu clips the
                # rest), so already-uncertain codes are not pushed further.
                # hinge loss, inspired by https://openreview.net/pdf?id=HkCjNI5ex
                loss = coef * torch.relu(config['obs_entropy_threshold'] - normalized_entropy)
            else:
                # Plain entropy maximization (negated for minimization).
                loss = -coef * normalized_entropy
            metrics['z_entropy_loss'] = loss.detach()
        else:
            # Entropy term disabled by config.
            loss = torch.zeros(1, device=z_dist.base_dist.logits.device, requires_grad=False)

        return loss, metrics

    def compute_consistency_loss(self, z_logits, z_hat_probs):
        '''
        Cross-entropy between the dynamics model's predicted latent
        probabilities (z_hat_probs, treated as constant) and the encoder's
        logits — pulls the encoder towards the dynamics prior at the SAME
        time steps.

        z_logits: (batch_size, T, z_categoricals, z_categories)
        z_hat_probs: (batch_size, T, z_categoricals, z_categories), no grad.
        '''
        assert common.check_no_grad(z_hat_probs)
        config = self.config
        metrics = {}
        coef = config['obs_consistency_coef']
        if coef > 0:
            cross_entropy = -((z_hat_probs.detach() * z_logits).sum(-1))
            cross_entropy = cross_entropy.sum(-1)  # independent
            loss = coef * cross_entropy.mean()
            metrics['enc_prior_ce'] = cross_entropy.detach().mean()
            metrics['enc_prior_loss'] = loss.detach()
        else:
            # Consistency term disabled by config.
            loss = torch.zeros(1, device=z_logits.device, requires_grad=False)
        return loss, metrics


class DynamicsModel(nn.Module):
    """Transformer-XL based dynamics model of the TWM world model.

    Consumes sequences of latent states ``z``, actions ``a`` and optionally
    rewards ``r`` and discounts ``g``, and predicts at each step the next
    latent-state distribution, reward and discount.
    """

    def __init__(self, config, z_dim, num_actions):
        """
        config: hyper-parameter dictionary (the dyn_* keys are read here).
        z_dim: flattened latent dimension (z_categoricals * z_categories).
        num_actions: size of the discrete action space.
        """
        super().__init__()
        self.config = config

        # Input embeddings: 'z' is a dense latent vector, 'a' a categorical index.
        embeds = {
            'z': {'in_dim': z_dim, 'categorical': False},
            'a': {'in_dim': num_actions, 'categorical': True}
        }
        modality_order = ['z', 'a']
        num_current = 2

        # Optionally feed previous rewards/discounts back in as inputs.
        # in_dim == 0 marks a scalar input that is unsqueezed internally.
        if config['dyn_input_rewards']:
            embeds['r'] = {'in_dim': 0, 'categorical': False}
            modality_order.append('r')

        if config['dyn_input_discounts']:
            embeds['g'] = {'in_dim': 0, 'categorical': False}
            modality_order.append('g')

        self.modality_order = modality_order

        # Output heads: latent logits, reward mean, discount logit. The
        # discount head's final bias starts at the environment discount factor.
        out_heads = {
            'z': {'hidden_dims': config['dyn_z_dims'], 'out_dim': z_dim},
            'r': {'hidden_dims': config['dyn_reward_dims'], 'out_dim': 1, 'final_bias_init': 0.0},
            'g': {'hidden_dims': config['dyn_discount_dims'], 'out_dim': 1,
                  'final_bias_init': config['env_discount_factor']}
        }

        memory_length = config['wm_memory_length']
        max_length = 1 + config['wm_sequence_length']  # 1 extra step for context
        self.prediction_net = PredictionNet(
            modality_order, num_current, embeds, out_heads, embed_dim=config['dyn_embed_dim'],
            activation=config['dyn_act'], norm=config['dyn_norm'], dropout_p=config['dyn_dropout'],
            feedforward_dim=config['dyn_feedforward_dim'], head_dim=config['dyn_head_dim'],
            num_heads=config['dyn_num_heads'], num_layers=config['dyn_num_layers'],
            memory_length=memory_length, max_length=max_length)

    @property
    def h_dim(self):
        """Embedding dimension of the prediction network's hidden states."""
        return self.prediction_net.embed_dim

    def predict(self, z, a, r, g, d, tgt_length, heads=None, mems=None, return_attention=False,
                compute_consistency=False):
        """Run the prediction network over a (z, a, r, g) sequence.

        z: (batch, T, z_categoricals * z_categories) sampled latent states.
        a: (batch, T') discrete actions.
        r, g: (batch, T'') rewards and discounts.
        d: (batch, T') stop mask marking terminal steps.
        tgt_length: number of timesteps to predict.
        heads: subset of ('z', 'r', 'g') to evaluate; all by default.
        mems: optional Transformer-XL memories (must be gradient-free).
        return_attention: also return stacked attention weights.
        compute_consistency: predict one extra context timestep, expose its
            detached probabilities as preds['z_hat_probs'] for the consistency
            loss, and strip the context step from every other prediction.

        Returns (preds, h, mems) or (preds, h, mems, attention).
        All inputs must be gradient-free.
        """
        assert common.check_no_grad(z, a, r, g, d)
        assert mems is None or common.check_no_grad(*mems)
        config = self.config

        if compute_consistency:
            tgt_length += 1  # add 1 timestep for context

        inputs = {'z': z, 'a': a, 'r': r, 'g': g}
        heads = tuple(heads) if heads is not None else ('z', 'r', 'g')

        # out: per-head tensors of shape (batch, tgt_length, out_dim);
        # h: hidden states (batch, tgt_length, embed_dim);
        # mems: updated per-layer Transformer-XL memories.
        outputs = self.prediction_net(
            inputs, tgt_length, stop_mask=d, heads=heads, mems=mems, return_attention=return_attention)
        out, h, mems, attention = outputs if return_attention else (outputs + (None,))

        preds = {}

        if 'z' in heads:  # latent states
            z_categoricals = config['z_categoricals']
            z_categories = config['z_categories']
            # (batch, tgt_length, z_categoricals, z_categories)
            z_logits = out['z'].unflatten(-1, (z_categoricals, z_categories))

            if compute_consistency:
                # Consistency-loss target: probabilities of all but the last
                # prediction, detached so only the encoder receives gradient.
                preds['z_hat_probs'] = ObservationModel.create_z_dist(z_logits[:, :-1].detach()).base_dist.probs
                z_logits = z_logits[:, 1:]  # drop the context timestep

            z_dist = ObservationModel.create_z_dist(z_logits)
            preds['z_dist'] = z_dist

        if 'r' in heads:  # rewards
            r_params = out['r']
            if compute_consistency:
                r_params = r_params[:, 1:]  # drop the context timestep
            r_mean = r_params.squeeze(-1)
            # Unit-variance Gaussian: log_prob reduces to squared error on the mean.
            r_dist = D.Normal(r_mean, torch.ones_like(r_mean))

            r_pred = r_dist.mean
            preds['r_dist'] = r_dist  # used for the dynamics loss
            preds['r'] = r_pred

        if 'g' in heads:  # discounts
            g_params = out['g']
            if compute_consistency:
                g_params = g_params[:, 1:]  # drop the context timestep
            g_mean = g_params.squeeze(-1)
            g_dist = D.Bernoulli(logits=g_mean)

            g_pred = torch.clip(g_dist.mean, 0, 1)  # clamp for numerical safety
            preds['g_dist'] = g_dist  # used for the dynamics loss
            preds['g'] = g_pred

        return (preds, h, mems, attention) if return_attention else (preds, h, mems)

    def compute_dynamics_loss(self, preds, h, target_logits, target_r, target_g, target_weights):
        """Combine latent-transition, reward and discount losses.

        preds: dict from predict() ('z_dist', 'r_dist', 'r', 'g_dist', 'g').
        h: hidden states; only used for the h_norm diagnostic.
        target_logits: encoder latents (batch, T, z_categoricals, z_categories).
        target_r, target_g: reward and discount targets.
        target_weights: per-step weights (0 masks steps across episode resets).
        All targets must be gradient-free. Returns (loss, metrics).
        """
        assert common.check_no_grad(target_logits, target_r, target_g, target_weights)
        config = self.config
        losses = []
        metrics = {}

        # L2 norm of hidden states, recorded for diagnostics only.
        metrics['h_norm'] = h.norm(dim=-1, p=2).mean().detach()

        if 'z' in preds:
            z_dist = preds['z_dist']
            z_logits = z_dist.base_dist.logits  # use normalized logits

            # doesn't check for q == 0
            target_probs = logits_to_probs(target_logits)
            # Cross-entropy between encoder targets and dynamics predictions,
            # summed over the independent categorical factors.
            cross_entropy = -((target_probs * z_logits).sum(-1))
            cross_entropy = cross_entropy.sum(-1)  # independent
            weighted_cross_entropy = target_weights * cross_entropy  # mask reset steps
            weighted_cross_entropy = weighted_cross_entropy.sum() / target_weights.sum()

            coef = config['dyn_z_coef']
            if coef != 0:
                transition_loss = coef * weighted_cross_entropy
                losses.append(transition_loss)

                metrics['z_pred_loss'] = transition_loss.detach()
                metrics['z_pred_ent'] = z_dist.entropy().detach().mean()
                metrics['z_pred_ce'] = weighted_cross_entropy.detach()

            # doesn't check for q == 0
            # KL(target || prediction), recorded as a metric only (predictions detached).
            kl = (target_probs * (target_logits - z_logits.detach())).mean()
            kl = F.relu(kl.mean())
            metrics['z_kl'] = kl

        if 'r' in preds:
            r_dist = preds['r_dist']
            r_pred = preds['r']
            coef = config['dyn_reward_coef']
            if coef != 0:
                # Negative log-likelihood of the reward targets.
                r_loss = -coef * r_dist.log_prob(target_r).mean()
                losses.append(r_loss)
                metrics['reward_loss'] = r_loss.detach()
                metrics['reward_mae'] = torch.abs(target_r - r_pred.detach()).mean()
            metrics['reward'] = r_pred.mean().detach()

        if 'g' in preds:
            g_dist = preds['g_dist']
            g_pred = preds['g']
            coef = config['dyn_discount_coef']
            if coef != 0:
                # Targets may be soft discounts in (0, 1); skip Bernoulli validation.
                g_dist._validate_args = False
                g_loss = -coef * g_dist.log_prob(target_g).mean()
                losses.append(g_loss)
                metrics['discount_loss'] = g_loss.detach()
                metrics['discount_mae'] = torch.abs(target_g - g_pred.detach()).mean()
            metrics['discount'] = g_pred.detach().mean()

        if len(losses) == 0:
            # Bugfix: this branch previously referenced an undefined name `z`
            # (NameError whenever all loss coefficients are zero).
            loss = torch.zeros(1, device=h.device, requires_grad=False)
        else:
            # Total loss: transition cross-entropy + reward NLL + discount NLL.
            loss = sum(losses)
            metrics['dyn_loss'] = loss.detach()
        return loss, metrics


def get_activation(nonlinearity, param=None):
    """Build an activation module from its name.

    nonlinearity: one of None, 'none', 'linear', 'relu', 'leaky_relu',
        'elu', 'silu'.
    param: optional parameter for 'leaky_relu' (negative slope, default 1e-2)
        and 'elu' (alpha, default 1.0); ignored otherwise.

    Raises ValueError for unknown names.
    """
    if nonlinearity in (None, 'none', 'linear'):
        return nn.Identity()
    if nonlinearity == 'relu':
        return nn.ReLU()
    if nonlinearity == 'leaky_relu':
        return nn.LeakyReLU(negative_slope=1e-2 if param is None else param)
    if nonlinearity == 'elu':
        return nn.ELU(alpha=1.0 if param is None else param)
    if nonlinearity == 'silu':
        return nn.SiLU()
    raise ValueError(f'Unsupported nonlinearity: {nonlinearity}')


def get_norm_1d(norm, k):
    """Build a 1-D normalization module over k features.

    norm: None/'none' -> Identity, 'batch_norm' -> BatchNorm1d,
    'layer_norm' -> LayerNorm. Raises ValueError for unknown names.
    """
    if norm in (None, 'none'):
        return nn.Identity()
    if norm == 'batch_norm':
        return nn.BatchNorm1d(k)
    if norm == 'layer_norm':
        return nn.LayerNorm(k)
    raise ValueError(f'Unsupported norm: {norm}')


def get_norm_2d(norm, c, h=None, w=None):
    """Build a 2-D normalization module over c channels.

    norm: None/'none' -> Identity, 'batch_norm' -> BatchNorm2d(c),
        'layer_norm' -> LayerNorm([c, h, w]) (h and w required).
    Raises ValueError for unknown names.
    """
    # Accept None as well as 'none', for consistency with get_norm_1d
    # (previously None raised ValueError here).
    if norm is None or norm == 'none':
        return nn.Identity()
    elif norm == 'batch_norm':
        return nn.BatchNorm2d(c)
    elif norm == 'layer_norm':
        assert h is not None and w is not None
        return nn.LayerNorm([c, h, w])
    else:
        raise ValueError(f'Unsupported norm: {norm}')


def _calculate_gain(nonlinearity, param=None):
    if nonlinearity == 'elu':
        nonlinearity = 'selu'
        param = 1
    elif nonlinearity == 'silu':
        nonlinearity = 'relu'
        param = None
    return torch.nn.init.calculate_gain(nonlinearity, param)


def _kaiming_uniform_(tensor, gain):
    # same as torch.nn.init.kaiming_uniform_, but uses gain
    fan = torch.nn.init._calculate_correct_fan(tensor, mode='fan_in')
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    torch.nn.init._no_grad_uniform_(tensor, -bound, bound)


def _get_initializer(name, nonlinearity=None, param=None):
    if nonlinearity is None:
        assert param is None
    if name == 'kaiming_uniform':
        if nonlinearity is None:
            # defaults from PyTorch
            nonlinearity = 'leaky_relu'
            param = math.sqrt(5)
        return lambda x: _kaiming_uniform_(x, gain=_calculate_gain(nonlinearity, param))
    elif name == 'xavier_uniform':
        if nonlinearity is None:
            nonlinearity = 'relu'
        return lambda x: torch.nn.init.xavier_uniform_(x, gain=_calculate_gain(nonlinearity, param))
    elif name == 'orthogonal':
        if nonlinearity is None:
            nonlinearity = 'relu'
        return lambda x: torch.nn.init.orthogonal_(x, gain=_calculate_gain(nonlinearity, param))
    elif name == 'zeros':
        return lambda x: torch.nn.init.zeros_(x)
    else:
        raise ValueError(f'Unsupported initializer: {name}')


def init_(mod, weight_initializer=None, bias_initializer=None, nonlinearity=None, param=None):
    """Apply weight/bias initializers to every Linear/Conv2d submodule of `mod`.

    Initializer names are resolved via _get_initializer; a None name leaves
    the corresponding parameter untouched. Returns `mod` (via nn.Module.apply).
    """
    if weight_initializer is None:
        init_weight = lambda x: x  # leave weights unchanged
    else:
        init_weight = _get_initializer(weight_initializer, nonlinearity, param)
    if bias_initializer is None:
        init_bias = lambda x: x  # leave biases unchanged
    else:
        # Biases are always initialized as if followed by a linear activation.
        init_bias = _get_initializer(bias_initializer, nonlinearity='linear', param=None)

    def visit(m):
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            init_weight(m.weight)
            if m.bias is not None:
                init_bias(m.bias)

    return mod.apply(visit)


class _MultilayerModule(nn.Module):
    """Shared base class for MLP / CNN / TransposeCNN stacks.

    Subclasses register their layers under the names
    ``{layer_prefix}1 .. {layer_prefix}{num_layers}`` and optionally
    ``norm0 .. norm{num_layers}`` plus a ``dropout`` module; this base class
    implements the generic forward pass and parameter initialization by
    looking those attributes up by name.
    """

    def __init__(self, layer_prefix, ndim, in_dim, num_layers, nonlinearity, param,
                 norm, dropout_p, pre_activation, post_activation,
                 weight_initializer, bias_initializer, final_bias_init):
        super().__init__()
        self.layer_prefix = layer_prefix  # attribute-name prefix, e.g. 'linear' or 'conv'
        self.ndim = ndim  # feature dims per sample: 1 for MLP, 3 (C, H, W) for CNNs
        self.num_layers = num_layers
        self.nonlinearity = nonlinearity
        self.param = param  # optional activation parameter (slope / alpha)
        self.pre_activation = pre_activation  # norm + activation before the first layer
        self.post_activation = post_activation  # norm + activation after the last layer
        self.weight_initializer = weight_initializer
        self.bias_initializer = bias_initializer
        self.final_bias_init = final_bias_init  # constant bias for the final layer, if any

        self.has_norm = norm is not None and norm != 'none'
        self.has_dropout = dropout_p != 0
        # in_dim == 0 marks scalar inputs: a feature dim is added in forward().
        self.unsqueeze = in_dim == 0

        self.act = get_activation(nonlinearity, param)

    def reset_parameters(self):
        """(Re)initialize all layers; the final layer gets special treatment."""
        init_(self, self.weight_initializer, self.bias_initializer, self.nonlinearity, self.param)
        final_layer = getattr(self, f'{self.layer_prefix}{self.num_layers}')
        if not self.post_activation:
            # No activation follows the last layer, so re-initialize it with a
            # linear gain.
            init_(final_layer, self.weight_initializer, self.bias_initializer, nonlinearity='linear', param=None)
        if self.final_bias_init is not None:
            # Overwrite the final layer's bias with a constant (e.g. a prior mean).
            def final_init(m):
                if isinstance(m, (nn.Linear, nn.Conv2d)) and m.bias is not None:
                    with torch.no_grad():
                        m.bias.data.fill_(self.final_bias_init)
            final_layer.apply(final_init)

    def forward(self, x):
        """Apply [pre-act] -> (layer, norm, act, dropout)* -> final layer -> [post-act].

        Extra leading batch dimensions are flattened for the stack and
        restored afterwards.
        """
        if self.unsqueeze:
            # Scalar input: add the missing feature/channel dimension.
            x = x.unsqueeze(-self.ndim)

        if x.ndim > self.ndim + 1:
            # Flatten all leading batch dims into one.
            batch_shape = x.shape[:-self.ndim]
            x = x.reshape(-1, *x.shape[-self.ndim:])
        else:
            batch_shape = None

        if self.pre_activation:
            if self.has_norm:
                x = getattr(self, 'norm0')(x)
            x = self.act(x)

        # Hidden layers: layer -> (norm) -> activation -> (dropout).
        for i in range(self.num_layers - 1):
            x = getattr(self, f'{self.layer_prefix}{i + 1}')(x)
            if self.has_norm:
                x = getattr(self, f'norm{i + 1}')(x)
            x = self.act(x)
            if self.has_dropout:
                x = self.dropout(x)
        # Final layer (no activation unless post_activation is set).
        x = getattr(self, f'{self.layer_prefix}{self.num_layers}')(x)

        if self.post_activation:
            if self.has_norm:
                x = getattr(self, f'norm{self.num_layers}')(x)
            x = self.act(x)

        if batch_shape is not None:
            # Restore the original leading batch dimensions.
            x = x.unflatten(0, batch_shape)
        return x


class MLP(_MultilayerModule):
    """Fully-connected stack in_dim -> hidden_dims -> out_dim on top of _MultilayerModule.

    This class only constructs and registers the layers; the forward pass and
    initialization are inherited from the base class.
    """

    def __init__(self, in_dim, hidden_dims, out_dim, nonlinearity, param=None, norm=None, dropout_p=0, bias=True,
                 pre_activation=False, post_activation=False,
                 weight_initializer='kaiming_uniform', bias_initializer='zeros', final_bias_init=None):
        dims = (in_dim,) + tuple(hidden_dims) + (out_dim,)
        super().__init__('linear', 1, in_dim, len(dims) - 1, nonlinearity, param, norm, dropout_p,
                         pre_activation, post_activation, weight_initializer, bias_initializer, final_bias_init)
        if self.unsqueeze:
            # Scalar input: the base class adds a feature dim of size 1.
            dims = (1,) + dims[1:]

        if pre_activation and self.has_norm:
            self.add_module('norm0', get_norm_1d(norm, in_dim))

        # Hidden layers, each with an optional norm (activation is applied by
        # the base class forward()).
        for idx in range(1, self.num_layers):
            self.add_module(f'linear{idx}', nn.Linear(dims[idx - 1], dims[idx], bias=bias))
            if self.has_norm:
                self.add_module(f'norm{idx}', get_norm_1d(norm, dims[idx]))

        # Final projection layer.
        self.add_module(f'linear{self.num_layers}', nn.Linear(dims[-2], dims[-1], bias=bias))

        if post_activation and self.has_norm:
            self.add_module(f'norm{self.num_layers}', get_norm_1d(norm, dims[-1]))

        if self.has_dropout:
            self.dropout = nn.Dropout(dropout_p)

        self.reset_parameters()


class CNN(_MultilayerModule):
    """Convolutional stack built on _MultilayerModule.

    Only network construction lives here; the generic forward pass and
    initialization are implemented by the base class.
    """

    def __init__(self, in_dim, hidden_dims, out_dim, kernel_sizes, strides, paddings, nonlinearity,
                 param=None, norm=None, dropout_p=0, bias=True, padding_mode='zeros', in_shape=None,
                 pre_activation=False, post_activation=False,
                 weight_initializer='kaiming_uniform', bias_initializer='zeros', final_bias_init=None):
        """
        in_dim: input channels (e.g. frame stack * image channels).
        hidden_dims: channel counts of the hidden conv layers, e.g. [h, 2h, 4h].
        out_dim: channel count of the final conv layer.
        kernel_sizes / strides / paddings: one entry per conv layer
            (len(hidden_dims) + 1 layers in total).
        in_shape: optional input (H, W); needed when norm == 'layer_norm' so
            per-layer output shapes can be derived.
        """
        assert len(kernel_sizes) == len(hidden_dims) + 1
        assert len(strides) == len(kernel_sizes) and len(paddings) == len(kernel_sizes)
        # Channel counts of every layer: input, hidden..., output.
        dims = (in_dim,) + tuple(hidden_dims) + (out_dim,)
        super().__init__('conv', 3, in_dim, len(dims) - 1, nonlinearity, param, norm, dropout_p,
                         pre_activation, post_activation, weight_initializer, bias_initializer, final_bias_init)
        if self.unsqueeze:  # in_dim == 0: a channel dimension is added in forward()
            dims = (1,) + dims[1:]

        def to_pair(x):
            if isinstance(x, int):
                return x, x
            assert isinstance(x, tuple) and len(x) == 2
            return x

        def calc_out_shape(shape, kernel_size, stride, padding):
            # Standard Conv2d output-size formula.
            # Bugfix: the unpacking previously swapped stride and padding, and
            # `/` produced float sizes; Conv2d sizes use floor division.
            kernel_size, stride, padding = [to_pair(x) for x in (kernel_size, stride, padding)]
            return tuple((shape[j] + 2 * padding[j] - kernel_size[j]) // stride[j] + 1 for j in [0, 1])

        if pre_activation and self.has_norm:
            norm_layer = get_norm_2d(norm, in_dim, in_shape[0], in_shape[1])
            self.add_module('norm0', norm_layer)

        shape = in_shape
        # Hidden conv layers with optional per-layer normalization.
        for i in range(self.num_layers - 1):
            conv_layer = nn.Conv2d(dims[i], dims[i + 1], kernel_sizes[i], strides[i], paddings[i],
                                   bias=bias, padding_mode=padding_mode)
            self.add_module(f'conv{i + 1}', conv_layer)
            if self.has_norm:
                if shape is not None:
                    # Track the spatial shape for LayerNorm.
                    shape = calc_out_shape(shape, kernel_sizes[i], strides[i], paddings[i])
                norm_layer = get_norm_2d(norm, dims[i + 1], shape[0], shape[1])
                self.add_module(f'norm{i + 1}', norm_layer)

        # Final conv layer.
        conv_layer = nn.Conv2d(dims[-2], dims[-1], kernel_sizes[-1], strides[-1], paddings[-1],
                               bias=bias, padding_mode=padding_mode)
        self.add_module(f'conv{self.num_layers}', conv_layer)

        if post_activation and self.has_norm:
            shape = calc_out_shape(shape, kernel_sizes[-1], strides[-1], paddings[-1])
            norm_layer = get_norm_2d(norm, dims[-1], shape[0], shape[1])
            self.add_module(f'norm{self.num_layers}', norm_layer)

        if self.has_dropout:
            self.dropout = nn.Dropout2d(dropout_p)

        self.reset_parameters()


class TransposeCNN(_MultilayerModule):
    """Transposed-convolution stack built on _MultilayerModule (decoder-style).

    Only network construction lives here; the generic forward pass and
    initialization are implemented by the base class.
    """

    def __init__(self, in_dim, hidden_dims, out_dim, kernel_sizes, strides, paddings, nonlinearity,
                 param=None, norm=None, dropout_p=0, bias=True, padding_mode='zeros', in_shape=None,
                 pre_activation=False, post_activation=False,
                 weight_initializer='kaiming_uniform', bias_initializer='zeros', final_bias_init=None):
        """
        in_dim / hidden_dims / out_dim: channel counts of the transposed-conv layers.
        kernel_sizes / strides / paddings: one entry per layer
            (len(hidden_dims) + 1 layers in total).
        in_shape: optional input (H, W); needed when norm == 'layer_norm'.
        """
        assert len(kernel_sizes) == len(hidden_dims) + 1
        assert len(strides) == len(kernel_sizes) and len(paddings) == len(kernel_sizes)
        dims = (in_dim,) + tuple(hidden_dims) + (out_dim,)
        super().__init__('conv_transpose', 3, in_dim, len(dims) - 1, nonlinearity, param, norm, dropout_p,
                         pre_activation, post_activation, weight_initializer, bias_initializer, final_bias_init)
        if self.unsqueeze:  # in_dim == 0: a channel dimension is added in forward()
            dims = (1,) + dims[1:]

        def to_pair(x):
            if isinstance(x, int):
                return x, x
            assert isinstance(x, tuple) and len(x) == 2
            return x

        def calc_out_shape(shape, kernel_size, stride, padding):
            # Standard ConvTranspose2d output-size formula.
            # Bugfix: the unpacking previously swapped stride and padding,
            # yielding wrong output shapes for LayerNorm.
            kernel_size, stride, padding = [to_pair(x) for x in (kernel_size, stride, padding)]
            return tuple((shape[j] - 1) * stride[j] - 2 * padding[j] + kernel_size[j] for j in [0, 1])

        if pre_activation and self.has_norm:
            norm_layer = get_norm_2d(norm, in_dim, in_shape[0], in_shape[1])
            self.add_module('norm0', norm_layer)

        shape = in_shape
        # Hidden transposed-conv layers with optional per-layer normalization.
        for i in range(self.num_layers - 1):
            conv_transpose_layer = nn.ConvTranspose2d(dims[i], dims[i + 1], kernel_sizes[i], strides[i], paddings[i],
                                                      bias=bias, padding_mode=padding_mode)
            self.add_module(f'conv_transpose{i + 1}', conv_transpose_layer)
            if self.has_norm:
                if shape is not None:
                    # Track the spatial shape for LayerNorm.
                    shape = calc_out_shape(shape, kernel_sizes[i], strides[i], paddings[i])
                norm_layer = get_norm_2d(norm, dims[i + 1], shape[0], shape[1])
                self.add_module(f'norm{i + 1}', norm_layer)

        # Final transposed-conv layer.
        conv_transpose_layer = nn.ConvTranspose2d(dims[-2], dims[-1], kernel_sizes[-1], strides[-1], paddings[-1],
                                                  bias=bias, padding_mode=padding_mode)
        self.add_module(f'conv_transpose{self.num_layers}', conv_transpose_layer)

        if post_activation and self.has_norm:
            shape = calc_out_shape(shape, kernel_sizes[-1], strides[-1], paddings[-1])
            norm_layer = get_norm_2d(norm, dims[-1], shape[0], shape[1])
            self.add_module(f'norm{self.num_layers}', norm_layer)

        if self.has_dropout:
            self.dropout = nn.Dropout2d(dropout_p)

        self.reset_parameters()


# adopted from
# https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
# and https://github.com/sooftware/attentions/blob/master/attentions.py
class TransformerXLDecoder(nn.Module):
    """Transformer-XL decoder stack: relative positional encodings plus
    segment-level recurrence via per-layer memories."""

    def __init__(self, decoder_layer, num_layers, max_length, mem_length, batch_first=False):
        """
        decoder_layer: TransformerXLDecoderLayer template; deep-copied
            num_layers times.
        num_layers: number of stacked decoder layers.
        max_length: maximum source length for the positional-encoding table.
        mem_length: number of past hidden states kept as memory per layer.
        batch_first: if True, inputs/outputs are (batch, seq, dim).
        """
        super().__init__()
        # Stack of independent copies of the template layer.
        self.layers = nn.ModuleList([copy.deepcopy(decoder_layer) for _ in range(num_layers)])
        self.num_layers = num_layers
        self.mem_length = mem_length
        self.batch_first = batch_first

        self.pos_enc = PositionalEncoding(decoder_layer.dim, max_length, dropout_p=decoder_layer.dropout_p)
        # Learned content (u) and position (v) biases shared across all layers,
        # as in the Transformer-XL relative-attention parameterization.
        self.u_bias = nn.Parameter(torch.Tensor(decoder_layer.num_heads, decoder_layer.head_dim))
        self.v_bias = nn.Parameter(torch.Tensor(decoder_layer.num_heads, decoder_layer.head_dim))
        nn.init.xavier_uniform_(self.u_bias)
        nn.init.xavier_uniform_(self.v_bias)

    def init_mems(self):
        """Return a fresh memory list: num_layers + 1 empty tensors (one per
        layer input plus the final output), or None if mem_length == 0."""
        if self.mem_length > 0:
            # Match dtype/device of the model parameters.
            param = next(self.parameters())
            dtype, device = param.dtype, param.device
            mems = []
            for i in range(self.num_layers + 1):
                mems.append(torch.empty(0, dtype=dtype, device=device))
            return mems
        else:
            return None

    def forward(self, x, positions, attn_mask, mems=None, tgt_length=None, return_attention=False):
        """
        x: (batch, src_length, dim) if batch_first, else (src_length, batch, dim).
        positions: (src_length,) relative positions, descending to 0.
        attn_mask: attention mask of shape (tgt_length, src_length, batch).
        mems: optional per-layer memories [num_layers + 1 tensors of shape
            (mem_length, batch, dim)]; freshly initialized when None.
        tgt_length: number of trailing timesteps to return; defaults to the
            full input length.
        return_attention: additionally return stacked attention weights.
        """
        if self.batch_first:
            # Internal computation is (seq, batch, dim).
            x = x.transpose(0, 1)

        if mems is None:
            # No memory provided: start from empty per-layer memories.
            mems = self.init_mems()

        if tgt_length is None:
            tgt_length = x.shape[0]
        assert tgt_length > 0

        pos_enc = self.pos_enc(positions)  # (src_length, 1, dim)
        hiddens = [x]  # inputs of every layer (and the final output)
        attentions = []  # per-layer attention weights
        out = x
        for i, layer in enumerate(self.layers):
            out, attention = layer(out, pos_enc, self.u_bias, self.v_bias, attn_mask=attn_mask, mems=mems[i])
            # out: (tgt_length, batch, dim)
            # attention: (tgt_length, src_length, batch, num_heads)
            hiddens.append(out)
            attentions.append(attention)
        
        # Keep only the last tgt_length timesteps.
        out = out[-tgt_length:]

        if self.batch_first:
            # Back to (batch, tgt_length, dim).
            out = out.transpose(0, 1)

        assert len(hiddens) == len(mems)  # one memory slot per layer input/output
        with torch.no_grad():
            new_mems = []
            for i in range(len(hiddens)):
                # Append the new hidden states to the existing memory...
                cat = torch.cat([mems[i], hiddens[i]], dim=0)
                # ...and keep only the most recent mem_length entries, detached.
                new_mems.append(cat[-self.mem_length:].detach())
        if return_attention:
            # Stack per-layer attentions:
            # (tgt_length, src_length, batch, num_layers, num_heads).
            attention = torch.stack(attentions, dim=-2)
            return out, new_mems, attention 
        # out: (batch, tgt_length, dim) if batch_first
        # new_mems: [num_layers + 1] x (mem_length, batch, dim)
        return out, new_mems


class TransformerXLDecoderLayer(nn.Module):
    """One Transformer-XL decoder block: relative-position self-attention
    followed by a position-wise feed-forward network (post-norm residuals)."""

    def __init__(self, dim, feedforward_dim, head_dim, num_heads, activation, dropout_p, layer_norm_eps=1e-5):
        """
        dim: embedding dimension.
        feedforward_dim: hidden size of the feed-forward sub-layer.
        head_dim / num_heads: per-head dimension and number of attention heads.
        activation: activation name for the feed-forward net.
        dropout_p: dropout probability (Identity when 0).
        layer_norm_eps: epsilon for both LayerNorms.
        """
        super().__init__()
        self.dim = dim
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.dropout_p = dropout_p
        self.self_attn = RelativeMultiheadSelfAttention(dim, head_dim, num_heads, dropout_p)
        self.linear1 = nn.Linear(dim, feedforward_dim)
        self.linear2 = nn.Linear(feedforward_dim, dim)
        self.norm1 = nn.LayerNorm(dim, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(dim, eps=layer_norm_eps)
        self.act = get_activation(activation)
        self.dropout = nn.Dropout(dropout_p) if dropout_p > 0 else nn.Identity()

    def _ff(self, x):
        """Position-wise feed-forward: linear -> act -> dropout -> linear -> dropout."""
        hidden = self.dropout(self.act(self.linear1(x)))
        return self.dropout(self.linear2(hidden))

    def forward(self, x, pos_encodings, u_bias, v_bias, attn_mask=None, mems=None):
        """
        x: (src_length, batch, dim) mixed modality embeddings.
        pos_encodings: (src_length, 1, dim) relative positional encodings.
        u_bias / v_bias: (num_heads, head_dim) shared attention biases.
        attn_mask: optional (tgt_length, src_length, batch) mask.
        mems: optional layer memory from previous segments.

        Returns (out, attention) with out of shape (tgt_length, batch, dim)
        and attention of shape (tgt_length, src_length, batch, num_heads).
        """
        attn_out, attention = self.self_attn(x, pos_encodings, u_bias, v_bias, attn_mask, mems)
        # Attention sub-layer: dropout, residual, post-norm.
        h = self.norm1(x + self.dropout(attn_out))
        # Feed-forward sub-layer: residual, post-norm.
        h = self.norm2(h + self._ff(h))
        return h, attention


class RelativeMultiheadSelfAttention(nn.Module):
    # Multi-head self-attention with Transformer-XL style relative positional
    # encodings and optional segment memories (Dai et al., 2019).

    def __init__(self, dim, head_dim, num_heads, dropout_p):
        '''
        dim: model embedding dimension, config['dyn_embed_dim']
        head_dim: per-head feature dimension, config['dyn_head_dim']
        num_heads: number of attention heads, config['dyn_num_heads']
        dropout_p: dropout probability, config['dyn_dropout']
        '''
        super().__init__()
        self.dim = dim
        self.head_dim = head_dim
        self.num_heads = num_heads
        self.scale = 1 / (dim ** 0.5) # softmax temperature; NOTE(review): scales by the model dim rather than head_dim — confirm this is the intended variant

        self.qkv_proj = nn.Linear(dim, 3 * num_heads * head_dim, bias=False)
        self.pos_proj = nn.Linear(dim, num_heads * head_dim, bias=False)
        self.out_proj = nn.Linear(num_heads * head_dim, dim, bias=False)
        self.dropout = nn.Dropout(dropout_p) if dropout_p > 0 else nn.Identity()

    def _rel_shift(self, x):
        '''
        Relative-shift trick from Transformer-XL: realigns the
        (query, position) score matrix so that entry (i, j) corresponds to the
        relative distance between query i and key j.

        x: shape (tgt_length, pos_len, batch_size, num_heads)
        Returns a tensor of the same shape.
        '''
        zero_pad = torch.zeros((x.shape[0], 1, *x.shape[2:]), device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1) # prepend a zero column: (tgt_length, pos_len + 1, batch_size, num_heads)
        x_padded = x_padded.view(x.shape[1] + 1, x.shape[0], *x.shape[2:]) # reinterpret the memory layout, effectively swapping the first two dims
        x = x_padded[1:].view_as(x) # drop the first row and restore the shape; net effect is a per-row shift by one
        # Each row ends up shifted by one position relative to the previous
        # one, which converts absolute position indices into relative
        # distances.
        '''
        类似以下这种
        [1, 2, 3, 4]
        [5, 6, 7, 8]

        ->
        [2, 3, 4, 5]
        [6, 7, 8, 1]
        '''
        return x # same shape as the input

    def forward(self, x, pos_encodings, u_bias, v_bias, attn_mask=None, mems=None):
        '''
        x: (src_length, batch_size, embed_dim) — interleaved embeddings of
           latents/actions (and rewards/discounts when configured)
        pos_encodings: (pos_len, 1, dim)
        u_bias / v_bias: (num_heads, head_dim) learned global content/position biases
        attn_mask: (tgt_length, src_length) or (tgt_length, src_length, batch_size);
                   True marks positions that must NOT be attended
        mems: memories from previous segments (may be an empty tensor)
        '''
        tgt_length, batch_size = x.shape[:2]
        pos_len = pos_encodings.shape[0]
        # tgt_length = pos_len

        if mems is not None:
            cat = torch.cat([mems, x], dim=0) # prepend memories: (mem_length + src_length, batch_size, embed_dim)
            qkv = self.qkv_proj(cat) # (mem_length + src_length, batch_size, 3 * num_heads * head_dim)
            q, k, v = torch.chunk(qkv, 3, dim=-1) # each (mem_length + src_length, batch_size, num_heads * head_dim)
            q = q[-tgt_length:] # queries only for the current target positions
        else:
            qkv = self.qkv_proj(x) # (src_length, batch_size, 3 * num_heads * head_dim)
            q, k, v = torch.chunk(qkv, 3, dim=-1) # each (src_length, batch_size, num_heads * head_dim)

        pos_encodings = self.pos_proj(pos_encodings) # (pos_len, 1, num_heads * head_dim)

        src_length = k.shape[0] # total key length (includes memories when present)
        num_heads = self.num_heads
        head_dim = self.head_dim

        q = q.view(tgt_length, batch_size, num_heads, head_dim) # (tgt_length, batch_size, num_heads, head_dim)
        k = k.view(src_length, batch_size, num_heads, head_dim) # (src_length, batch_size, num_heads, head_dim)
        v = v.view(src_length, batch_size, num_heads, head_dim) # (src_length, batch_size, num_heads, head_dim)
        pos_encodings = pos_encodings.view(pos_len, num_heads, head_dim) # (pos_len, num_heads, head_dim)

        # Content score: (q + u_bias) . k, contracted over the head dim ('d' in
        # the einsum). The einsum 'ibnd,jbnd->ijbn' is equivalent to:
        '''
        # 对于每个位置:
        for i in range(tgt_length):
            for j in range(src_length):
                for b in range(batch_size):
                    for n in range(num_heads):
                        content_score[i,j,b,n] = sum(
                            (q[i,b,n,d] + u_bias[n,d]) * k[j,b,n,d]
                            for d in range(head_dim)
                        )
        '''
        content_score = torch.einsum('ibnd,jbnd->ijbn', (q + u_bias, k)) # (tgt_length, src_length, batch_size, num_heads)
        pos_score = torch.einsum('ibnd,jnd->ijbn', (q + v_bias, pos_encodings)) # position score: (tgt_length, pos_len, batch_size, num_heads)
        pos_score = self._rel_shift(pos_score) # realign position scores to relative distances

        # [tgt_length x src_length x batch_size x num_heads]
        attn_score = content_score + pos_score # combined attention logits
        attn_score.mul_(self.scale) # scale before the softmax for numerical stability

        if attn_mask is not None:
            # Broadcast the mask to 4-D and set masked positions to -inf so
            # they receive exactly zero weight after the softmax.
            if attn_mask.ndim == 2:
                attn_score = attn_score.masked_fill(attn_mask[:, :, None, None], -float('inf'))
            elif attn_mask.ndim == 3:
                attn_score = attn_score.masked_fill(attn_mask[:, :, :, None], -float('inf'))

        # [tgt_length x src_length x batch_size x num_heads]
        attn = F.softmax(attn_score, dim=1)
        return_attn = attn # keep the pre-dropout weights for callers that inspect attention
        attn = self.dropout(attn) # (tgt_length, src_length, batch_size, num_heads)

        context = torch.einsum('ijbn,jbnd->ibnd', (attn, v)) # attention-weighted sum of values: (tgt_length, batch_size, num_heads, head_dim)
        context = context.reshape(context.shape[0], context.shape[1], num_heads * head_dim) # merge the heads: (tgt_length, batch_size, num_heads * head_dim)
        # out_proj maps the merged context back to the model dimension:
        # (tgt_length, batch_size, dim). The pre-dropout attention weights are
        # returned alongside it.
        return self.out_proj(context), return_attn


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional-encoding table with optional dropout.

    A table of `max_length` encodings of size `dim` is precomputed once and
    stored as a non-trainable buffer; `forward` simply looks up rows of the
    table by position index.
    """

    def __init__(self, dim, max_length, dropout_p=0, batch_first=False):
        """
        dim: size of each encoding vector (the model embedding dim)
        max_length: number of positions to precompute
        dropout_p: dropout applied to looked-up encodings (0 disables it)
        batch_first: where the singleton batch axis goes in forward()
        """
        super().__init__()
        self.dim = dim
        self.max_length = max_length
        self.batch_first = batch_first
        self.dropout = nn.Dropout(dropout_p) if dropout_p > 0 else nn.Identity()

        # Standard "Attention Is All You Need" sinusoid table: even columns
        # hold sin(pos / 10000^(2i/dim)), odd columns hold the matching cos.
        pos = torch.arange(0, max_length, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))
        table = torch.zeros(max_length, dim)
        table[:, 0::2] = torch.sin(pos * freq)
        table[:, 1::2] = torch.cos(pos * freq)
        self.register_buffer('encodings', table)

    def forward(self, positions):
        """Look up encodings for `positions` (a 1-D tensor of indices).

        Returns (1, len(positions), dim) when batch_first is set, otherwise
        (len(positions), 1, dim).
        """
        looked_up = self.dropout(self.encodings[positions])
        if self.batch_first:
            return looked_up.unsqueeze(0)
        return looked_up.unsqueeze(1)


class PredictionNet(nn.Module):

    def __init__(self, modality_order, num_current, embeds, out_heads, embed_dim, activation, norm, dropout_p,
                 feedforward_dim, head_dim, num_heads, num_layers, memory_length, max_length):
        '''
        Core dynamics component of TWM (Transformer World Model). Given
        embedded latents, actions (and optionally rewards/discounts), a
        Transformer-XL decoder predicts:

        - the next latent state (z)
        - the reward (r)
        - the discount factor (g)

        modality_order: e.g. ['z', 'a', 'r' (if config['dyn_input_rewards']), 'g' (if config['dyn_input_discounts'])]
        num_current: number of leading modalities that also exist for the
            "current" step (2: 'z' and 'a')
        embeds: per-modality embedder specs, e.g.
            {'z': {'in_dim': z_dim, 'categorical': False},
             'a': {'in_dim': num_actions, 'categorical': True},
             'r' (config: dyn_input_rewards): {'in_dim': 0, 'categorical': False},
             'g' (dyn_input_discounts): {'in_dim': 0, 'categorical': False}}
        out_heads: per-output MLP head specs, e.g.
            {'z': {'hidden_dims': config['dyn_z_dims'], 'out_dim': z_dim},
             'r': {'hidden_dims': config['dyn_reward_dims'], 'out_dim': 1, 'final_bias_init': 0.0},
             'g': {'hidden_dims': config['dyn_discount_dims'], 'out_dim': 1, 'final_bias_init': config['env_discount_factor']}}
        embed_dim: config['dyn_embed_dim']
        activation: config['dyn_act']
        norm: config['dyn_norm']
        dropout_p: config['dyn_dropout']
        feedforward_dim: config['dyn_feedforward_dim']
        head_dim: config['dyn_head_dim']
        num_heads: config['dyn_num_heads']
        num_layers: config['dyn_num_layers']
        memory_length: config['wm_memory_length']
        max_length: 1 + config['wm_sequence_length']  # 1 for context
        '''
        super().__init__()
        self.embed_dim = embed_dim
        self.memory_length = memory_length
        self.modality_order = tuple(modality_order) # freeze as a tuple (immutable, hashable)
        self.num_current = num_current

        # One embedder per modality: nn.Embedding for categorical inputs
        # (discrete actions), an MLP otherwise. For in_dim == 0 ('r', 'g') the
        # MLP presumably promotes the scalar to one feature — confirm in MLP.
        self.embeds = nn.ModuleDict({
            name: nn.Embedding(embed['in_dim'], embed_dim) if embed.get('categorical', False) else
            MLP(embed['in_dim'], [], embed_dim, activation, norm=norm, dropout_p=dropout_p, post_activation=True)
            for name, embed in embeds.items()
        })

        # Transformer-XL decoder layer: relative positional self-attention
        # with segment memories.
        decoder_layer = TransformerXLDecoderLayer(
            embed_dim, feedforward_dim, head_dim, num_heads, activation, dropout_p)

        # Interleaving multiplies the timestep counts by the number of
        # modalities; the final "current" step contributes num_current tokens.
        num_modalities = len(modality_order)
        max_length = max_length * num_modalities + self.num_current
        mem_length = memory_length * num_modalities + self.num_current
        self.transformer = TransformerXLDecoder(decoder_layer, num_layers, max_length, mem_length, batch_first=True)

        # Output heads mapping transformer features to the predictions
        # ('z', 'r', 'g', ...).
        self.out_heads = nn.ModuleDict({
            name: MLP(embed_dim, head['hidden_dims'], head['out_dim'], activation, norm=norm, dropout_p=dropout_p,
                      pre_activation=True, final_bias_init=head.get('final_bias_init', None))
            for name, head in out_heads.items()
        })

    @lru_cache(maxsize=20)
    def _get_base_mask(self, src_length, tgt_length, device):
        '''
        Build the batch-independent attention mask that enforces causality and
        a fixed-size memory window over the interleaved token sequence.

        src_length: interleaved source length = history_length * num_modalities + num_current
        tgt_length: interleaved target length (same construction)
        device: device to allocate the mask on
        Returns a bool tensor (tgt_length, src_length); True = masked out.

        NOTE(review): lru_cache on an instance method keys on `self` and keeps
        the instance alive for the cache's lifetime (ruff B019) — presumably
        acceptable with a single world model instance; confirm.
        '''
        # Start fully masked (True = attention not allowed).
        src_mask = torch.ones(tgt_length, src_length, dtype=torch.bool, device=device)
        num_modalities = len(self.modality_order)
        for tgt_index in range(tgt_length):
            # the last indices are always 'current'
            start_index = src_length - self.num_current # first index of the 'current' tokens
            src_index = src_length - tgt_length + tgt_index # source position aligned with this target
            modality_index = (src_index - start_index) % num_modalities
            if modality_index < self.num_current:
                start = max(src_index - (self.memory_length + 1) * num_modalities, 0)
            else:
                start = max(src_index - modality_index - self.memory_length * num_modalities, 0)
            src_mask[tgt_index, start:src_index + 1] = False # open the memory window up to and including this position
        return src_mask

    def _get_mask(self, src_length, tgt_length, device, stop_mask):
        '''
        Combine the base causal/memory mask with a per-batch mask that blocks
        attention across episode boundaries.

        src_length / tgt_length: interleaved token lengths (see _get_base_mask)
        device: device for the mask
        stop_mask: (batch_size, seq_length) bool, True where an episode ended
        Returns a bool tensor (tgt_length, src_length, batch_size); True = masked out.
        '''
        # prevent attention over episode ends using stop_mask
        num_modalities = len(self.modality_order) # number of modalities (e.g. 'z', 'a', 'r', 'g')
        assert stop_mask.shape[1] * num_modalities + self.num_current == src_length # src_length must match the interleaved stop_mask length

        # Base mask enforcing causality and the fixed memory window
        # (True = cannot attend).
        src_mask = self._get_base_mask(src_length, tgt_length, device)

        batch_size, seq_length = stop_mask.shape
        stop_mask = stop_mask.t() # (seq_length, batch_size)
        stop_mask_shift_right = torch.cat([stop_mask.new_zeros(1, batch_size), stop_mask], dim=0) # prepend a zero row
        stop_mask_shift_left = torch.cat([stop_mask, stop_mask.new_zeros(1, batch_size)], dim=0) # append a zero row

        tril = stop_mask.new_ones(seq_length + 1, seq_length + 1).tril() # lower-triangular: targets may only see current/past steps
        src = torch.logical_and(stop_mask_shift_left.unsqueeze(0), tril.unsqueeze(-1)) # restrict episode-end flags to the causal region
        src = torch.cummax(src.flip(1), dim=1).values.flip(1) # src[i, j] True => an episode end lies between source j and target i

        shifted_tril = stop_mask.new_ones(seq_length + 1, seq_length + 1).tril(diagonal=-1)
        tgt = torch.logical_and(stop_mask_shift_right.unsqueeze(1), shifted_tril.unsqueeze(-1))
        tgt = torch.cummax(tgt, dim=0).values # propagate the episode-end signal forward along the target axis

        idx = torch.logical_and(src, tgt) # True where attention pair (i, j) would cross an episode boundary

        # Expand from per-timestep to per-token by repeating along both axes
        # for every modality.
        i, j, k = idx.shape
        idx = idx.reshape(i, 1, j, 1, k).expand(i, num_modalities, j, num_modalities, k) \
            .reshape(i * num_modalities, j * num_modalities, k)

        # The final 'current' step only has num_current tokens; trim the rest.
        offset = num_modalities - self.num_current
        if offset > 0:
            idx = idx[:-offset, :-offset]
        idx = idx[-tgt_length:]

        # Broadcast the base mask over the batch, then overlay the
        # episode-boundary restrictions.
        src_mask = src_mask.unsqueeze(-1).tile(1, 1, batch_size)
        src_mask[idx] = True
        return src_mask # final combined mask for the transformer attention

    def forward(self, inputs, tgt_length, stop_mask, heads=None, mems=None, return_attention=False):
        '''
        inputs: dict of raw modality tensors, e.g. {
            'z': (batch, sequence_length + extra - 1, z_categoricals * z_categories),
            'a': (batch, sequence_length + extra - 2),
            'r': (batch, sequence_length + extra - 3),
            'g': (batch, sequence_length + extra - 3)
        }
        tgt_length: number of timesteps to produce outputs for
        stop_mask: (batch, sequence_length + extra - 3) episode-end flags
        heads: which output heads to evaluate; default all (e.g. 'z', 'r', 'g')
        mems: Transformer-XL memories, default None
        return_attention: also return the attention weights, default False
        '''
        modality_order = self.modality_order
        num_modalities = len(modality_order)
        num_current = self.num_current # number of modalities guaranteed present for the current step

        # The mandatory modalities must share one batch shape; the optional
        # ones must match each other as well.
        assert common.same_batch_shape([inputs[name] for name in modality_order[:num_current]])
        if num_modalities > num_current:
            assert common.same_batch_shape([inputs[name] for name in modality_order[num_current:]])

        # Embed every modality into the shared embed_dim space:
        # embeds = {
        #  'z': (1, sequence_length + extra - 1, embed_dim),
        #  'a': (1, sequence_length + extra - 2, embed_dim),
        #  'r': (1, sequence_length + extra - 3, embed_dim),
        #  'g': (1, sequence_length + extra - 3, embed_dim)
        # }
        embeds = {name: mod(inputs[name]) for name, mod in self.embeds.items()}

        def cat_modalities(xs):
            '''
            Interleave the given modality embeddings along the time axis: each
            timestep's modality vectors become adjacent tokens, so the result
            has sequence length seq_len * len(xs).
            '''
            batch_size, seq_len, dim = xs[0].shape
            # cat on dim=2 followed by reshape interleaves per-timestep tokens
            return torch.cat(xs, dim=2).reshape(batch_size, seq_len * len(xs), dim)

        if mems is None:
            # modality_order[0] is 'z'; everything but the last timestep is
            # treated as history.
            history_length = embeds[modality_order[0]].shape[1] - 1
            if num_modalities == num_current:
                inputs = cat_modalities([embeds[name] for name in modality_order])
            else:
                # Interleave the full history over all modalities, then append
                # the current step built from the mandatory modalities only.
                history = cat_modalities([embeds[name][:, :history_length] for name in modality_order])
                current = cat_modalities([embeds[name][:, history_length:] for name in modality_order[:num_current]])
                inputs = torch.cat([history, current], dim=1)
            # Convert timestep counts into interleaved token counts.
            tgt_length = (tgt_length - 1) * num_modalities + num_current
            src_length = history_length * num_modalities + num_current
            assert inputs.shape[1] == src_length
            # src mask shape is [tgt_length, src_length, batch_size]
            # NOTE(review): src_length is passed for both arguments here, so
            # the mask covers every source position even though tgt_length was
            # just computed above — presumably intentional because the
            # transformer receives tgt_length separately; confirm.
            src_mask = self._get_mask(src_length, src_length, inputs.device, stop_mask)
        else:
            # With memories, the whole new input belongs to the current
            # segment. modality_order[0] is 'z'.
            sequence_length = embeds[modality_order[0]].shape[1]
            # switch order so that 'currents' are last
            '''
            modality_order[num_current:] 得到 ('r', 'g')
            modality_order[:num_current] 得到 ('z', 'a')
            合并后变成 ('r', 'g', 'z', 'a')

            在 Transformer 架构中，这种重排序是为了：

            调整注意力机制：

            将历史信息（如奖励、折扣）放在序列的前部
            将当前需要预测的信息（如状态、动作）放在序列的后部
            序列处理顺序：

            在调用 cat_modalities 时，这种排序确保了编码器先处理历史相关模态，再处理当前模态
            这种处理顺序对于预测任务尤为重要
            内存处理：

            在使用记忆机制时，这种排序与内存的存储和检索模式一致
            这是典型的 Transformer XL 或类似架构中的一种优化技术，有助于模型更有效地处理时序依赖关系。
            '''
            # Interleave with the optional modalities first so the mandatory
            # 'current' modalities end up last in each timestep group.
            inputs = cat_modalities(
                [embeds[name] for name in (modality_order[num_current:] + modality_order[:num_current])])
            tgt_length = tgt_length * num_modalities # timesteps -> interleaved tokens
            mem_length = mems[0].shape[0]
            src_length = mem_length + sequence_length * num_modalities
            src_mask = self._get_mask(src_length, tgt_length, inputs.device, stop_mask)

        positions = torch.arange(src_length - 1, -1, -1, device=inputs.device)
        outputs = self.transformer(
            inputs, positions, attn_mask=src_mask, mems=mems, tgt_length=tgt_length, return_attention=return_attention)
        # hiddens: (batch_size, tgt_length, dim)
        # new mems: [num_layers + 1, mem_length, batch_size, dim]
        # attention: (tgt_length, src_length, batch_size, num_layers, num_heads) or None
        hiddens, mems, attention = outputs if return_attention else (outputs + (None,))

        # take outputs at last current
        assert hiddens.shape[1] == tgt_length
        out_idx = torch.arange(tgt_length - 1, -1, -num_modalities, device=inputs.device).flip([0]) # indices of the last 'current' token of each timestep
        hiddens = hiddens[:, out_idx] # one output per timestep: (batch_size, tgt_length / num_modalities, dim)
        if return_attention:
            attention = attention[out_idx] # (tgt_length / num_modalities, src_length, batch_size, num_layers, num_heads)

        if heads is None:
            heads = self.out_heads.keys()  # default: evaluate every configured head

        out = {name: self.out_heads[name](hiddens) for name in heads} # per-head predictions ('z', 'r', 'g', ...)
        # out shape is {
        #   'z': (batch_size, tgt_length / num_modalities, z_dim),
        #   'r': (batch_size, tgt_length / num_modalities, 1
        #   'g': (batch_size, tgt_length / num_modalities, 1)
        #   ...
        # }

        '''
        out shape is {
            'z': (batch_size, tgt_length / num_modalities, z_dim),
            'r': (batch_size, tgt_length / num_modalities, 1),
            'g': (batch_size, tgt_length / num_modalities, 1)
        }
        hiddens shape is (batch_size, tgt_length / num_modalities, dim)
        mems shape is [num_layers + 1, mem_length, batch_size, dim]
        attention shape is (tgt_length / num_modalities, src_length, batch_size, num_layers, num_heads) or None
        这里的tgt_length / num_modalities 是因为每个模态的输出都是在最后一个位置进行预测的，所以需要除以模态数量
        '''
        return (out, hiddens, mems, attention) if return_attention else (out, hiddens, mems)


class ActorCritic(nn.Module):
    """Discrete-action actor-critic operating on world-model latents.

    The actor and critic are MLPs over the latent state ``z``, optionally
    concatenated with the dynamics model's hidden state ``h``. Targets are
    computed with GAE(lambda); bootstrapping may use a lagged target critic.
    """

    def __init__(self, config, num_actions, z_dim, h_dim):
        '''
        config: hyperparameter dict ('ac_*', 'actor_*', 'critic_*' keys)
        num_actions: size of the discrete action space
        z_dim: dimension of the latent state z (presumably z_categoricals * z_categories — TODO confirm)
        h_dim: dimension of the transformer hidden state h
        '''
        super().__init__()
        self.config = config
        self.num_actions = num_actions
        activation = config['ac_act']
        norm = config['ac_norm']
        dropout_p = config['ac_dropout']

        # The policy/value input is z, optionally concatenated with h.
        input_dim = z_dim
        if config['ac_input_h']:
            input_dim += h_dim

        self.h_norm = get_norm_1d(config['ac_h_norm'], h_dim)
        self.trunk = nn.Identity()
        # Policy head: outputs unnormalized action logits.
        self.actor_model = MLP(
            input_dim, config['actor_dims'], num_actions, activation, norm=norm, dropout_p=dropout_p,
            weight_initializer='orthogonal', bias_initializer='zeros')
        # Value head: outputs a scalar state value.
        self.critic_model = MLP(
            input_dim, config['critic_dims'], 1, activation, norm=norm, dropout_p=dropout_p,
            weight_initializer='orthogonal', bias_initializer='zeros')
        if config['critic_target_interval'] > 1:
            # Lagged target critic used for bootstrapping.
            self.target_critic_model = copy.deepcopy(self.critic_model).requires_grad_(False)
            # Counts target computations since the last sync; kept as a buffer
            # so it is saved/restored with the module's state dict.
            self.register_buffer('target_critic_lag', torch.zeros(1, dtype=torch.long))

        self.actor_optimizer = common.AdamOptim(
            self.actor_model.parameters(), lr=config['actor_lr'], eps=config['actor_eps'],
            weight_decay=config['actor_wd'], grad_clip=config['actor_grad_clip'])
        self.critic_optimizer = common.AdamOptim(
            self.critic_model.parameters(), lr=config['critic_lr'], eps=config['critic_eps'],
            weight_decay=config['critic_wd'], grad_clip=config['critic_grad_clip'])

        self.sync_target()

    @torch.no_grad()
    def _prepare_inputs(self, z, h):
        '''
        Combine the latent state z with the (normalized) hidden state h into
        the shared policy/value input, then pass it through the trunk.

        z: (batch_size, T, z_dim); h: (batch_size, T, h_dim), ignored when
        config['ac_input_h'] is false.
        '''
        assert common.check_no_grad(z, h)
        assert h is None or common.same_batch_shape([z, h])
        config = self.config
        if config['ac_input_h']:
            h = self.h_norm(h)
            x = torch.cat([z, h], dim=-1)
        else:
            x = z
        # trunk is nn.Identity today; the flatten/unflatten keeps the call
        # shape-agnostic in case a real trunk is configured later.
        shape = x.shape[:2]
        x = self.trunk(x.flatten(0, 1)).unflatten(0, shape)
        return x

    def actor(self, x):
        '''Action logits, shape (batch_size, T, num_actions), for inputs x of shape (batch_size, T, input_dim).'''
        shape = x.shape[:2]
        logits = self.actor_model(x.flatten(0, 1)).unflatten(0, shape)
        return logits

    def critic(self, x):
        '''State values, shape (batch_size, T), for inputs x of shape (batch_size, T, input_dim).'''
        shape = x.shape[:2]
        values = self.critic_model(x.flatten(0, 1)).squeeze(-1).unflatten(0, shape)
        return values

    def sync_target(self):
        '''Copy the online critic into the target critic and reset the lag counter.'''
        if self.config['critic_target_interval'] > 1:
            self.target_critic_lag[:] = 0  # restart the sync-interval counter
            self.target_critic_model.load_state_dict(self.critic_model.state_dict())

    def optimize(self, z, h, a, r, g, d, weights):
        '''
        One actor-critic update on (imagined) rollout data.

        z: latent states; h: transformer hidden states; a: actions;
        r: rewards; g: discounts; d: dones (may be None);
        weights: per-step weights (cumulative discounts over the rollout).
        Returns a metrics dict.
        '''
        # z carries the compact per-step representation, h the temporal
        # context from the dynamics model; together they form the policy and
        # value input.
        x = self._prepare_inputs(z, h)
        # Bootstrap returns (Q targets) and GAE advantages.
        returns, advantages = self._compute_targets(x, r, g, d)
        self.train()

        # remove last time step, the last state is for bootstrapping
        values = self.critic(x[:, :-1])
        critic_loss, critic_metrics = self._compute_critic_loss(values, returns, weights)
        self.critic_optimizer.step(critic_loss)

        logits = self.actor(x[:, :-1])
        actor_loss, actor_metrics = self._compute_actor_loss(logits, a, advantages, weights)
        self.actor_optimizer.step(actor_loss)

        metrics = common.combine_metrics([critic_metrics, actor_metrics])
        if d is not None:
            metrics['num_dones'] = d.sum().detach()  # number of imagined dones
        return metrics

    def optimize_pretrain(self, z, h, r, g, d):
        '''
        Pretraining update on real data: train the critic normally and push
        the actor towards maximum entropy (the data was collected by a random
        policy, so there is no useful policy gradient yet).

        z: (batch_size, T + 1, z_dim); h: (batch_size, T + 1, h_dim);
        r, g, d: (batch_size, T) — shapes per the callers; TODO confirm.
        '''
        config = self.config
        x = self._prepare_inputs(z, h)
        returns, advantages = self._compute_targets(x, r, g, d)
        weights = torch.ones_like(returns)  # no weights, since we use real data

        self.train()
        # remove last time step, the last state is for bootstrapping
        values = self.critic(x[:, :-1])
        critic_loss, critic_metrics = self._compute_critic_loss(values, returns, weights)

        # maximize entropy, ok since data was collected with random policy
        shape = x.shape[:2]
        logits = self.actor_model(x.flatten(0, 1)).unflatten(0, shape)
        dist = D.Categorical(logits=logits)
        max_entropy = math.log(self.num_actions)  # entropy of the uniform distribution
        entropy = dist.entropy().mean()
        normalized_entropy = entropy / max_entropy  # in [0, 1]
        # Pure entropy bonus: a larger coefficient drives the policy towards
        # the uniform distribution (exploration), no action preference yet.
        actor_loss = -config['actor_entropy_coef'] * normalized_entropy
        actor_metrics = {
            'actor_loss': actor_loss.detach(), 'ent': entropy.detach(), 'norm_ent': normalized_entropy.detach()
        }

        self.actor_optimizer.step(actor_loss)
        self.critic_optimizer.step(critic_loss)

        return common.combine_metrics([critic_metrics, actor_metrics])

    def _compute_actor_loss(self, logits, a, advantages, weights):
        '''
        REINFORCE-style policy loss with an entropy floor.

        logits: (batch_size, T, num_actions) actor outputs
        a: (batch_size, T) sampled actions
        advantages: (batch_size, T) GAE advantages
        weights: (batch_size, T) per-sample weights
        Returns (loss, metrics).
        '''
        assert common.check_no_grad(a, advantages, weights)
        config = self.config
        dist = D.Categorical(logits=logits)
        # Policy gradient: raise the log-probability of actions with positive
        # advantage, lower it for negative advantage.
        reinforce = dist.log_prob(a) * advantages
        reinforce = (weights * reinforce).mean()
        loss = -reinforce

        entropy = weights * dist.entropy()  # weighted per-step policy entropy
        max_entropy = math.log(self.num_actions)  # entropy of the uniform distribution
        normalized_entropy = (entropy / max_entropy).mean()
        coef = config['actor_entropy_coef']
        if coef != 0:
            # Penalize only when entropy drops below the threshold; the relu
            # zeroes the term once the policy is "random enough".
            entropy_reg = coef * torch.relu(config['actor_entropy_threshold'] - normalized_entropy)
            loss = loss + entropy_reg

        metrics = {
            'actor_loss': loss.detach(), 'reinforce': reinforce.detach().mean(), 'ent': entropy.detach().mean(),
            'norm_ent': normalized_entropy.detach()
        }
        return loss, metrics

    def _compute_critic_loss(self, values, returns, weights):
        '''
        Gaussian negative log-likelihood critic loss (a weighted squared error
        up to constants).

        values: (batch_size, T) critic predictions
        returns: (batch_size, T) bootstrap return targets
        weights: (batch_size, T) per-sample weights (all ones during pretraining)
        Returns (loss, metrics).
        '''
        assert common.check_no_grad(returns, weights)
        # Unit-variance normal centered on the predictions; -log_prob of the
        # returns then measures the (scaled) squared distance to the targets.
        value_dist = D.Normal(values, torch.ones_like(values))
        loss = -(weights * value_dist.log_prob(returns)).mean()
        mae = torch.abs(returns - values.detach()).mean()  # monitoring only
        metrics = {'critic_loss': loss.detach(), 'critic_mae': mae, 'critic': values.detach().mean(),
                   'returns': returns.mean()}
        return loss, metrics

    @torch.no_grad()
    def _compute_gae(self, r, g, values, dones=None):
        '''
        Generalized Advantage Estimation, computed by the standard backward
        recursion over one-step TD errors.

        r: (batch_size, T) rewards
        g: (batch_size, T) discounts
        values: (batch_size, T + 1) — one extra step for bootstrapping
        dones: optional (batch_size, T) episode-end flags
        Returns advantages of shape (batch_size, T).
        '''
        assert common.same_batch_shape([r, g])
        assert dones is None or common.same_batch_shape([r, dones])
        assert common.same_batch_shape_time_offset(values, r, 1)
        assert common.check_no_grad(r, g, values, dones)
        # Zero the discount at episode ends so no value flows across them.
        # Bug fix: the else-branch referenced the undefined name `discounts`,
        # raising NameError whenever dones was None; without dones the
        # discounts are simply g.
        stopped_discounts = (g * (~dones).float()) if dones is not None else g
        delta = r + stopped_discounts * values[:, 1:] - values[:, :-1]  # one-step TD errors
        # Extra slot at index T acts as the zero bootstrap for the recursion.
        advantages = torch.zeros_like(values)
        factors = stopped_discounts * self.config['env_discount_lambda']
        for t in range(r.shape[1] - 1, -1, -1):
            advantages[:, t] = delta[:, t] + factors[:, t] * advantages[:, t + 1]
        advantages = advantages[:, :-1]
        return advantages

    @torch.no_grad()
    def _compute_targets(self, x, r, g, d=None):
        '''
        Compute bootstrap return targets and (optionally normalized) GAE
        advantages.

        x: (batch_size, T + 1, input_dim); r, g: (batch_size, T);
        d: optional (batch_size, T).
        Returns (returns, advantages), each of shape (batch_size, T).
        '''
        # adopted from https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/buffers.py
        assert common.same_batch_shape([r, g])
        assert common.same_batch_shape_time_offset(x, r, 1)
        assert d is None or common.same_batch_shape([r, d])
        assert common.check_no_grad(x, r, g, d)
        config = self.config
        self.eval()

        shape = x.shape[:2]
        if config['critic_target_interval'] > 1:
            # Use the lagged target critic, syncing it every
            # critic_target_interval target computations.
            self.target_critic_lag += 1
            if self.target_critic_lag >= config['critic_target_interval']:
                self.sync_target()
            values = self.target_critic_model(x.flatten(0, 1)).squeeze(-1).unflatten(0, shape)
        else:
            # An interval <= 1 would sync on every call, so the online critic
            # is used directly.
            values = self.critic_model(x.flatten(0, 1)).squeeze(-1).unflatten(0, shape)

        # advantages: (batch_size, T)
        advantages = self._compute_gae(r, g, values, d)
        # returns: (batch_size, T) bootstrap Q-value targets
        returns = advantages + values[:, :-1]
        if config['ac_normalize_advantages']:
            adv_mean = advantages.mean()  # normalize advantages to zero mean / unit std
            adv_std = torch.std(advantages, unbiased=False)
            advantages = (advantages - adv_mean) / (adv_std + 1e-8)
        return returns, advantages

    @torch.no_grad()
    def policy(self, z, h, temperature=1):
        '''
        Sample actions from the current policy for the given latent features.

        z: (batch_size, T, z_dim) latent features
        h: (batch_size, T, h_dim) transformer hidden states
        temperature: 0 = greedy argmax; otherwise softmax temperature applied
            to the logits before sampling (1 = unmodified distribution).
        Returns action indices.
        '''
        assert common.check_no_grad(z, h)
        # Evaluation mode disables dropout for action selection.
        self.eval()
        x = self._prepare_inputs(z, h)
        logits = self.actor(x)

        if temperature == 0:
            actions = logits.argmax(dim=-1)
        else:
            # Bug fix: the temperature used to be applied twice — once here
            # and again inside the Categorical construction — which squared
            # its effect. It is now applied exactly once.
            if temperature != 1:
                logits = logits / temperature
            actions = D.Categorical(logits=logits).sample()
        return actions


class Dreamer:
    # reset: s_t-1, a_t-1, r_t-1, d_t-1, s_t => s_t, h_t-1
    # step:  a_t => s_t+1, h_t, r_t, d_t

    def __init__(self, config, wm, mode, ac=None, store_data=False, start_z_sampler=None, always_compute_obs=False):
        '''
        config: 配置
        wm: WorldModel 实例
        mode: 'imagine' 或 'observe'
        ac: ActorCritic 实例 (仅在 mode='imagine' 时使用)
        store_data: 是否存储数据
        start_z_sampler: 用于生成初始状态 z 的采样器 (仅在 mode='imagine' 时使用)
        always_compute_obs: 是否在每一步都计算观察
        '''
        assert mode in ('imagine', 'observe')
        assert mode != 'imagine' or start_z_sampler is not None
        self.config = config
        self.wm = wm
        self.ac = ac
        self.mode = mode
        self.store_data = store_data
        self.start_z_sampler = start_z_sampler
        self.always_compute_obs = always_compute_obs

        self.cumulative_g = None  # cumulative discounts
        self.stop_mask = None  # history of dones, for transformer
        self.mems = None
        self.prev_z = None
        self.prev_o = None
        self.prev_h = None
        self.prev_r = None
        self.prev_g = None  # discounts
        self.prev_d = None  # episode ends

        if store_data:
            self.z_data = None
            self.o_data = None
            self.h_data = None
            self.a_data = None
            self.r_data = None
            self.g_data = None
            self.d_data = None
            self.weight_data = None

    @torch.no_grad()
    def get_data(self):
        assert self.store_data
        z = torch.cat(self.z_data, dim=1)
        o = torch.cat(self.o_data, dim=1) if len(self.o_data) > 0 else None
        h = torch.cat(self.h_data, dim=1)
        a = torch.cat(self.a_data, dim=1)
        r = torch.cat(self.r_data, dim=1)
        g = torch.cat(self.g_data, dim=1)
        d = torch.cat(self.d_data, dim=1)
        weights = torch.cat(self.weight_data, dim=1)
        return z, o, h, a, r, g, d, weights

    def _zero_h(self, batch_size, device):
        return torch.zeros(batch_size, 1, self.wm.h_dim, device=device)

    def _reset(self, start_z, start_a, start_r, start_terminated, start_truncated, keep_start_data=False):
        '''
        Initialize the dreamer's rollout state from a window of context data.

        All tensors carry a time axis at dim 1; start_z has exactly one more
        time step than the others (asserted below):
          start_z:          (batch, T + 1, z_categoricals * z_categories)
          start_a:          (batch, T) actions
          start_r:          (batch, T) rewards
          start_terminated: (batch, T) termination flags
          start_truncated:  (batch, T) truncation flags
        keep_start_data: when store_data is on, keep the whole context window
          in the data buffers instead of only the last step.

        Returns (z, h, start_g, start_d): last-step latent, transformer hidden
        state (or None), plus the derived discounts and done flags.
        '''
        assert common.same_batch_shape([start_a, start_r, start_terminated, start_truncated])
        assert common.same_batch_shape_time_offset(start_z, start_r, 1)
        assert not (keep_start_data and not self.store_data)
        config = self.config
        # Eval mode: the world model is not trained here.
        wm = self.wm.eval()
        obs_model = wm.obs_model
        dyn_model = wm.dyn_model

        # Derive per-step discounts and combined done flags from episode ends.
        start_g = wm.to_discounts(start_terminated) # start_g shape is (batch, T)
        start_d = torch.logical_or(start_terminated, start_truncated) # start_d shape is (batch, T)
        if self.mode == 'imagine' or (self.mode == 'observe' and config['ac_input_h']):
            if start_a.shape[1] == 0: # no context yet: start from a zero hidden state
                h = self._zero_h(start_a.shape[0], start_a.device)
                mems = None
            else:
                # Run the dynamics model over the context (dropping the final
                # time step of z/r/g/d, which start_a does not cover) to get
                # the transformer hidden state h and its memory mems
                # (per-layer outputs plus accumulated history).
                _, h, mems = dyn_model.predict(
                    start_z[:, :-1], start_a, start_r[:, :-1], start_g[:, :-1], start_d[:, :-1], heads=[], tgt_length=1)
        else:
            h, mems = None, None

        # set cumulative_g to 1 for real data, start discounting after that
        start_weights = (~start_d).float()
        self.cumulative_g = torch.ones_like(start_g[:, -1:])
        self.stop_mask = start_d

        # Keep only the last context time step as the current state.
        z = start_z[:, -1:]
        r = start_r[:, -1:]
        g = start_g[:, -1:]
        d = start_d[:, -1:]

        # Cache the one-step state used by act() and _step().
        self.mems = mems
        self.prev_z = z
        self.prev_h = h
        self.prev_r = r
        self.prev_g = g
        self.prev_d = d

        if self.store_data:
            '''
            Record trajectory data collected while imagining/observing so it
            can be retrieved later via get_data().
            '''
            self.h_data = [self._zero_h(start_z.shape[0], start_z.device) if h is None else h]

            if keep_start_data:
                # Keep the whole context window in the buffers.
                self.z_data = [start_z]
                self.a_data = [start_a]
                self.r_data = [start_r]
                self.g_data = [start_g]
                self.d_data = [start_d]
                self.weight_data = [start_weights]
            else:
                # Default path: keep only the last context step of z; the
                # other buffers start empty and are filled step by step.
                self.z_data = [z]
                self.a_data = []
                self.r_data = []
                self.g_data = []
                self.d_data = []
                self.weight_data = []

        if self.always_compute_obs:
            # Decode the context latents into observations.
            start_o = obs_model.decode(start_z)
            o = start_o[:, -1:] # observation at the last context step
            self.prev_o = o
            if self.store_data:
                if keep_start_data:
                    # Keep the full observation trajectory.
                    self.o_data = [start_o]
                else:
                    # Keep only the last context observation.
                    self.o_data = [o]
        else:
            if self.store_data:
                self.o_data = []

        '''
        z: (batch, 1, z_categoricals * z_categories) latent of the last context step
        h: transformer hidden state, or None when not computed
        start_g: (batch, T) discount factors
        start_d: (batch, T) terminated-or-truncated flags
        '''
        return z, h, start_g, start_d

    @torch.no_grad()
    def imagine_reset(self, start_z, start_a, start_r, start_terminated, start_truncated, keep_start_data=False):
        '''
        Reset the dreamer for imagination from a window of context data.

        start_z: (batch, T + 1, z_categoricals * z_categories) latents;
        start_a/start_r/start_terminated/start_truncated: (batch, T) tensors.
        Returns (z, h, start_g, start_d) from _reset().
        '''
        assert self.mode == 'imagine'
        return self._reset(start_z, start_a, start_r, start_terminated, start_truncated,
                           keep_start_data=keep_start_data)

    @torch.no_grad()
    def observe_reset(self, start_o, start_a, start_r, start_terminated, start_truncated, keep_start_data=False):
        '''
        Encode real observations to latents, then reset the dreamer from them.

        Returns (z, h, start_z, start_g, start_d): the _reset() outputs plus
        the encoded context latents start_z.
        '''
        assert self.mode == 'observe'
        encoder = self.wm.obs_model.eval()
        # Deterministic encoding (temperature=0) of the context observations.
        start_z = encoder.encode_sample(start_o, temperature=0)
        reset_out = self._reset(
            start_z, start_a, start_r, start_terminated, start_truncated, keep_start_data)
        z, h, start_g, start_d = reset_out
        return z, h, start_z, start_g, start_d

    @staticmethod
    def _create_single_data(batch_size, device):
        start_a = torch.zeros(batch_size, 0, dtype=torch.long, device=device)
        start_r = torch.zeros(batch_size, 0, device=device)
        start_terminated = torch.zeros(batch_size, 0, dtype=torch.bool, device=device)
        start_truncated = torch.zeros(batch_size, 0, dtype=torch.bool, device=device)
        return start_a, start_r, start_terminated, start_truncated

    @torch.no_grad()
    def imagine_reset_single(self, start_z, keep_start_data=False):
        """Reset imagination from a single latent time step (shape (batch, 1, ...))."""
        assert start_z.shape[1] == 1
        # Fabricate empty (time-length-0) context tensors for a/r/terminated/truncated.
        extras = self._create_single_data(start_z.shape[0], start_z.device)
        return self.imagine_reset(start_z, *extras, keep_start_data)

    @torch.no_grad()
    def observe_reset_single(self, start_o, keep_start_data=False):
        """Reset observation mode from a single observation time step (shape (batch, 1, ...))."""
        assert start_o.shape[1] == 1
        # Fabricate empty (time-length-0) context tensors for a/r/terminated/truncated.
        extras = self._create_single_data(start_o.shape[0], start_o.device)
        return self.observe_reset(start_o, *extras, keep_start_data)

    def _step(self, a, z, r, g, d, temperature, return_attention):
        '''
        Advance the dreamer by one time step.

        a: action for this step, shape (batch, 1). During training only a is
            passed (imagine mode); during evaluation/observation the real
            z, r, g, d replace the imagined values.
        z, r, g, d: in 'observe' mode the real latent/reward/discount/done
            for this step; in 'imagine' mode all must be None and are
            predicted by the world model instead.
        temperature: sampling temperature for the imagined latent z
            (unused in 'observe' mode).
        return_attention: whether to also return the transformer attention.

        Returns (z, h, z_dist, r, g, d, weights[, attention]).
        '''
        config = self.config
        imagine = self.mode == 'imagine' # False when observing real data
        assert a.shape[1] == 1
        assert all(x is None for x in (z, r, g, d)) if imagine else common.same_batch_shape([a, z, r, g, d])
        wm = self.wm.eval()
        obs_model = wm.obs_model
        dyn_model = wm.dyn_model

        z_dist = None
        if imagine or self.config['ac_input_h']:
            # In imagine mode (training) this branch is always taken.
            assert self.mems is not None or self.prev_r.shape[1] == 0
            assert self.mems is None or a.shape[0] == self.mems[0].shape[1]
            heads = ['z', 'r', 'g'] if imagine else []
            # Predict the next step from the previous state and the chosen action.
            outputs = dyn_model.predict(
                self.prev_z, a, self.prev_r, self.prev_g, self.stop_mask, tgt_length=1, heads=heads, mems=self.mems,
                return_attention=return_attention)
            preds, h, mems, attention = outputs if return_attention else (outputs + (None,))
            if imagine:
                # When imagining, z_dist is a distribution over the next latent.
                z_dist = preds['z_dist']
                # Sample the next latent from z_dist at the given temperature.
                z = obs_model.sample_z(z_dist, temperature=temperature)
                # Use the predicted reward...
                r = preds['r']
                # ...and the predicted discount. (In observe mode the real
                # z/r/g passed in are used instead.)
                g = preds['g']
        else:
            h, mems, attention = None, None, None

        # cumulative_g is all-ones right after a reset on real data;
        # discounting starts from the first step taken afterwards.
        if self.cumulative_g.shape[1] == 0:
            # First step right after a (time-length-0) reset: full weight,
            # start accumulating from this step's discount.
            weights = torch.ones_like(g)
            self.cumulative_g = g.clone()
        else:
            # Otherwise weight this step by the accumulated discount, except
            # where the previous step ended an episode: those sequences
            # restart with weight 1.
            done = self.prev_d.float()
            not_done = (~self.prev_d).float()
            # prev_d True  -> episode just ended, new sequence: weight 1.
            # prev_d False -> continuing sequence: weight cumulative_g.
            weights = self.cumulative_g * not_done + torch.ones_like(self.prev_g) * done
            # Fold this step's discount into the running product, resetting
            # the product to g where an episode just ended. This is the key
            # update for multi-step discounted returns over imagined rollouts.
            self.cumulative_g = (not_done * self.cumulative_g + done) * g

        if imagine:
            if config['wm_discount_threshold'] > 0:
                # Detect imagined trajectories whose cumulative discount has
                # decayed below the threshold and mark them as done.
                d = (self.cumulative_g < config['wm_discount_threshold'])
                num_done = d.sum()
                if num_done > 0:
                    # Restart the finished trajectories from fresh start latents.
                    new_start_z = self.start_z_sampler(num_done)
                    z[d] = new_start_z
            else:
                d = torch.zeros(a.shape[0], 1, dtype=torch.bool, device=a.device)

        # Append this step's done flags to the stop-mask history and truncate
        # it to the transformer's memory length (+1).
        stop_mask = torch.cat([self.stop_mask, d], dim=1)
        memory_length = config['wm_memory_length']
        if stop_mask.shape[1] > memory_length + 1:
            # Drop the oldest entries beyond memory_length + 1.
            stop_mask = stop_mask[:, -(memory_length + 1):]
        self.stop_mask = stop_mask

        # Cache the single-step state for the next call
        # (only one time step is kept here).
        self.mems = mems
        self.prev_z, self.prev_h, self.prev_r, self.prev_g, self.prev_d = z, h, r, g, d

        if self.store_data:
            # Record this step in the trajectory buffers
            # (one entry appended per step).
            self.z_data.append(z)
            self.h_data.append(h)
            self.a_data.append(a)
            self.r_data.append(r)
            self.g_data.append(g)
            self.d_data.append(d)
            self.weight_data.append(weights)

        if self.always_compute_obs:
            # Decode the latent into an observation at every step...
            o = obs_model.decode(z)
            # ...and remember (and optionally store) it.
            self.prev_o = o
            if self.store_data:
                self.o_data.append(o)

        # Package the step results; attention is appended only on request.
        outputs = (z, h, z_dist, r, g, d, weights)
        if return_attention:
            outputs = outputs + (attention,)
        return outputs

    @torch.no_grad()
    def imagine_step(self, a, temperature=1, return_attention=False):
        """Take one imagined step with action a.

        Returns (z, h, z_dist, r, g, d, weights[, attention]) from _step().
        """
        assert self.mode == 'imagine'
        # In imagine mode z/r/g/d are predicted by the world model, so None is passed.
        return self._step(a, None, None, None, None,
                          temperature=temperature, return_attention=return_attention)

    @torch.no_grad()
    def observe_step(self, a, o, r, terminated, truncated, return_attention=False):
        '''
        Take one observed step: the real action a, observation o, reward r and
        episode-end flags terminated/truncated are encoded and fed to _step()
        so the dreamer's state tracks the real trajectory.
        Returns (z, h, g, d, weights[, attention]).
        '''
        assert self.mode == 'observe'
        wm = self.wm
        wm.obs_model.eval()
        # Deterministic encoding of the real observation.
        z = wm.obs_model.encode_sample(o, temperature=0)
        g = wm.to_discounts(terminated)
        d = torch.logical_or(terminated, truncated)
        step_out = self._step(a, z, r, g, d, temperature=None, return_attention=return_attention)
        if return_attention:
            _, h, _, _, _, _, weights, attention = step_out
            return z, h, g, d, weights, attention
        _, h, _, _, _, _, weights = step_out
        return z, h, g, d, weights

    @torch.no_grad()
    def act(self, temperature=1, epsilon=0):
        z, h = self.prev_z, self.prev_h
        # 返回动作的索引
        a = self.ac.policy(z, h, temperature=temperature)
        if epsilon > 0:
            # 以下是在 epsilon-greedy 策略下进行随机动作选择
            # 获取动作的数量
            num_actions = self.ac.num_actions
            # 生成一个与 a 相同形状的随机掩码，掩码中小于 epsilon 的位置为 True
            epsilon_mask = torch.rand_like(a, dtype=torch.float) < epsilon
            # 在掩码为 True 的位置，随机选择动作，False的位置保持不变
            # 进一步的随机选择动作
            random_actions = torch.randint_like(a, num_actions)
            a[epsilon_mask] = random_actions[epsilon_mask]
        return a