import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from typing import Optional, Tuple
import torch.distributions as td
from collections import namedtuple



# logit: logits of the categorical stochastic state, predicted from the
#        action and discrete state at t-1
# stoch: stochastic state sampled from `logit`
# deter: deterministic state produced by the RNN (from the previous action
#        and the prior state)
RSSMDiscState = namedtuple('RSSMDiscState', ['logit', 'stoch', 'deter'])
# mean: mean of the continuous stochastic state, predicted from the
#       action and continuous state at t-1
# std: standard deviation of the continuous stochastic state
# stoch: stochastic state sampled from (mean, std)
# deter: deterministic state produced by the RNN (from the previous action
#        and the prior state)
RSSMContState = namedtuple('RSSMContState', ['mean', 'std', 'stoch', 'deter'])

def seq_to_batch(sequence_data, batch_size, seq_len):
    """
    Collapse the leading (seq_len, batch) dimensions of a sequence tensor
    into one batch dimension of size seq_len * batch.
    """
    lead, second, *trailing = sequence_data.shape
    return torch.reshape(sequence_data, [lead * second, *trailing])

def batch_to_seq(batch_data, batch_size, seq_len):
    """
    Split a flat batch of size seq_len * batch back into a sequence tensor
    of shape (seq_len, batch_size, ...).
    """
    trailing = batch_data.shape[1:]
    return torch.reshape(batch_data, [seq_len, batch_size, *trailing])


class FeatureExtractor(nn.Module):
    """Atari-style convolutional feature extractor for image observations."""

    def __init__(self, input_shape):
        super(FeatureExtractor, self).__init__()
        # Classic DQN conv stack: 8x8/4 -> 4x4/2 -> 3x3/1, ReLU throughout.
        layers = [
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
        ]
        self.conv = nn.Sequential(*layers)

    def get_conv_out(self, shape):
        """Return the flattened size of the conv output for input `shape`."""
        dummy = torch.zeros(1, *shape)
        return int(np.prod(self.conv(dummy).size()))

    def forward(self, x):
        # Scale raw uint8 pixel values into [0, 1] before the conv stack.
        scaled = x.float() / 255.0
        return self.conv(scaled)
    

class RssmModel(nn.Module):
    '''
    深度确定性策略梯度动作预测网络
    '''
    def __init__(self, 
                action_size,
                rssm_node_size,
                embedding_size,
                rssm_type,
                info,
                act_fn=nn.ELU,  
                device='cpu'
    ):
        super(RssmModel, self).__init__()

        self.rssm_type = rssm_type
        self.device = device
        # 这边根据状态是连续的还是离散的选择不同的type
        if rssm_type == 'continuous':
            self.deter_size = info['deter_size']
            self.stoch_size = info['stoch_size']
            self.min_std = info['min_std']
        elif rssm_type == 'discrete':
            self.deter_size = info['deter_size']
            self.class_size = info['class_size']
            self.category_size = info['category_size']
            self.stoch_size  = self.class_size*self.category_size
        else:
            raise NotImplementedError
        
        self.action_size = action_size
        self.node_size = rssm_node_size
        self.embedding_size = embedding_size
        self.act_fn = act_fn
        self.rnn = nn.GRUCell(self.deter_size, self.deter_size) # 有一个RNN层，用于输入先验动作随机状态+先验实际动作得到的先验动作特征以及先验确定状态得到确定状态特征
        self.fc_embed_state_action = self._build_embed_state_action()
        # 先验分布预测
        self.fc_prior = self._build_temporal_prior()
        # 后验分布预测
        self.fc_posterior = self._build_temporal_posterior()

        '''
        fc_embed_state_action->self.rnn->self.fc_prior
        fc_embed_state_action->self.rnn->self.fc_posterior
        '''

    def _build_embed_state_action(self):
        """
        model is supposed to take in previous stochastic state and previous action 
        and embed it to deter size for rnn input
        """
        # 将随机状态stochastic state 和前一个动作嵌入到一个确定的deter_size 维度
        # 也就是提取了特征后输入到RNN中
        # todo 这是模型在上一个时间步预测的随机状态，表示环境的隐状态
        # todo 这是代理在上一个时间步执行的动作
        fc_embed_state_action = [nn.Linear(self.stoch_size + self.action_size, self.deter_size)]
        fc_embed_state_action += [self.act_fn()]
        return nn.Sequential(*fc_embed_state_action)
    

    def _build_temporal_prior(self):
        """
        model is supposed to take in latest deterministic state 
        and output prior over stochastic state
        选中的注释解释了模型的另一个关键功能：如何从最新的确定性状态（deterministic state）生成随机状态（stochastic state）的先验分布（prior）
        """
        # deter_size 是确定性状态的维度
        # node_size 是 RSSM 的节点大小
        # stoch_size 是随机状态的大小
        temporal_prior = [nn.Linear(self.deter_size, self.node_size)]
        temporal_prior += [self.act_fn()]
        if self.rssm_type == 'discrete':
            temporal_prior += [nn.Linear(self.node_size, self.stoch_size)]
        elif self.rssm_type == 'continuous':
             temporal_prior += [nn.Linear(self.node_size, 2 * self.stoch_size)]
        return nn.Sequential(*temporal_prior)
    

    def _build_temporal_posterior(self):
        """
        model is supposed to take in latest embedded observation and deterministic state 
        and output posterior over stochastic states
        选中的注释解释了模型的另一个关键功能：如何从最新的嵌入观察（embedded observation）和确定性状态（deterministic state）生成随机状态（stochastic state）的后验分布（posterior）
        最新的嵌入观察（latest embedded observation）：
        这是模型在当前时间步从环境中获取的观察数据，经过编码器处理后得到的嵌入表示。

        确定性状态（deterministic state）：
        这是模型在当前时间步通过 RNN 计算得到的确定性状态，表示环境的隐状态。

        生成随机状态的后验分布（output posterior over stochastic states）：
        模型使用最新的嵌入观察和确定性状态来生成一个后验分布，这个分布描述了随机状态的可能值。
        后验分布通常用参数化的概率分布（如高斯分布）来表示，参数包括均值和方差。
        """
        temporal_posterior = [nn.Linear(self.deter_size + self.embedding_size, self.node_size)]
        temporal_posterior += [self.act_fn()]
        if self.rssm_type == 'discrete':
            temporal_posterior += [nn.Linear(self.node_size, self.stoch_size)]
        elif self.rssm_type == 'continuous':
            temporal_posterior += [nn.Linear(self.node_size, 2 * self.stoch_size)]
        return nn.Sequential(*temporal_posterior)
    

    def get_dist(self, rssm_state):
        '''
        根据传入的RSSM状态，构建返回一个概率分布
        '''
        if self.rssm_type == 'discrete':
            shape = rssm_state.logit.shape
            logit = torch.reshape(rssm_state.logit, shape = (*shape[:-1], self.category_size, self.class_size))
            return td.Independent(td.OneHotCategoricalStraightThrough(logits=logit), 1)
        elif self.rssm_type == 'continuous':
            return td.independent.Independent(td.Normal(rssm_state.mean, rssm_state.std), 1)


    def rssm_seq_to_batch(self, rssm_state, batch_size, seq_len):
        '''
        为了更好的训练，这里将rssm state转化为batch形式，也就是将前两维度seq_len, batch(l, n)合并为一个维度
        '''
        if self.rssm_type == 'discrete':
            return RSSMDiscState(
                seq_to_batch(rssm_state.logit[:seq_len], batch_size, seq_len),
                seq_to_batch(rssm_state.stoch[:seq_len], batch_size, seq_len),
                seq_to_batch(rssm_state.deter[:seq_len], batch_size, seq_len)
            )
        elif self.rssm_type == 'continuous':
            return RSSMContState(
                seq_to_batch(rssm_state.mean[:seq_len], batch_size, seq_len),
                seq_to_batch(rssm_state.std[:seq_len], batch_size, seq_len),
                seq_to_batch(rssm_state.stoch[:seq_len], batch_size, seq_len),
                seq_to_batch(rssm_state.deter[:seq_len], batch_size, seq_len)
            )
        

    def rssm_batch_to_seq(self, rssm_state, batch_size, seq_len):
        if self.rssm_type == 'discrete':
            return RSSMDiscState(
                batch_to_seq(rssm_state.logit, batch_size, seq_len),
                batch_to_seq(rssm_state.stoch, batch_size, seq_len),
                batch_to_seq(rssm_state.deter, batch_size, seq_len)
            )
        elif self.rssm_type == 'continuous':
            return RSSMContState(
                batch_to_seq(rssm_state.mean, batch_size, seq_len),
                batch_to_seq(rssm_state.std, batch_size, seq_len),
                batch_to_seq(rssm_state.stoch, batch_size, seq_len),
                batch_to_seq(rssm_state.deter, batch_size, seq_len)
            )
        
    
    def rollout_imagination(self, horizon:int, actor:nn.Module, prev_rssm_state):
        '''
        param horizon: int, 要进行 rollout的步数 todo
        param actor: nn.Module, 用于执行动作的actor模型
        param prev_rssm_state: RSSMState, 初始的rssm状态,这里传入的后验状态

        return next_rssm_states: RSSMState, 预测得到的先验状态
        return imag_log_probs: torch.Tensor, 每个horizon步的动作的对数概率
        return action_entropy: torch.Tensor, 每个horizon步的动作的熵
        '''

        rssm_state = prev_rssm_state
        next_rssm_states = []
        action_entropy = [] # 存储动作的熵
        imag_log_probs = [] # 存储动作的对数概率
        for t in range(horizon):
            # todo 确定后验状态中的deter和stoch是否存在t时刻偏移
            action, action_dist = actor((self.get_model_state(rssm_state)).detach())
            # 根据动作和后验状态预测得到的预测先验状态 todo
            rssm_state = self.rssm_imagine(action, rssm_state)
            next_rssm_states.append(rssm_state)
            action_entropy.append(action_dist.entropy())
            imag_log_probs.append(action_dist.log_prob(torch.round(action.detach())))

        # 整合每个horizon步的结果到一个RSSMDiscState中
        next_rssm_states = self.rssm_stack_states(next_rssm_states, dim=0)
        # 整合每个horizon步的熵
        action_entropy = torch.stack(action_entropy, dim=0)
        # 整合每个horizon步的对数概率
        imag_log_probs = torch.stack(imag_log_probs, dim=0)
        return next_rssm_states, imag_log_probs, action_entropy
        

    
    def rssm_detach(self, rssm_state):
        if self.rssm_type == 'discrete':
            return RSSMDiscState(
                rssm_state.logit.detach(),  
                rssm_state.stoch.detach(),
                rssm_state.deter.detach(),
            )
        elif self.rssm_type == 'continuous':
            return RSSMContState(
                rssm_state.mean.detach(),
                rssm_state.std.detach(),  
                rssm_state.stoch.detach(),
                rssm_state.deter.detach()
            )


    def _init_rssm_state(self, batch_size, **kwargs):
        '''
        方法的作用是初始化 RSSM（Recurrent State-Space Model，递归状态空间模型）的状态。在 DreamerV2 算法中，RSSM 状态包括确定性状态（deterministic state）和随机状态（stochastic state）。初始化这些状态是模型开始运行时的必要步骤
        对比dreamerv1算法，也是存在类似的初始化，一般来说都是初始化为0

        始化确定性状态 deter_state 为全零张量，形状为 (batch_size, deter_size)。
        初始化随机状态 stoch_state 为全零张量，形状为 (batch_size, stoch_size)。
        返回初始化的确定性状态和随机状态
        这个应该是一个工具类，想要初始化状态时随时返回
        '''
        if self.rssm_type  == 'discrete':
            return RSSMDiscState(
                torch.zeros(batch_size, self.stoch_size, **kwargs).to(self.device),
                torch.zeros(batch_size, self.stoch_size, **kwargs).to(self.device),
                torch.zeros(batch_size, self.deter_size, **kwargs).to(self.device),
            )
        elif self.rssm_type == 'continuous':
            return RSSMContState(
                torch.zeros(batch_size, self.stoch_size, **kwargs).to(self.device),
                torch.zeros(batch_size, self.stoch_size, **kwargs).to(self.device),
                torch.zeros(batch_size, self.stoch_size, **kwargs).to(self.device),
                torch.zeros(batch_size, self.deter_size, **kwargs).to(self.device),
            )
        

    def get_stoch_state(self, stats):
        '''
        这个函数好像是根据计算得到的概率分布，然后采样得到随机状态
        '''
        if self.rssm_type == 'discrete':
            logit = stats['logit']
            shape = logit.shape # (batch_size, stoch_size)
            # 将logit转换为(batch_size, category_size, class_size)，那么category_size*class_size就要stoch_size
            # todo 结合后续学习class_size和category_size分别代表什么意思？有什么意义
            logit = torch.reshape(logit, shape = (*shape[:-1], self.category_size, self.class_size))
            # 然后根据OneHotCategorical创建一个概率分布
            dist = torch.distributions.OneHotCategorical(logits=logit)        
            # stoch shape = (batch_size, category_size, class_size)
            stoch = dist.sample()
            '''
            这行代码的目的是通过一种称为 "Straight-Through Estimator" 的技术来实现梯度传播。具体来说，它在离散采样过程中保持梯度信息，以便在反向传播时能够更新模型参数。
具体解释
dist.sample()：首先，从 OneHotCategorical 分布中采样得到 stoch，这是一个 one-hot 编码的张量，表示离散的随机变量。

dist.probs：这是 OneHotCategorical 分布的概率，表示每个类别的概率分布。

dist.probs.detach()：使用 detach() 方法从计算图中分离出 dist.probs，使其在反向传播时不会计算梯度。

dist.probs - dist.probs.detach()：这部分计算结果是一个零梯度的张量，因为 dist.probs 和 dist.probs.detach() 的值是相同的，但 dist.probs.detach() 没有梯度。

stoch += dist.probs - dist.probs.detach()：这一步将采样得到的 stoch 加上零梯度的 dist.probs，从而在前向传播时保持 stoch 的值不变，但在反向传播时，梯度将通过 dist.probs 传播。

作用
这种技术的作用是：

保持离散采样的值：在前向传播时，stoch 保持为离散采样的值。
实现梯度传播：在反向传播时，梯度通过 dist.probs 传播，从而实现对模型参数的更新。
这种方法在训练包含离散变量的神经网络时非常有用，因为离散采样本身是不可微的，而这种技术允许我们在不改变采样值的情况下实现梯度传播。
            '''
            stoch += dist.probs - dist.probs.detach()
            # 返回的stoch shape = (batch_size, stoch_size)
            return torch.flatten(stoch, start_dim=-2, end_dim=-1)

        elif self.rssm_type == 'continuous':
            # 如果是连续的随机状态，直接返回均值和标准差
            mean = stats['mean']
            std = stats['std']
            # min_std标准差的最小值，超参数，避免标准差过小，导致数值不稳定
            # F.softplus(std)函数将标准差 std 转换为正值
            # 因为过小的标准差可能导致数值不稳定和梯度爆炸。
            std = F.softplus(std) + self.min_std
            '''
            torch.randn_like(mean)：生成一个与 mean 张量形状相同的标准正态分布（均值为 0，标准差为 1）的随机张量。
std * torch.randn_like(mean)：将生成的标准正态分布随机张量乘以标准差 std，得到一个标准差为 std 的正态分布随机张量。
mean + std * torch.randn_like(mean)：将上述结果加上均值 mean，得到一个均值为 mean，标准差为 std 的正态分布随机张量。
            '''
            return mean + std*torch.randn_like(mean), std
    

    def rssm_imagine(self, prev_action, prev_rssm_state, nonterms=True):
        '''
        模拟t时刻动作,rssm状态，t时刻的非终止状态

        返回根据t时刻的动作和rssm状态，预测的t+1时刻的rssm状态，也就是先验状态
        '''
        # 随机状态和动作嵌入结合(stoch_size+action_size)抽取得到随机状态动作嵌入（shape=deter_size）
        # 如果遇到中止状态那么prev_action就是0，prev_rssm_state.stoch*nonterms就是0
        # state_action_embed中提取的对应特征也是0
        state_action_embed = self.fc_embed_state_action(torch.cat([prev_rssm_state.stoch*nonterms, prev_action],dim=-1))
        # 将其提取的动作特征输入到RNN中，得到deter_state确定性状态
        deter_state = self.rnn(state_action_embed, prev_rssm_state.deter*nonterms)
        # 根据观察时离散还是连续，得到不同的随机状态
        if self.rssm_type == 'discrete':
            # 如果是离散的随机状态，需要将输出的结果作为 logits，然后得到随机分布状态
            prior_logit = self.fc_prior(deter_state)
            stats = {'logit':prior_logit}
            # 根据logits得到随机状态
            prior_stoch_state = self.get_stoch_state(stats)
            prior_rssm_state = RSSMDiscState(prior_logit, prior_stoch_state, deter_state)

        elif self.rssm_type == 'continuous':
            # 对于连续的随机状态，需要将输出的结果分成两部分，一部分是均值，一部分是标准差
            prior_mean, prior_std = torch.chunk(self.fc_prior(deter_state), 2, dim=-1)
            stats = {'mean':prior_mean, 'std':prior_std}
            # 根据logits得到随机状态
            prior_stoch_state, std = self.get_stoch_state(stats)
            prior_rssm_state = RSSMContState(prior_mean, std, prior_stoch_state, deter_state)
        return prior_rssm_state
        
    
    def rssm_observe(self, obs_embed, prev_action, prev_nonterm, prev_rssm_state):
        '''
        obs_embed: t+1时刻的观察嵌入特征
        prev_action: t时刻的动作,如果是终止状态则是0
        prev_nonterm: t时刻的非终止状态
        prev_rssm_state: t时刻的RSSM状态

        返回预测得到的先验rssm状态和后验rssm状态
        '''
        prior_rssm_state = self.rssm_imagine(prev_action, prev_rssm_state, prev_nonterm)
        deter_state = prior_rssm_state.deter
        # 结合确定性状态（动作和RSSM状态）和t+1时刻的观察嵌入特征
        # 后验状态是结合了实际的观察
        x = torch.cat([deter_state, obs_embed], dim=-1)
        if self.rssm_type == 'discrete':
            # 得到后验状态的logits
            posterior_logit = self.fc_posterior(x)
            stats = {'logit':posterior_logit}
            posterior_stoch_state = self.get_stoch_state(stats)
            posterior_rssm_state = RSSMDiscState(posterior_logit, posterior_stoch_state, deter_state)
        
        elif self.rssm_type == 'continuous':
            # 得到后验状态的均值和方差
            posterior_mean, posterior_std = torch.chunk(self.fc_posterior(x), 2, dim=-1)
            stats = {'mean':posterior_mean, 'std':posterior_std}
            posterior_stoch_state, std = self.get_stoch_state(stats)
            posterior_rssm_state = RSSMContState(posterior_mean, std, posterior_stoch_state, deter_state)
        return prior_rssm_state, posterior_rssm_state
    
    
    def get_model_state(self, rssm_state):
        # 结合确定性状态和随机状态得到模型状态
        if self.rssm_type == 'discrete':
            return torch.cat((rssm_state.deter, rssm_state.stoch), dim=-1)
        elif self.rssm_type == 'continuous':
            return torch.cat((rssm_state.deter, rssm_state.stoch), dim=-1)
    


    def forward(self, prev_state, actions, prev_belief, observations, nonterminals):
        T = actions.size(0) + 1
        beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = (
            [torch.empty(0)] * T,
            [torch.empty(0)] * T,
            [torch.empty(0)] * T,
            [torch.empty(0)] * T,
            [torch.empty(0)] * T,
            [torch.empty(0)] * T,
            [torch.empty(0)] * T
        )

        beliefs[0], prior_states[0], posterior_states[0] = prev_belief, prev_state, prev_state

        for t in range(T - 1):
            _state = (
                prior_states[t] if observations is None else posterior_states[t]
            )
            _state = (
                _state if nonterminals is None else _state * nonterminals[t]
            )

            hidden = F.elu(self.fc_embed_state_action(torch.cat([_state, actions[t]], dim=1)))
            beliefs[t+1] = self.rnn(hidden, beliefs[t])
            hidden = F.elu(self.fc_embed_belief_prior(beliefs[t+1]))
            prior_means[t + 1], _prior_std_dev = torch.chunk(self.fc_state_prior(hidden), 2, dim=1)
            prior_std_devs[t + 1] = F.softplus(_prior_std_dev) + self.min_std_dev
            prior_states[t + 1] = prior_means[t + 1] + prior_std_devs[t + 1] * torch.randn_like(prior_means[t + 1])
            if observations is not None:
                t_ = t - 1
                hidden = F.elu(self.fc_embed_belief_posterior(torch.cat([beliefs[t + 1], observations[t_ + 1]], dim=1)))
                posterior_means[t + 1], _posterior_std_dev = torch.chunk(self.fc_state_priorior(hidden), 2, dim=1)
                posterior_std_devs[t + 1] = F.softplus(_posterior_std_dev) + self.min_std_dev
                posterior_states[t + 1] = posterior_means[t + 1] + posterior_std_devs[t + 1] * torch.randn_like(posterior_means[t + 1])

        hidden = [
            torch.stack(beliefs[1:], dim=0),
            torch.stack(prior_states[1:], dim=0),
            torch.stack(prior_means[1:], dim=0),
            torch.stack(prior_std_devs[1:], dim=0),
        ]

        if observations is not None:
            hidden += [
                torch.stack(posterior_states[1:], dim=0),
                torch.stack(posterior_means[1:], dim=0),
                torch.stack(posterior_std_devs[1:], dim=0)
            ]

        return hidden
    

    def rssm_stack_states(self, rssm_states, dim):
        if self.rssm_type == 'discrete':
            return RSSMDiscState(
                torch.stack([state.logit for state in rssm_states], dim=dim),
                torch.stack([state.stoch for state in rssm_states], dim=dim),
                torch.stack([state.deter for state in rssm_states], dim=dim),
            )
        elif self.rssm_type == 'continuous':
            return RSSMContState(
            torch.stack([state.mean for state in rssm_states], dim=dim),
            torch.stack([state.std for state in rssm_states], dim=dim),
            torch.stack([state.stoch for state in rssm_states], dim=dim),
            torch.stack([state.deter for state in rssm_states], dim=dim),
        )


    def rollout_observation(self, seq_len:int, obs_embed: torch.Tensor, action: torch.Tensor, nonterms: torch.Tensor, prev_rssm_state):
        '''
        seq_len: 序列长度
        obs_embed: 观察嵌入特征
        action: 动作
        nonterms: 非终止状态
        prev_rssm_state: 前一个RSSM状态,在t=0时刻是初始化的状态,全0
        '''
        
        priors = [] # 存储t时刻的先验状态
        posteriors = [] # 存储t时刻的后验状态
        # 遍历序列长度
        for t in range(seq_len):
            # t时刻的动作 乘以 t时刻的非终止状态
            # 如果不终止，那么prev_action 就是 t时刻的动作
            # 如果终止，那么prev_action 就是 t时刻的动作乘以0,最终结果也是0
            prev_action = action[t]*nonterms[t]
            # 根据t+1时刻的观察��入特征，t时刻的动作，t时刻的非终止状态，t1时刻的RSSM状态
            # 得到t时刻的先验状态和后验状态
            prior_rssm_state, posterior_rssm_state = self.rssm_observe(obs_embed[t], prev_action, nonterms[t], prev_rssm_state)
            priors.append(prior_rssm_state)
            posteriors.append(posterior_rssm_state)
            prev_rssm_state = posterior_rssm_state
        # 将收集到的1-nt时刻的先验和后验状态��接成一个batch
        prior = self.rssm_stack_states(priors, dim=0)
        post = self.rssm_stack_states(posteriors, dim=0)
        return prior, post


class DenseModel(nn.Module):
    '''
    Generic MLP head used for reward prediction (actor side) and value
    estimation (critic side) of the world model.
    '''
    def __init__(
            self,
            output_shape,
            input_size,
            info,
        ):
        """
        :param output_shape: tuple containing shape of expected output
                             (e.g. (1,) for reward/value heads)
        :param input_size: size of input features (model state: stoch_size + deter_size)
        :param info: dict containing num of hidden layers, size of hidden layers,
                     activation function, output distribution etc.
        """
        super().__init__()
        self._output_shape = output_shape
        self._input_size = input_size
        self._layers = info['layers']
        self._node_size = info['node_size']
        self.activation = info['activation']
        self.dist = info['dist']
        self.model = self.build_model()

    def build_model(self):
        """Build a fully connected net: input -> hidden x layers -> flat output."""
        model = [nn.Linear(self._input_size, self._node_size)]
        model += [self.activation()]
        for i in range(self._layers-1):
            model += [nn.Linear(self._node_size, self._node_size)]
            model += [self.activation()]
        # Final layer flattens the requested output shape into one vector.
        model += [nn.Linear(self._node_size, int(np.prod(self._output_shape)))]
        return nn.Sequential(*model)

    def forward(self, input):
        """
        Return a distribution over the output — Normal(mean, 1) or
        Bernoulli(logits) wrapped in Independent — or the raw features
        when self.dist is None.

        :raises NotImplementedError: for any unsupported distribution name
        """
        dist_inputs = self.model(input)
        if self.dist == 'normal':
            return td.independent.Independent(td.Normal(dist_inputs, 1), len(self._output_shape))
        if self.dist == 'binary':
            return td.independent.Independent(td.Bernoulli(logits=dist_inputs), len(self._output_shape))
        if self.dist is None:
            return dist_inputs

        # Bug fix: was `self._dist`, an attribute that is never set, so the
        # error path itself crashed with AttributeError instead of raising
        # NotImplementedError with the offending value.
        raise NotImplementedError(self.dist)
    


class DreamerActorModel(nn.Module):
    """
    DreamerV2 policy network: maps a model state (deterministic +
    stochastic features) to a one-hot categorical action distribution.
    """

    def __init__(self,
                action_size,
                deter_size,
                stoch_size,
                embedding_size,
                actor_info,
                expl_info):

        super().__init__()

        self.action_size = action_size
        self.deter_size = deter_size
        self.stoch_size = stoch_size
        self.embedding_size = embedding_size
        self.layers = actor_info['layers']
        self.node_size = actor_info['node_size']
        # Bug fix: `act_fn` was assigned twice from the same key; keep one.
        self.act_fn = actor_info['activation']
        self.dist = actor_info['dist']
        # Exploration-schedule parameters.
        self.train_noise = expl_info['train_noise']
        self.eval_noise = expl_info['eval_noise']
        self.expl_min = expl_info['expl_min']
        self.expl_decay = expl_info['expl_decay']
        self.expl_type = expl_info['expl_type']
        self.model = self._build_model()

    def _build_model(self):
        '''
        Build an MLP from the model state (deter_size + stoch_size) to
        action logits.  Only the 'one_hot' (discrete) action distribution
        is implemented.

        :raises NotImplementedError: for any other distribution type
        '''
        model = [nn.Linear(self.deter_size + self.stoch_size, self.node_size)]
        model += [self.act_fn()]
        for i in range(1, self.layers):
            model += [nn.Linear(self.node_size, self.node_size)]
            model += [self.act_fn()]

        if self.dist == 'one_hot':
            model += [nn.Linear(self.node_size, self.action_size)]
        else:
            raise NotImplementedError
        return nn.Sequential(*model)

    def forward(self, model_state):
        '''
        Sample a one-hot action for `model_state`.

        Uses the straight-through estimator: `probs - probs.detach()` is
        numerically zero, so the forward value stays the discrete sample,
        but gradients flow through `probs` on backward, which makes the
        non-differentiable sampling step trainable.

        :return: (action, action_dist)
        '''
        action_dist = self.get_action_dist(model_state)
        action = action_dist.sample()
        action = action + action_dist.probs - action_dist.probs.detach()
        return action, action_dist

    def get_action_dist(self, modelstate):
        '''Return the action distribution predicted from the model state.'''
        logits = self.model(modelstate)
        if self.dist == 'one_hot':
            return torch.distributions.OneHotCategorical(logits=logits)
        else:
            raise NotImplementedError

    def add_exploration(self, action: torch.Tensor, itr: int, mode='train'):
        '''
        Epsilon-greedy exploration: with probability `expl_amount`, replace
        the batch of actions with uniformly random one-hot actions.  The
        train-mode epsilon decays linearly with `itr` down to `expl_min`.
        '''
        if mode == 'train':
            expl_amount = self.train_noise
            expl_amount = expl_amount - itr/self.expl_decay
            expl_amount = max(self.expl_min, expl_amount)
        elif mode == 'eval':
            expl_amount = self.eval_noise
        else:
            raise NotImplementedError

        if self.expl_type == 'epsilon_greedy':
            if np.random.uniform(0, 1) < expl_amount:
                # One random action index per batch element.  Bug fix: the
                # original `action[:, index] = 1` set the same columns in
                # every row (wrong for batch size > 1); scatter_ builds a
                # proper per-row one-hot instead.
                index = torch.randint(0, self.action_size, action.shape[:-1], device=action.device)
                action = torch.zeros_like(action)
                action.scatter_(-1, index.unsqueeze(-1), 1.0)
            return action

        raise NotImplementedError
    
    
        

class ObsEncoder(nn.Module):
    def __init__(self, input_shape, embedding_size, info):
        """
        Convolutional encoder mapping image observations to embedding vectors.

        :param input_shape: tuple (c, h, w) — shape of one input observation
        :param embedding_size: desired length of the encoded vector
        :param info: dict with 'activation' (module class), 'depth' (base
                     channel count d) and 'kernel' (conv kernel size k)
        """
        super(ObsEncoder, self).__init__()
        # Fix: removed a leftover debug `print` of the input shape here —
        # it spammed stdout on every construction.
        self.shape = input_shape
        activation = info['activation']
        d = info['depth']
        k = info['kernel']
        self.k = k
        self.d = d
        # Three stride-1 conv layers widening channels d -> 2d -> 4d.
        self.convolutions = nn.Sequential(
            nn.Conv2d(input_shape[0], d, k),
            activation(),
            nn.Conv2d(d, 2*d, k),
            activation(),
            nn.Conv2d(2*d, 4*d, k),
            activation(),
        )

        # Final projection so the output dimension is exactly embedding_size;
        # skipped (Identity) when the flattened conv output already matches.
        if embedding_size == self.embed_size:
            self.fc_1 = nn.Identity()
        else:
            self.fc_1 = nn.Linear(self.embed_size, embedding_size)

    def forward(self, obs):
        # Leading dims (e.g. seq_len, batch) are flattened for the conv
        # stack and restored afterwards.
        batch_shape = obs.shape[:-3]
        # Trailing dims are the image (c, h, w).
        img_shape = obs.shape[-3:]
        embed = self.convolutions(obs.reshape(-1, *img_shape))
        # Back to (*batch_shape, flattened_features).
        embed = torch.reshape(embed, (*batch_shape, -1))
        embed = self.fc_1(embed)
        return embed

    @property
    def embed_size(self):
        """Flattened size of the conv output for the configured input shape."""
        conv1_shape = conv_out_shape(self.shape[1:], 0, self.k, 1)
        conv2_shape = conv_out_shape(conv1_shape, 0, self.k, 1)
        conv3_shape = conv_out_shape(conv2_shape, 0, self.k, 1)
        embed_size = int(4*self.d*np.prod(conv3_shape).item())
        return embed_size

class ObsDecoder(nn.Module):
    """
    Transposed-convolution decoder mapping a feature vector (for DreamerV2:
    the model state) to a Normal distribution over image observations.
    """

    def __init__(self, output_shape, embed_size, info):
        """
        :param output_shape: tuple (c, h, w) — shape of the reconstructed obs
        :param embed_size: the size of input vector, for dreamerv2 : modelstate
        :param info: dict with 'activation', 'depth' and 'kernel'
        """
        super(ObsDecoder, self).__init__()
        c, h, w = output_shape
        activation = info['activation']
        d = info['depth']
        k = info['kernel']
        # Shrink the target spatial size through three conv steps to find
        # the spatial shape the deconv stack must start from.
        spatial = output_shape[1:]
        for _ in range(3):
            spatial = conv_out_shape(spatial, 0, k, 1)
        self.conv_shape = (4*d, *spatial)
        self.output_shape = output_shape
        # Project the embedding to the flattened deconv input size; use an
        # Identity when it already matches.
        flat_conv = np.prod(self.conv_shape).item()
        if embed_size == flat_conv:
            self.linear = nn.Identity()
        else:
            self.linear = nn.Linear(embed_size, flat_conv)
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(4*d, 2*d, k, 1),
            activation(),
            nn.ConvTranspose2d(2*d, d, k, 1),
            activation(),
            nn.ConvTranspose2d(d, c, k, 1),
        )

    def forward(self, x):
        # Leading dims (e.g. seq_len, batch) are flattened for the deconv
        # stack and restored afterwards.
        batch_shape = x.shape[:-1]
        feat_size = x.shape[-1]
        flat_batch = np.prod(batch_shape).item()
        out = self.linear(x.reshape(flat_batch, feat_size))
        out = torch.reshape(out, (flat_batch, *self.conv_shape))
        out = self.decoder(out)
        # Restore (*batch_shape, c, h, w); the deconv output is the
        # per-pixel mean of the reconstruction.
        mean = torch.reshape(out, (*batch_shape, *self.output_shape))
        # Independent treats the (c, h, w) dims as one event, so log_prob
        # sums over all pixels — convenient for high-dimensional images.
        obs_dist = td.Independent(td.Normal(mean, 1), len(self.output_shape))
        return obs_dist
    
def conv_out(h_in, padding, kernel_size, stride):
    """Output size of a conv layer along one spatial dimension."""
    numerator = h_in + 2. * padding - (kernel_size - 1.) - 1.
    return int(numerator / stride + 1.)

def output_padding(h_in, conv_out, padding, kernel_size, stride):
    """Output padding a ConvTranspose needs to reproduce h_in exactly."""
    return h_in + 2 * padding - (conv_out - 1) * stride - (kernel_size - 1) - 1

def conv_out_shape(h_in, padding, kernel_size, stride):
    """Apply conv_out to every spatial dimension of h_in, returning a tuple."""
    return tuple(conv_out(dim, padding, kernel_size, stride) for dim in h_in)

def output_padding_shape(h_in, conv_out, padding, kernel_size, stride):
    """Per-dimension output_padding for a ConvTranspose reproducing h_in."""
    dims = range(len(h_in))
    return tuple(
        output_padding(h_in[i], conv_out[i], padding, kernel_size, stride)
        for i in dims
    )