from learning_to_adapt.dynamics.core.layers import MLP
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from learning_to_adapt.utils.serializable import Serializable
from learning_to_adapt.utils import tensor_utils
from learning_to_adapt.logger import logger
import time


class MetaMLPDynamicsModel(Serializable):
    """
    MLP continuous dynamics model trained with MAML-style meta-learning.

    The network maps a concatenated (observation, action) vector to the
    predicted state difference (delta) to the next observation. Three TF
    graphs are built:

    * a pre-update graph used for plain prediction and for the symbolic
      one-step adaptation op,
    * a meta-training graph that performs one inner gradient step per task
      and optimizes the mean post-adaptation loss across tasks, and
    * a post-update inference graph that evaluates the network with
      externally fed (adapted) parameter values.
    """

    # Activation functions selectable by name (None disables the activation).
    _activations = {
        None: None,
        "relu": tf.nn.relu,
        "tanh": tf.tanh,
        "sigmoid": tf.sigmoid,
        "softmax": tf.nn.softmax,
        "swish": lambda x: x * tf.sigmoid(x)
    }

    def __init__(self,
                 name,
                 env,
                 hidden_sizes=(512, 512),
                 meta_batch_size=10,
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 batch_size=500,
                 learning_rate=0.001,
                 inner_learning_rate=0.1,
                 normalize_input=True,
                 optimizer=tf.train.AdamOptimizer,
                 valid_split_ratio=0.2,
                 rolling_average_persitency=0.99,
                 ):
        """
        Args:
            name (str): variable-scope name of the model.
            env: environment exposing `observation_space` and `action_space`.
            hidden_sizes (tuple): sizes of the MLP hidden layers.
            meta_batch_size (int): number of tasks per meta-update.
            hidden_nonlinearity: hidden activation; a key of `_activations`
                or a callable.
            output_nonlinearity: output activation; a key of `_activations`
                or a callable.
            batch_size (int): half the number of consecutive time steps
                sampled per task in `_get_batch`.
            learning_rate (float): outer (meta) learning rate.
            inner_learning_rate (float): learning rate of the inner
                adaptation step.
            normalize_input (bool): standardize inputs/targets if True.
            optimizer: TF optimizer class used for the meta update.
            valid_split_ratio (float): fraction of data held out for
                validation.
            rolling_average_persitency (float): decay of the validation-loss
                rolling average used for early stopping.
        """
        Serializable.quick_init(self, locals())  # record ctor args for serialization

        self.normalization = None
        self.normalize_input = normalize_input
        self.next_batch = None
        self.meta_batch_size = meta_batch_size

        self.valid_split_ratio = valid_split_ratio
        self.rolling_average_persitency = rolling_average_persitency

        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.inner_learning_rate = inner_learning_rate
        self.name = name
        self._dataset_train = None
        self._dataset_test = None
        self._prev_params = None
        self._adapted_param_values = None

        # determine dimensionality of state and action space
        self.obs_space_dims = obs_space_dims = env.observation_space.shape[0]
        self.action_space_dims = action_space_dims = env.action_space.shape[0]

        # Resolve activations: accept a registered name or a callable directly.
        # (The defaults are callables, which are not keys of `_activations`,
        # so a plain dict lookup would raise KeyError for them.)
        hidden_nonlinearity = self._activations.get(hidden_nonlinearity, hidden_nonlinearity)
        output_nonlinearity = self._activations.get(output_nonlinearity, output_nonlinearity)

        """ ------------------ Pre-Update Graph + Adaptation ----------------------- """
        with tf.variable_scope(name):
            # Placeholders
            self.obs_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))
            self.act_ph = tf.placeholder(tf.float32, shape=(None, action_space_dims))
            self.delta_ph = tf.placeholder(tf.float32, shape=(None, obs_space_dims))

            # Concatenate action and observation --> NN input
            self.nn_input = tf.concat([self.obs_ph, self.act_ph], axis=1)

            # Create MLP
            mlp = MLP(name,
                      output_dim=obs_space_dims,
                      hidden_sizes=hidden_sizes,
                      hidden_nonlinearity=hidden_nonlinearity,
                      output_nonlinearity=output_nonlinearity,
                      input_var=self.nn_input,
                      input_dim=obs_space_dims+action_space_dims)

            self.delta_pred = mlp.output_var  # shape: (batch_size, ndim_obs, n_models)

            # MSE between predicted and observed state deltas
            self.loss = tf.reduce_mean(tf.square(self.delta_ph - self.delta_pred))
            # NOTE(review): this attribute is kept for compatibility but is not
            # used below; the adaptation op builds its own optimizer.
            self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
            # One plain gradient-descent step on the pre-update loss
            self.adaptation_sym = tf.train.GradientDescentOptimizer(self.inner_learning_rate).minimize(self.loss)

            # Compiled prediction function (obs, act) -> delta
            self.f_delta_pred = tensor_utils.compile_function([self.obs_ph, self.act_ph], self.delta_pred)

        """ --------------------------- Meta-training Graph ---------------------------------- """
        # Split inputs/targets along axis 0 into one chunk per task
        nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)
        delta_per_task = tf.split(self.delta_ph, self.meta_batch_size, axis=0)

        # Split each task chunk in half: the first half adapts the parameters
        # ("pre"), the second half evaluates the adapted parameters ("post")
        pre_input_per_task, post_input_per_task = zip(*[tf.split(nn_input, 2, axis=0) for nn_input in nn_input_per_task])
        pre_delta_per_task, post_delta_per_task = zip(*[tf.split(delta, 2, axis=0) for delta in delta_per_task])

        pre_losses = []
        post_losses = []
        self._adapted_params = []

        for idx in range(self.meta_batch_size):
            with tf.variable_scope(name + '/pre_model_%d' % idx, reuse=tf.AUTO_REUSE):
                # Pre-adaptation model, sharing the main MLP's parameters
                pre_mlp = MLP(name,
                              output_dim=obs_space_dims,
                              hidden_sizes=hidden_sizes,
                              hidden_nonlinearity=hidden_nonlinearity,
                              output_nonlinearity=output_nonlinearity,
                              input_var=pre_input_per_task[idx],
                              input_dim=obs_space_dims + action_space_dims,
                              params=mlp.get_params())

                pre_delta_pred = pre_mlp.output_var
                pre_loss = tf.reduce_mean(tf.square(pre_delta_per_task[idx] - pre_delta_pred))
                # Symbolic one-step gradient descent --> task-adapted parameters
                adapted_params = self._adapt_sym(pre_loss, pre_mlp.get_params())
                self._adapted_params.append(adapted_params)

            with tf.variable_scope(name + '/post_model_%d' % idx, reuse=tf.AUTO_REUSE):
                # Post-adaptation model, evaluated with the adapted parameters
                post_mlp = MLP(name,
                               output_dim=obs_space_dims,
                               hidden_sizes=hidden_sizes,
                               hidden_nonlinearity=hidden_nonlinearity,
                               output_nonlinearity=output_nonlinearity,
                               input_var=post_input_per_task[idx],
                               params=adapted_params,
                               input_dim=obs_space_dims + action_space_dims)
                post_delta_pred = post_mlp.output_var

                post_loss = tf.reduce_mean(tf.square(post_delta_per_task[idx] - post_delta_pred))

                pre_losses.append(pre_loss)
                post_losses.append(post_loss)

        # Aggregate the per-task losses and build the meta-update op ONCE,
        # after the loop. (Previously these three lines ran inside the loop,
        # creating one optimizer -- including its slot variables -- per task,
        # of which only the last was ever used.)
        self.pre_loss = tf.reduce_mean(pre_losses)
        self.post_loss = tf.reduce_mean(post_losses)
        self.train_op = optimizer(self.learning_rate).minimize(self.post_loss)

        """ --------------------------- Post-update Inference Graph --------------------------- """
        with tf.variable_scope(name + '_ph_graph'):
            self.post_update_delta = []
            self.network_phs_meta_batch = []

            nn_input_per_task = tf.split(self.nn_input, self.meta_batch_size, axis=0)

            for idx in range(meta_batch_size):
                with tf.variable_scope('task_%i' % idx):
                    # Placeholders through which adapted parameter values are fed
                    network_phs = self._create_placeholders_for_vars(mlp.get_params())
                    self.network_phs_meta_batch.append(network_phs)

                    # Network evaluated with the fed-in (adapted) parameters
                    mlp_meta_batch = MLP(name,
                                         output_dim=obs_space_dims,
                                         hidden_sizes=hidden_sizes,
                                         hidden_nonlinearity=hidden_nonlinearity,
                                         output_nonlinearity=output_nonlinearity,
                                         params=network_phs,
                                         input_var=nn_input_per_task[idx],
                                         input_dim=obs_space_dims + action_space_dims,
                                         )

                    self.post_update_delta.append(mlp_meta_batch.output_var)

        self._networks = [mlp]

    def fit(self, obs, act, obs_next, epochs=1000, compute_normalization=True,
            valid_split_ratio=None, rolling_average_persitency=None, verbose=False, log_tabular=False):
        """
        Train the dynamics model on trajectory data.

        Args:
            obs: observations, shape (n_paths, path_len, obs_dim).
            act: actions, shape (n_paths, path_len, act_dim).
            obs_next: next observations, shape (n_paths, path_len, obs_dim).
            epochs (int): maximum number of training epochs.
            compute_normalization (bool): recompute normalization statistics.
            valid_split_ratio: validation fraction (defaults to ctor value).
            rolling_average_persitency: early-stopping decay (defaults to
                ctor value).
            verbose (bool): log per-epoch statistics.
            log_tabular (bool): emit tabular logger entries after training.
        """
        assert obs.ndim == 3 and obs.shape[2] == self.obs_space_dims
        assert obs_next.ndim == 3 and obs_next.shape[2] == self.obs_space_dims
        assert act.ndim == 3 and act.shape[2] == self.action_space_dims

        if valid_split_ratio is None: valid_split_ratio = self.valid_split_ratio
        if rolling_average_persitency is None: rolling_average_persitency = self.rolling_average_persitency

        assert 1 > valid_split_ratio >= 0

        sess = tf.get_default_session()

        if (self.normalization is None or compute_normalization) and self.normalize_input:
            self.compute_normalization(obs, act, obs_next)

        if self.normalize_input:
            # Normalize data; targets become normalized deltas
            obs, act, delta = self._normalize_data(obs, act, obs_next)
            assert obs.ndim == act.ndim == obs_next.ndim == 3
        else:
            delta = obs_next - obs

        # Split into train and validation set
        obs_train, act_train, delta_train, obs_test, act_test, delta_test = train_test_split(obs, act, delta,
                                                                                             test_split_ratio=valid_split_ratio)

        # Accumulate data across successive fit() calls
        if self._dataset_test is None:
            self._dataset_test = dict(obs=obs_test, act=act_test, delta=delta_test)
            self._dataset_train = dict(obs=obs_train, act=act_train, delta=delta_train)
        else:
            self._dataset_test['obs'] = np.concatenate([self._dataset_test['obs'], obs_test])
            self._dataset_test['act'] = np.concatenate([self._dataset_test['act'], act_test])
            self._dataset_test['delta'] = np.concatenate([self._dataset_test['delta'], delta_test])

            self._dataset_train['obs'] = np.concatenate([self._dataset_train['obs'], obs_train])
            self._dataset_train['act'] = np.concatenate([self._dataset_train['act'], act_train])
            self._dataset_train['delta'] = np.concatenate([self._dataset_train['delta'], delta_train])

        valid_loss_rolling_average = None
        epoch_times = []

        """ ------- Looping over training epochs ------- """
        # Each step consumes meta_batch_size windows of 2 * batch_size steps
        num_steps_per_epoch = max(int(np.prod(self._dataset_train['obs'].shape[:2])
                                / (self.meta_batch_size * self.batch_size * 2)), 1)
        num_steps_test = max(int(np.prod(self._dataset_test['obs'].shape[:2])
                                / (self.meta_batch_size * self.batch_size * 2)), 1)

        for epoch in range(epochs):

            # preparations for recording training stats
            pre_batch_losses = []
            post_batch_losses = []
            t0 = time.time()

            """ ------- Looping through the shuffled and batched dataset for one epoch -------"""
            for _ in range(num_steps_per_epoch):
                obs_batch, act_batch, delta_batch = self._get_batch(train=True)

                pre_batch_loss, post_batch_loss, _ = sess.run([self.pre_loss, self.post_loss, self.train_op],
                                                              feed_dict={self.obs_ph: obs_batch,
                                                                         self.act_ph: act_batch,
                                                                         self.delta_ph: delta_batch})

                pre_batch_losses.append(pre_batch_loss)
                post_batch_losses.append(post_batch_loss)

            valid_losses = []
            for _ in range(num_steps_test):
                obs_test, act_test, delta_test = self._get_batch(train=False)

                # compute validation loss (pre-update loss on held-out data)
                feed_dict = {self.obs_ph: obs_test,
                             self.act_ph: act_test,
                             self.delta_ph: delta_test}
                valid_loss = sess.run(self.loss, feed_dict=feed_dict)
                valid_losses.append(valid_loss)

            valid_loss = np.mean(valid_losses)
            if valid_loss_rolling_average is None:
                # initialize the rolling average above the first value so a
                # single noisy first epoch cannot trigger early stopping
                valid_loss_rolling_average = 1.5 * valid_loss
                valid_loss_rolling_average_prev = 2 * valid_loss
                if valid_loss < 0:
                    valid_loss_rolling_average = valid_loss/1.5
                    valid_loss_rolling_average_prev = valid_loss/2

            # exponential moving average of the validation loss
            valid_loss_rolling_average = rolling_average_persitency*valid_loss_rolling_average \
                                         + (1.0-rolling_average_persitency)*valid_loss

            epoch_times.append(time.time() - t0)

            if verbose:
                logger.log("Training DynamicsModel - finished epoch %i - "
                           "train loss: %.4f   valid loss: %.4f   valid_loss_mov_avg: %.4f   epoch time: %.2f"
                           % (epoch, np.mean(post_batch_losses), valid_loss, valid_loss_rolling_average,
                              time.time() - t0))

            # stop when the rolling validation loss stops improving or the
            # epoch budget is exhausted
            if valid_loss_rolling_average_prev < valid_loss_rolling_average or epoch == epochs - 1:
                logger.log('Stopping Training of Model since valid_loss_rolling_average stopped improving')
                break
            valid_loss_rolling_average_prev = valid_loss_rolling_average

        """ ------- Tabular Logging ------- """
        if log_tabular:
            logger.logkv('AvgModelEpochTime', np.mean(epoch_times))
            logger.logkv('Post-Loss', np.mean(post_batch_losses))
            logger.logkv('Pre-Loss', np.mean(pre_batch_losses))
            logger.logkv('Epochs', epoch)

    def predict(self, obs, act):
        """
        Predict the next observation for each (obs, act) pair.

        Args:
            obs: observations, shape (batch, obs_dim).
            act: actions, shape (batch, act_dim).

        Returns:
            Predicted next observations, shape (batch, obs_dim).
        """
        assert obs.shape[0] == act.shape[0]
        assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
        assert act.ndim == 2 and act.shape[1] == self.action_space_dims

        obs_original = obs

        if self.normalize_input:
            obs, act = self._normalize_data(obs, act)
            delta = np.array(self._predict(obs, act))
            # prediction happens in normalized space; map the delta back
            delta = denormalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
        else:
            delta = np.array(self._predict(obs, act))

        assert delta.ndim == 2
        pred_obs = obs_original + delta

        return pred_obs

    def _predict(self, obs, act):
        """Compute the predicted delta, using adapted parameters when available."""
        if self._adapted_param_values is not None:
            # run the post-update graph, feeding the adapted parameter values
            sess = tf.get_default_session()
            obs, act = self._pad_inputs(obs, act)
            feed_dict = {self.obs_ph: obs, self.act_ph: act}
            feed_dict.update(self.network_params_feed_dict)
            delta = sess.run(self.post_update_delta[:self._num_adapted_models], feed_dict=feed_dict)
            delta = np.concatenate(delta, axis=0)
        else:
            # no adaptation yet: use the shared pre-update network
            delta = self.f_delta_pred(obs, act)

        return delta

    def _pad_inputs(self, obs, act, obs_next=None):
        """Zero-pad inputs so the batch covers all meta_batch_size task slots."""
        if self._num_adapted_models < self.meta_batch_size:
            pad = int(obs.shape[0] / self._num_adapted_models * (self.meta_batch_size - self._num_adapted_models))
            obs = np.concatenate([obs, np.zeros((pad,) + obs.shape[1:])], axis=0)
            act = np.concatenate([act, np.zeros((pad,) + act.shape[1:])], axis=0)
            if obs_next is not None:
                obs_next = np.concatenate([obs_next, np.zeros((pad,) + obs_next.shape[1:])], axis=0)

        if obs_next is not None:
            return obs, act, obs_next
        else:
            return obs, act

    def adapt(self, obs, act, obs_next):
        """
        Adapt the model to new data with one inner gradient step per task.

        Args:
            obs, act, obs_next: lists (one entry per task) of 2D arrays.
        """
        self._num_adapted_models = len(obs)
        assert len(obs) == len(act) == len(obs_next)
        # duplicate each task chunk with a zero block so the meta-graph's
        # pre/post halving sees the real data in the "pre" half
        obs = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs], axis=0)
        act = np.concatenate([np.concatenate([a, np.zeros_like(a)], axis=0) for a in act], axis=0)
        obs_next = np.concatenate([np.concatenate([ob, np.zeros_like(ob)], axis=0) for ob in obs_next], axis=0)

        obs, act, obs_next = self._pad_inputs(obs, act, obs_next)
        assert obs.shape[0] == act.shape[0] == obs_next.shape[0]
        assert obs.ndim == 2 and obs.shape[1] == self.obs_space_dims
        assert act.ndim == 2 and act.shape[1] == self.action_space_dims
        assert obs_next.ndim == 2 and obs_next.shape[1] == self.obs_space_dims

        if self.normalize_input:
            # Normalize data; targets become normalized deltas
            obs, act, delta = self._normalize_data(obs, act, obs_next)
            assert obs.ndim == act.ndim == obs_next.ndim == 2
        else:
            delta = obs_next - obs

        # remember pre-adaptation parameters so we can restore them later
        self._prev_params = [nn.get_param_values() for nn in self._networks]

        sess = tf.get_default_session()
        self._adapted_param_values = sess.run(self._adapted_params[:self._num_adapted_models],
                                              feed_dict={self.obs_ph: obs, self.act_ph: act, self.delta_ph: delta})

    def switch_to_pre_adapt(self):
        """
        Restore the pre-adaptation parameters and discard adapted values.

        No-op when `adapt` has not been called since the last switch.
        """
        if self._prev_params is not None:
            [nn.set_params(params) for nn, params in zip(self._networks, self._prev_params)]
            self._prev_params = None
            self._adapted_param_values = None

    def _get_batch(self, train=True):
        """
        Sample a meta-batch of contiguous windows.

        Args:
            train (bool): sample from the training set if True, else from
                the held-out set.

        Returns:
            Tuple (obs_batch, act_batch, delta_batch), each of shape
            (meta_batch_size * 2 * batch_size, dim).
        """
        dataset = self._dataset_train if train else self._dataset_test
        return self._sample_meta_batch(dataset)

    def _sample_meta_batch(self, dataset):
        """Sample one window of 2 * batch_size consecutive steps per task slot."""
        num_paths, len_path = dataset['obs'].shape[:2]
        idx_path = np.random.randint(0, num_paths, size=self.meta_batch_size)
        # window centers, kept batch_size away from both path ends
        idx_batch = np.random.randint(self.batch_size, len_path - self.batch_size, size=self.meta_batch_size)

        obs_batch = np.concatenate([dataset['obs'][ip, ib - self.batch_size:ib + self.batch_size, :]
                                    for ip, ib in zip(idx_path, idx_batch)], axis=0)
        act_batch = np.concatenate([dataset['act'][ip, ib - self.batch_size:ib + self.batch_size, :]
                                    for ip, ib in zip(idx_path, idx_batch)], axis=0)
        delta_batch = np.concatenate([dataset['delta'][ip, ib - self.batch_size:ib + self.batch_size, :]
                                      for ip, ib in zip(idx_path, idx_batch)], axis=0)
        return obs_batch, act_batch, delta_batch

    def _normalize_data(self, obs, act, obs_next=None):
        """
        Standardize observations and actions with the stored statistics.

        When `obs_next` is given, additionally returns the normalized delta
        (obs_next - obs); otherwise returns only (obs, act) normalized.
        """
        obs_normalized = normalize(obs, self.normalization['obs'][0], self.normalization['obs'][1])
        actions_normalized = normalize(act, self.normalization['act'][0], self.normalization['act'][1])

        if obs_next is not None:
            delta = obs_next - obs
            deltas_normalized = normalize(delta, self.normalization['delta'][0], self.normalization['delta'][1])
            return obs_normalized, actions_normalized, deltas_normalized
        else:
            return obs_normalized, actions_normalized

    def compute_normalization(self, obs, act, obs_next):
        """
        Compute and store per-dimension mean/std for obs, act and delta.

        All inputs must have shape (n_paths, path_len, dim); statistics are
        taken over the first two axes and stored in `self.normalization`.
        """
        assert obs.shape[0] == obs_next.shape[0] == act.shape[0]
        assert obs.shape[1] == obs_next.shape[1] == act.shape[1]
        delta = obs_next - obs

        assert delta.ndim == 3 and delta.shape[2] == obs_next.shape[2] == obs.shape[2]

        # store means and std in dict
        self.normalization = OrderedDict()
        self.normalization['obs'] = (np.mean(obs, axis=(0, 1)), np.std(obs, axis=(0, 1)))
        self.normalization['delta'] = (np.mean(delta, axis=(0, 1)), np.std(delta, axis=(0, 1)))
        self.normalization['act'] = (np.mean(act, axis=(0, 1)), np.std(act, axis=(0, 1)))

    def _adapt_sym(self, loss, params_var):
        """
        Build the symbolic one-step gradient-descent update of `params_var`.

        Args:
            loss: scalar loss tensor to differentiate.
            params_var (OrderedDict): parameter tensors keyed by name.

        Returns:
            OrderedDict mapping the same keys to the adapted parameter tensors.
        """
        update_param_keys = list(params_var.keys())

        grads = tf.gradients(loss, [params_var[key] for key in update_param_keys])
        gradients = dict(zip(update_param_keys, grads))

        # Gradient descent: theta' = theta - alpha * grad
        adapted_policy_params = [params_var[key] - tf.multiply(self.inner_learning_rate, gradients[key])
                                 for key in update_param_keys]

        adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))

        return adapted_policy_params_dict

    def _create_placeholders_for_vars(self, vars):
        """
        Create a float32 placeholder matching each variable's shape.

        Args:
            vars (OrderedDict): variables keyed by name.

        Returns:
            OrderedDict mapping the same keys to placeholders named
            '<key>_ph'.
        """
        placeholders = OrderedDict()
        for key, var in vars.items():
            placeholders[key] = tf.placeholder(tf.float32, shape=var.shape, name=key + '_ph')
        return OrderedDict(placeholders)

    @property
    def network_params_feed_dict(self):
        """
        Feed dict mapping each task's parameter placeholders to the adapted
        parameter values produced by `adapt`.
        """
        return dict(list((self.network_phs_meta_batch[i][key], self._adapted_param_values[i][key])
                         for key in self._adapted_param_values[0].keys() for i in range(self._num_adapted_models)))

    def __getstate__(self):
        """Serialize ctor args, normalization stats, and network weights."""
        state = dict()
        state['init_args'] = Serializable.__getstate__(self)
        state['normalization'] = self.normalization
        state['networks'] = [nn.__getstate__() for nn in self._networks]
        return state

    def __setstate__(self, state):
        """Restore the state produced by `__getstate__`."""
        Serializable.__setstate__(self, state['init_args'])
        self.normalization = state['normalization']
        for i in range(len(self._networks)):
            self._networks[i].__setstate__(state['networks'][i])


def normalize(data_array, mean, std):
    """Standardize *data_array* with the given per-dimension mean and std.

    A tiny epsilon keeps the division finite for zero-variance dimensions.
    """
    eps = 1e-10
    centered = data_array - mean
    return centered / (std + eps)


def denormalize(data_array, mean, std):
    """Invert `normalize`: rescale *data_array* by std and shift by mean.

    Uses the same epsilon as `normalize` so the two are exact inverses.
    """
    eps = 1e-10
    scaled = data_array * (std + eps)
    return scaled + mean


def train_test_split(obs, act, delta, test_split_ratio=0.2):
    """Randomly split aligned (obs, act, delta) arrays into train/test parts.

    The three arrays must agree on the leading (sample) dimension. The
    samples are shuffled, then the first (1 - test_split_ratio) fraction
    becomes the training set and the remainder the test set, keeping rows
    aligned across the three arrays.

    Returns:
        (obs_train, act_train, delta_train, obs_test, act_test, delta_test)
    """
    assert obs.shape[0] == act.shape[0] == delta.shape[0]
    n_samples = obs.shape[0]
    order = np.arange(n_samples)
    np.random.shuffle(order)

    n_train = int(n_samples * (1 - test_split_ratio))
    idx_train, idx_test = order[:n_train], order[n_train:]
    assert len(idx_train) + len(idx_test) == n_samples

    train_part = tuple(arr[idx_train, :] for arr in (obs, act, delta))
    test_part = tuple(arr[idx_test, :] for arr in (obs, act, delta))
    return train_part + test_part
