from learning_to_adapt.samplers.base import SampleProcessor
from learning_to_adapt.utils import tensor_utils
import numpy as np
from learning_to_adapt.logger import logger
import tensorflow as tf
class ModelSampleProcessor(SampleProcessor):
    """Sample processor that prepares one-step transitions for dynamics-model training.

    Compared to the standard sample processor, ``process_samples`` returns data
    aligned as (observation, action, next_observation) tuples: observations and
    actions skip the last step of each path, while next-observations skip the
    first step, since a dynamics model is trained on (obs_t, act_t) -> obs_{t+1}.
    """

    def __init__(
            self,
            baseline=None,
            discount=0.99,
            gae_lambda=1,
            normalize_adv=False,
            positive_adv=False,
            recurrent=False
    ):
        # baseline / gae_lambda / normalize_adv / positive_adv are stored for
        # interface compatibility with SampleProcessor; process_samples below
        # does not read them.
        self.baseline = baseline
        self.discount = discount          # discount factor for cumulative returns
        self.gae_lambda = gae_lambda
        self.normalize_adv = normalize_adv
        self.positive_adv = positive_adv
        self.recurrent = recurrent        # forwarded to concat_tensor_list

    def process_samples(self, paths, log=False, log_prefix=''):
        """Process sampled paths into training data for a dynamics model.

        Args:
            paths (list[dict]): sampled trajectories; each dict must contain
                "observations", "actions" and "rewards" sequences.
            log (bool): whether to log path statistics.
            log_prefix (str): prefix for logged statistic names.

        Returns:
            dict with keys:
                observations:       per-path observations minus the last step
                next_observations:  per-path observations minus the first step
                actions:            per-path actions minus the last step
                timesteps:          0..T-2 step index within each path
                rewards:            per-path rewards minus the last step
                returns:            discounted cumulative returns (full length)
        """
        assert len(paths) > 0
        recurrent = self.recurrent

        # Attach discounted cumulative returns to every path.
        for path in paths:
            path["returns"] = tensor_utils.discount_cumsum(path["rewards"], self.discount)

        # Optionally log statistics about the sampled paths.
        self._log_path_stats(paths, log=log, log_prefix=log_prefix)

        # Build one-step transition tuples: (obs_t, act_t) -> obs_{t+1}.
        observations_dynamics = tensor_utils.concat_tensor_list(
            [path["observations"][:-1] for path in paths], recurrent)
        next_observations_dynamics = tensor_utils.concat_tensor_list(
            [path["observations"][1:] for path in paths], recurrent)
        actions_dynamics = tensor_utils.concat_tensor_list(
            [path["actions"][:-1] for path in paths], recurrent)
        # NOTE(review): timesteps are concatenated WITHOUT the `recurrent`
        # flag, unlike the tensors above — confirm this asymmetry is intended.
        timesteps_dynamics = tensor_utils.concat_tensor_list(
            [np.arange(len(path["observations"]) - 1) for path in paths])

        # Rewards drop the last step to stay aligned with the transitions;
        # returns intentionally keep the full path length, as in the original.
        rewards = tensor_utils.concat_tensor_list(
            [path["rewards"][:-1] for path in paths], recurrent)
        returns = tensor_utils.concat_tensor_list(
            [path["returns"] for path in paths], recurrent)

        samples_data = dict(
            observations=observations_dynamics,
            next_observations=next_observations_dynamics,
            actions=actions_dynamics,
            timesteps=timesteps_dynamics,
            rewards=rewards,
            returns=returns,
        )

        return samples_data
