from learning_to_adapt.utils.serializable import Serializable


class Policy(Serializable):
    """Abstract base class for policies.

    Stores the innermost (fully unwrapped) environment and defines the
    interface that concrete policies must implement.
    """

    def __init__(self, env):
        """Initialize the policy with an environment.

        :param env: the (possibly wrapped) environment this policy acts in
        """
        Serializable.quick_init(self, locals())
        # Peel off wrapper layers so self.env refers to the innermost env.
        unwrapped = env
        while hasattr(unwrapped, 'wrapped_env'):
            unwrapped = unwrapped.wrapped_env
        self.env = unwrapped

    def get_action(self, observation):
        """Return a single action for one observation. Must be overridden."""
        raise NotImplementedError

    def get_actions(self, observations):
        """Return actions for a batch of observations. Must be overridden."""
        raise NotImplementedError

    def reset(self, dones=None):
        """Reset the policy's internal state (no-op for stateless policies).

        :param dones: optional per-environment done flags for vectorized resets
        """
        pass

    @property
    def vectorized(self):
        """
        Indicates whether the policy is vectorized. If True, it should implement get_actions(), and support resetting
        with multiple simultaneous states.
        """
        return False

    @property
    def observation_space(self):
        """Observation space of the underlying (unwrapped) environment."""
        return self.env.observation_space

    @property
    def action_space(self):
        """Action space of the underlying (unwrapped) environment."""
        return self.env.action_space

    @property
    def recurrent(self):
        """
        Indicates whether the policy is recurrent (i.e. keeps internal memory).
        :return:
        """
        return False

    def log_diagnostics(self, paths, prefix=''):
        """
        Log extra information per iteration based on the collected paths.
        """
        pass

    @property
    def state_info_keys(self):
        """
        Return keys for the information related to the policy's state when taking an action.
        :return:
        """
        return [key for key, _shape in self.state_info_specs]

    @property
    def state_info_specs(self):
        """
        Return keys and shapes for the information related to the policy's state when taking an action.
        :return:
        """
        return []

    def terminate(self):
        """
        Clean up operation.
        """
        pass
