/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov;

/**
 * An {@code Option} represents a temporally extended action. It is possibly
 * hierarchical, i.e. it can consist of other options.
 * 
 * Primitive actions are represented by the {@code Action} interface, which extends
 * this interface. Therefore, {@code Action}s are just a special case of
 * {@code Option}s: namely, one-step options.
 * 
 * See for further details on the ideas and algorithms behind options:
 * 
 * Sutton, Precup, and Singh (1999). Between MDPs and Semi-MDPs: A Framework
 * for Temporal Abstraction in Reinforcement Learning.
 * Artificial Intelligence 112, pp 181--211.
 *
 * 
 * Since options, like agents, have a policy to pick actions, and can learn
 * in order to improve that policy, options can be considered as special cases
 * of agents, and are implemented as such in Javlov. This design gives rise to
 * a natural decomposition of higher-level agents into lower-level agents,
 * and also allows for lower-level agents/options to execute in a different
 * environment from the higher-level agent(s), possibly with a different
 * state representation and reward function.
 * 
 * In Javlov, the only actions that can be executed on {@code Environment}s are
 * primitive actions. At each timestep, the primitive action selected by the
 * {@code Option}, or one of its lower-level {@code Option}s, can be obtained
 * through the {@link #doStep(State, double)} (when continuing the {@code Option},
 * i.e., when the option was also executing in the previous timestep) or
 * {@link #firstStep(State)} (when starting to execute the option)
 * methods of the {@code Agent} interface.
 * 
 * @author Matthijs Snel
 *
 */
public interface Option<T> extends Agent<T> {

	/**
	 * Returns the termination probability of this option in state {@code s}
	 * (the "beta" of Sutton, Precup &amp; Singh, 1999). For primitive actions,
	 * this method always returns 1.
	 * 
	 * Note that the termination probability may be a fixed value, but may also
	 * depend on the option's execution history, and/or on the termination
	 * status of any lower-level options. 
	 * 
	 * @param s the state for which the termination probability of this option is
	 * queried
	 * @return the termination probability, a value in the range [0, 1]
	 */
	double getBeta(State<? extends T> s);
	
	/**
	 * Gets the option ID, as previously assigned via {@link #setID(int)}.
	 * 
	 * @return the id of this option
	 */
	int getID();
	
	/**
	 * Indicates whether this option is eligible (can be initiated) in state
	 * {@code s}, i.e. whether {@code s} is in this option's initiation set.
	 * 
	 * @param s the state
	 * @return {@code true} if this option can be initiated in {@code s},
	 * {@code false} otherwise
	 */
	boolean isEligible(State<? extends T> s);
	
	/**
	 * Indicates whether this option has finished executing. For primitive actions,
	 * this method always returns {@code true}.
	 * 
	 * An option can finish executing for two reasons:
	 * 
	 * 1. {@link #setFinished()} has been called; this usually occurs when option
	 * execution has been interrupted;
	 * 
	 * 2. The option has terminated during execution of {@link #doStep(State, double)}.
	 * If this happens, {@code doStep} will return {@code null} (see also
	 * {@link #setFinished()}).
	 * 
	 * @return {@code true} if the option has finished executing, {@code false}
	 * otherwise
	 */
	boolean isFinished();
	
	/**
	 * Tells the option it has finished executing, e.g. when interrupting
	 * one option to pick another.
	 * 
	 * Typically, after having terminated, the option may clean up its
	 * history (for semi-Markov options), and will enter the "finished"
	 * state. While finished, calling {@link #doStep(State, double)} will
	 * return null. Option execution may be resumed by calling
	 * {@link #firstStep(State)}.
	 */
	void setFinished();
	
	/**
	 * Sets the option ID. The ID can subsequently be retrieved through
	 * {@link #getID()}.
	 * 
	 * @param id the id to assign to this option
	 */
	void setID(int id);
	
	/**
	 * Method for intra-option learning. If this option would also choose option
	 * {@code o} in state {@code s}, it updates its current value estimate of that
	 * option by {@code update}.
	 * 
	 * See for further details:
	 * 
	 * Sutton, Precup, and Singh (1999). Between MDPs and Semi-MDPs: A Framework
	 * for Temporal Abstraction in Reinforcement Learning.
	 * Artificial Intelligence 112, pp 181--211.
	 * 
	 * @param s the state in which option {@code o} was taken
	 * @param o the option whose value estimate may be updated
	 * @param update the amount by which to adjust the value estimate of {@code o}
	 */
	void update(State<? extends T> s, Option<T> o, double update);

}
