/**
 * 
 */
package fr.utbm.gi.vi51.slizers.framework.learning;

import java.util.List;

/**
 * Interface of a Q-Learning problem.
 * 
 * @author Tarik Belabbas
 * 
 */
public interface QProblem< StateType extends QState, ActionType extends QAction >
{
	/**
	 * Returns the learning rate, alpha, i.e. how strongly a new sample
	 * overrides the previously stored Q-value (0 = ignore new information,
	 * 1 = keep only new information).
	 * 
	 * @return the learning rate, alpha
	 */
	public float getAlphaLearningRate();
	
	/**
	 * Returns the discount rate, gamma, i.e. how much future rewards are
	 * valued relative to immediate rewards (0 = myopic, 1 = far-sighted).
	 * 
	 * @return the discount rate, gamma
	 */
	public float getGammaDiscountRate();
	
	/**
	 * Returns rho.
	 * <p>
	 * NOTE(review): presumably the probability of taking a random
	 * (exploratory) action instead of the best known one — confirm
	 * against the Q-learning algorithm implementation.
	 * 
	 * @return rho
	 */
	public float getRho();
	
	/**
	 * Returns nu.
	 * <p>
	 * NOTE(review): presumably the probability of restarting the walk
	 * from a random state — confirm against the Q-learning algorithm
	 * implementation.
	 * 
	 * @return nu
	 */
	public float getNu();
	
	/**
	 * Returns the current state of the problem.
	 * 
	 * @return the current state of the problem
	 */
	public StateType getCurrentState();

	/**
	 * Returns a random state of the problem.
	 * 
	 * @return a random state of the problem
	 */
	public StateType getRandomState();
	
	/**
	 * Sets the current state of the problem.
	 * 
	 * @param _state the new state of the problem
	 */
	public void setCurrentState(StateType _state);
	
	/**
	 * Returns all the available states.
	 * 
	 * @return the available states
	 */
	public List< StateType > getAvailableStates();

	/**
	 * Returns all the actions available from the given state.
	 * 
	 * @param _state the state for which the available actions are requested
	 * @return the actions available from {@code _state}
	 */
	public List< ActionType > getAvailableActionsFor( StateType _state );
	
}
