package fr.utbm.gi.vi51.slizers.framework.learning;

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;

/**
 * Implementation of a Q-learning problem solver.
 * 
 * @author Tarik Belabbas
 * 
 */
public abstract class QLearner<StateType extends QState, ActionType extends QAction>
{
	/**
	 * The problem for which the learning is performed.
	 */
	private final QProblem<StateType,ActionType>		problem;

	/**
	 * The store for states and their associated actions/Q-values pairs.
	 */
	private final QValueStore<StateType,ActionType>	store;

	/**
	 * Single random source shared by all decisions of this learner.
	 * Creating a new {@link Random} on every call (as before) wastes work and
	 * risks correlated seeds when calls happen within the same millisecond.
	 */
	private final Random random = new Random();

	/**
	 * Parameterized constructor; starts with a fresh, empty Q-value store.
	 * 
	 * @param _problem the problem for which the learning is performed
	 */
	public QLearner( QProblem< StateType, ActionType > _problem )
	{
		// Delegate to the general constructor instead of duplicating its logic
		this( _problem, null );
	}
	
	/**
	 * Parameterized constructor.
	 * 
	 * @param _problem the problem for which the learning is performed
	 * @param _qvaluestore an existing Q-value store to initialize from, or
	 *        {@code null} to start with a fresh store
	 */
	public QLearner( QProblem< StateType, ActionType > _problem,
			QValueStore< StateType, ActionType > _qvaluestore )
	{
		this.problem = Objects.requireNonNull( _problem, "_problem" );
		
		if ( _qvaluestore != null )
		{
			this.store = new QValueStore<StateType,ActionType>( _problem, _qvaluestore );
		}
		else
		{
			this.store = new QValueStore<StateType,ActionType>( _problem );
		}
	}

	/**
	 * Chooses the next action to take.
	 * <p>
	 * With probability nu the starting state is re-drawn at random (restarting
	 * exploration); with probability rho a random available action is taken
	 * instead of the best-known one (exploration vs. exploitation).
	 * 
	 * @return next action the creature will take
	 */
	public ActionType chooseAction()
	{
		// Get a starting state
		StateType state = this.problem.getCurrentState();

		// Pick a new randomly chosen state every once (or even twice) in a
		// while
		if ( this.random.nextFloat() < this.problem.getNu() )
		{
			state = this.problem.getRandomState();
		}

		// Should we use a random action ?
		ActionType action;
		if ( this.random.nextFloat() < this.problem.getRho() )
		{
			// Get the list of available actions and pick one uniformly
			List< ActionType > actions = ( List< ActionType > ) this.problem
					.getAvailableActionsFor( state );
			action = actions.get( this.random.nextInt( actions.size() ) );
		}
		else
		{
			// Exploit: take the best action known for this state
			action = this.store.getBestAction( state, this.random );
		}
		
		return action;
	}
	
	/**
	 * Returns the actions known for the given state, keyed by their Q-value,
	 * as provided by the underlying Q-value store.
	 * 
	 * @param _state the state to query
	 * @return the Q-value/action pairs for {@code _state}
	 */
	public Map<Float, ActionType> getBestActions(StateType _state) {
		return this.store.getBestActions(_state);
	}
	
	/**
	 * Takes into account the ActionResult (new state & reward) obtained 
	 * by taking action 'action' in state 'lastState', applying the standard
	 * Q-learning update rule.
	 * 
	 * @param lastState previous live iteration state
	 * @param action last action taken
	 * @param result new state and reward gotten from that action in the last state
	 * @return the resulting state, which becomes the problem's current state
	 */
	public StateType learn(StateType lastState, ActionType action, QActionResult<StateType,ActionType> result)
	{
		Objects.requireNonNull( result, "result" );

		// Get the current QValue from the store
		float currentQValue = this.store.getQValue( lastState, action );

		// Get the QValue of the best action from the new state
		StateType newState = result.getState();
		float maxQValue = this.store.getQValue( newState,
				this.store.getBestAction( newState, this.random ) );

		// Perform the QLearning update:
		// Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))
		float newQValue = ( 1.0f - this.problem.getAlphaLearningRate() )
				* currentQValue
				+ this.problem.getAlphaLearningRate()
				* ( result.getReward() + this.problem
						.getGammaDiscountRate() * maxQValue );

		// Store the new QValue
		this.store.storeQValue( lastState, action, newQValue );

		// The resulting state becomes the current state of the problem
		this.problem.setCurrentState( newState );
		return newState;
	}
	
	/**
	 * Returns the Q-value store.
	 * 
	 * @return the Q-value store.
	 */
	protected QValueStore< StateType, ActionType> getQValueStore()
	{
		return this.store;
	}
	
	/**
	 * Computes the reward associated to a state, an action and the resulting
	 * new state.
	 * 
	 * @param lastState the last state
	 * @param action the action taken
	 * @return the reward earned by taking {@code action} in {@code lastState}
	 */
	public abstract float computeReward(StateType lastState, ActionType action );
}
