/*
 * Javlov - a Java toolkit for reinforcement learning with multi-agent support.
 * 
 * Copyright (c) 2009 Matthijs Snel
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov;

import java.util.Arrays;

/**
 * Agent implementing the Sarsa algorithm.
 * 
 * @author Matthijs Snel
 *
 */
//TODO: this implementation currently only works with tabular Q-functions.
//TODO: does not work in non-SMDP mode with options (i.e. when updating every step).
public class SarsaAgent extends TDAgent {

	/**
	 * The Q-value function.
	 */
	protected QFunction q;
	
	/**
	 * The Q-value of the last state-option pair, Q(s_t, o_t), cached at
	 * selection/update time so the TD error can be formed without querying
	 * the Q-function again.
	 */
	protected double lastQValue;
	
	/**
	 * Last option o_t, i.e. the option whose Q-value is adjusted at the next
	 * learning update. Assigned in {@link #firstStep(State)} and refreshed by
	 * {@link #currentOptionHasFinished()}.
	 */
	protected Option lastOption;
	
	// If true, a state-value function is learned as well, by delegating to the
	// superclass update inside doLearnStep. Disabled by default.
	protected boolean learnV = false;
	
	/**
	 * Default constructor. A Q-function must be supplied via
	 * {@link #setQFunction(QFunction)} before the agent is used.
	 */
	public SarsaAgent() {}
	
	/**
	 * Constructs agent with given Q-value function and gamma = 0.9.
	 * @param q the value function to use.
	 */
	public SarsaAgent(QFunction q) {
		this(q, 0.9);
	}
	
	/**
	 * Constructs agent with given Q-value function and gamma.
	 * @param q the value function to use.
	 * @param gamma the discount factor gamma in [0, 1].
	 */
	public SarsaAgent(QFunction q, double gamma) {
		setQFunction(q);
		setGamma(gamma);
	}
	
	/**
	 * Performs one interaction step: queries the Q-values for {@code s},
	 * selects an action (possibly switching to a new option), and, when
	 * learning is enabled, performs a Sarsa learning update.
	 *
	 * @param s the current state
	 * @param reward the reward received for the previous action
	 * @return the action to execute in {@code s}
	 */
	@Override
	public <T> Action doStep( State<T> s, double reward ) {
		double[] qvalues = q.getValues(s);
		//pickAction also makes current option learn, if it is learnable option
		Action a = pickAction(s, reward, qvalues);
		
		if ( learning )
			doLearnStep(s, reward, qvalues);

		//System.out.println("s: " + s + ", q: " + Arrays.toString(qvalues) + ", a: " + a);
		return a;
	}

	/**
	 * Handles the terminal step of an episode: terminates the current option,
	 * marks it as finished, and performs a final learning update if learning
	 * is enabled. No action is returned because the episode has ended.
	 *
	 * @param s the terminal state
	 * @param reward the final reward
	 */
	@Override
	public <T> void lastStep(State<T> s, double reward) {
		//last learning step; will also terminate the option
		currentOption.lastStep(s, reward);
		currentOptionHasFinished();
		
		if ( learning )
			doLearnStep(s, reward, q.getValues(s));
		
		//System.out.println("s: " + s + ", q: " + Arrays.toString(q.getValues(s)));

	}
	
	/**
	 * Advances the current option by one step and returns its action. If the
	 * option reports termination, or the agent decides to interrupt it (see
	 * {@link #interruptCurrentOption(State, double[])}), a new option is
	 * chosen from the policy and that option's first action is returned
	 * instead.
	 *
	 * @param s the current state
	 * @param reward the reward for the previous action, forwarded to the option
	 * @param qvalues the Q-values of all options in {@code s}
	 * @return the next action to execute
	 */
	protected <T> Action pickAction(State<T> s, double reward, double[] qvalues) {
		Action a = currentOption.doStep(s, reward);
		
		//if current option has finished, or agent decides to interrupt the current
		//option, pick a different option and return that option's first action.
		if ( currentOption.isFinished() || interruptCurrentOption(s, qvalues) ) {
			currentOptionHasFinished();
			currentOption = policy.getOption(s, qvalues);
			//learnRate.visit(s, currentOption);
			return currentOption.firstStep(s);
		}
		
		//otherwise, continue executing the policy of current option
		return a;
	}
	
	/**
	 * Performs one Sarsa learning update. In SMDP mode the discounted reward
	 * is accumulated while the current option runs and the Q-function is only
	 * updated once the option has finished; otherwise an update is applied
	 * every step. When {@link #learnV} is set, the superclass state-value
	 * function is updated as well.
	 *
	 * NOTE(review): the SMDP bookkeeping fields (SMDPMode,
	 * optionAccumulatedReward, optionDiscount, currentOptionFinished, gamma)
	 * are inherited from TDAgent and not visible in this file.
	 *
	 * @param s the current state
	 * @param reward the reward for the previous action
	 * @param qvalues the Q-values of all options in {@code s}
	 */
	protected <T> void doLearnStep(State<T> s, double reward, double[] qvalues) {
		if ( SMDPMode ) {
			//accumulate gamma^k * r_k over the running option's duration
			optionAccumulatedReward += optionDiscount*reward;
			optionDiscount *= gamma;
			if ( currentOptionFinished ) {
				if ( learnV )
					super.updateValueFunction(s, super.getSMDPTDError(s, reward) );
				updateValueFunction(s, qvalues, getSMDPTDError(s, reward, qvalues) );
				//reset the SMDP accumulators for the next option
				optionAccumulatedReward = 0;
				optionDiscount = 1;
				currentOptionFinished = false;

			}
		}
		else {
			if ( learnV )
				super.updateValueFunction(s, super.getTDError(s, reward) );
			updateValueFunction(s, qvalues, getTDError(s, reward, qvalues) );
		}
	}
	
	/**
	 * Applies the TD update to Q(lastState, lastOption) and then advances the
	 * agent's history: Q(s, currentOption) becomes the new {@link #lastQValue}
	 * and {@code s} becomes the new lastState. Statement order matters here:
	 * the learning rate is visited before the Q update, and the cached values
	 * are advanced only afterwards.
	 *
	 * @param s the current state, which becomes the new lastState
	 * @param qvalues the Q-values of all options in {@code s}, computed before this update
	 * @param TDError the TD error to apply, scaled by the learning rate
	 */
	protected <T> void updateValueFunction(State<T> s, double[] qvalues, double TDError) {
		learnRate.visit(lastState, lastOption);
		q.update(lastState, lastOption, learnRate.get(lastState,lastOption)*TDError);
		//NOTE(review): qvalues was read before q.update above, so for
		//non-tabular Q-functions this cached value can be stale -- this is
		//the tabular-only limitation flagged in the class-level TODO.
		lastQValue = qvalues[currentOption.getID()];
		lastState = s;
	}
	
	/**
	 * One-step Sarsa TD error: reward + gamma * Q(s, o') - Q(s_t, o_t), where
	 * o' is the currently executing option.
	 *
	 * @param s the current state (not read here; kept for signature symmetry)
	 * @param reward the reward for the previous action
	 * @param qvalues the Q-values of all options in {@code s}
	 * @return the TD error
	 */
	protected <T> double getTDError(State<T> s, double reward, double[] qvalues) {
		return reward + gamma*qvalues[currentOption.getID()] - lastQValue;
	}
	
	/**
	 * SMDP TD error for a finished option: R + gamma^k * Q(s, o') - Q(s_t, o_t),
	 * where R (optionAccumulatedReward) is the discounted reward accumulated
	 * over the k steps the option ran and optionDiscount equals gamma^k.
	 *
	 * @param s the current state (not read here; kept for signature symmetry)
	 * @param reward the reward for the previous action (already folded into the accumulator)
	 * @param qvalues the Q-values of all options in {@code s}
	 * @return the SMDP TD error
	 */
	protected <T> double getSMDPTDError(State<T> s, double reward, double[] qvalues) {
		return optionAccumulatedReward + optionDiscount*qvalues[currentOption.getID()] - lastQValue;
	}
	
	/**
	 * Starts an episode: selects the initial option from the policy, executes
	 * its first action, and initializes the (state, option, Q-value) history
	 * used by subsequent learning updates.
	 *
	 * @param s the initial state
	 * @return the first action of the episode
	 */
	@Override
	public <T> Action firstStep( State<T> s ) {
		currentOption = policy.getOption(s);
		Action a = currentOption.firstStep(s);
		//learnRate.visit(s,currentOption);
		
		lastQValue = q.getValue(s, currentOption);
		lastState = s;
		lastOption = currentOption;
		//System.out.println("s: " + s + ", q: " + Arrays.toString(q.getValues(s)) + ", a: " + a);
		return a;
	}
	
	/**
	 * Resets the agent and its Q-function, clearing the step history.
	 */
	@Override
	public void reset() {
		super.reset();
		lastQValue = 0;
		lastState = null;
		lastOption = null;
		q.reset();
	}
	
	/**
	 * Initializes the agent and its Q-function, clearing the step history.
	 */
	@Override
	public void init() {
		super.init();
		lastQValue = 0;
		lastState = null;
		lastOption = null;
		q.init();
	}
	
	/**
	 * @return the Q-function used by this agent
	 */
	public QFunction getQFunction() {
		return q;
	}

	/**
	 * Sets the Q-Function to be the Q-function passed in as argument.
	 * 
	 * @param q the q-function
	 */
	public void setQFunction(QFunction q) {
		this.q = q;
	}
	
	/**
	 * Enables or disables learning a state-value function in addition to the
	 * Q-function (delegated to the superclass update in doLearnStep).
	 *
	 * @param learnV {@code true} to also learn a state-value function
	 */
	public void setLearnStateValueFunction(boolean learnV) {
		this.learnV = learnV;
	}
	
	/**
	 * Marks the current option as finished and records it as the last option,
	 * so that the next learning update credits the option that just ended.
	 */
	@Override
	protected void currentOptionHasFinished() {
		currentOptionFinished = true;
		lastOption = currentOption;
	}
	
	/**
	 * Decides whether the option the agent is currently executing should be interrupted. If
	 * {@link #interruptsOptions()} returns false, this method will always return false.
	 * 
	 * Otherwise, an option is interrupted if the value of the state under the current policy
	 * is higher than the Q-value of the current option.
	 * 
	 * @param s the state the agent is in
	 * @param qvalues the q-values for that state
	 * @return {@code true} if the current option should be interrupted. {@code false} otherwise.
	 */
	protected <T> boolean interruptCurrentOption(State<T> s, double[] qvalues) {
		if ( !interruptOptions )
			return false;
		//expected value of s under the policy's option-selection probabilities
		double val = 0;
		double[] probs = policy.getOptionProbabilities(s, qvalues);
		for ( int i = 0; i < qvalues.length; i++ )
			val += probs[i]*qvalues[i];
		
		//System.out.println(" V: " + val + ", currQ: " + qvalues[currentOption.getID()] + ", allQ: " + Arrays.toString(qvalues));
		if ( val > qvalues[currentOption.getID()] ) {
			//System.out.println("Interrupting option for " + s);
			//System.out.println(" V: " + val + ", currQ: " + qvalues[currentOption.getID()] + ", allQ: " + Arrays.toString(qvalues));
			//force termination in this state; 0.99 rather than 1.0,
			//presumably to keep beta a proper probability -- TODO confirm
			currentOption.setBeta(s,0.99);
			currentOption.setFinished();
			return true;
		}
		return false;
	}
}
