package net.javlov;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import net.javlov.util.ArrayUtil;

/**
 * SMDP Sarsa/Q-learning agent that, when an option terminates, updates the
 * option's Q-value for <em>every</em> state visited during the option's
 * execution ("intra-state" SMDP learning), using the discounted cumulative
 * reward and gamma^k bootstrap accumulated per visited state.
 *
 * NOTE(review): relies on members inherited from {@code SarsaAgent}
 * ({@code SMDPMode}, {@code currentOption}, {@code currentOptionFinished},
 * {@code lastOption}, {@code lastState}, {@code lastQValue}, {@code q},
 * {@code gamma}, {@code learnRate}, {@code getTDError}) whose exact semantics
 * are not visible here — assumptions below are marked where they matter.
 */
public class IntraStateSMDPSarsaAgent extends SarsaAgent {
	
	//TODO temp solution for maxing over allowed actions
	/**
	 * List of allowed actions. Index of actions in the list should correspond to their
	 * ID.
	 */
	protected List<? extends Option> optionPool;
	
	/**
	 * Bookkeeping for the currently executing option: for each state visited
	 * since the option started, a two-element array where index 0 holds the
	 * accumulated discount (gamma^k after k steps since the state was first
	 * seen) and index 1 holds the discounted cumulative reward collected since
	 * then. Cleared when the option finishes.
	 */
	protected Map<State,double[]> stateCumDiscountReward;
	
	/**
	 * If true, bootstrap on the max Q-value over the eligible options
	 * (Q-learning style); otherwise bootstrap on the selected option's value
	 * (Sarsa style).
	 */
	protected boolean qlearn;
	
	/**
	 * Constructs agent with given Q-value function and gamma = 0.9.
	 * @param q the value function to use.
	 * @param options allowed options; an option's index should equal its ID.
	 * @param qlearn true for Q-learning-style (max) bootstrapping, false for Sarsa-style.
	 */
	public IntraStateSMDPSarsaAgent(QFunction q, List<? extends Option> options, boolean qlearn) {
		this(q, 0.9, options, qlearn);
	}
	
	/**
	 * Constructs agent with given Q-value function and gamma.
	 * @param q the value function to use.
	 * @param gamma the discountfactor gamma e [0, 1].
	 * @param options allowed options; an option's index should equal its ID.
	 * @param qlearn true for Q-learning-style (max) bootstrapping, false for Sarsa-style.
	 */
	public IntraStateSMDPSarsaAgent(QFunction q, double gamma, List<? extends Option> options, boolean qlearn) {
		super(q,gamma);
		this.qlearn = qlearn;
		optionPool = options;
		stateCumDiscountReward = new HashMap<State,double[]>(35);
		setSMDPMode(true);
	}
	
	/**
	 * Performs one learning step. In SMDP mode the per-state reward/discount
	 * bookkeeping is advanced every step; when the current option finishes,
	 * Q(storedState, currentOption) is updated for every state visited during
	 * the option, towards target {@code cumReward + gamma^k * qval}.
	 *
	 * @param s the current state.
	 * @param reward the reward just received.
	 * @param qvalues Q-values for state {@code s}, indexed by option ID.
	 */
	@Override
	protected <T> void doLearnStep(State<T> s, double reward, double[] qvalues) {
		if ( SMDPMode ) {
			updateStateCumDiscountReward(s, reward);
			if ( currentOptionFinished ) {
				double[] val;
				State storedS;
				double tderr,
						qval = (qlearn ? getMaxVal(s,qvalues) : qvalues[currentOption.getID()]);
				for ( Map.Entry<State, double[]> e : stateCumDiscountReward.entrySet() ) {
					val = e.getValue();
					storedS = e.getKey();
					// SMDP TD error: discounted cum. reward + gamma^k * bootstrap - current estimate.
					tderr = val[1] + val[0]*qval - q.getValue(storedS, currentOption);
					// FIX: call this class's four-argument overload so lastState/lastQValue
					// track the actual current state s. The previous three-argument call
					// dispatched to the superclass, left the overload below dead, and made
					// lastState depend on HashMap iteration order.
					updateValueFunction(storedS, s, qvalues, tderr );
				}
				stateCumDiscountReward.clear();
				currentOptionFinished = false;
			}
		}
		else {
			// Flat (non-SMDP) one-step update via the inherited three-argument method.
			updateValueFunction(s, qvalues, getTDError(s, reward, qvalues) );
		}
	}

	/**
	 * Updates Q(s, lastOption) by learnRate * TDError, while recording the
	 * agent's bookkeeping (lastQValue, lastState) against the <em>current</em>
	 * state rather than the state being updated.
	 *
	 * @param s the (possibly past) state whose value is updated.
	 * @param currState the actual current state, stored as lastState.
	 * @param qvalues Q-values of the current state, indexed by option ID.
	 * @param TDError the temporal-difference error to apply.
	 */
	protected <T> void updateValueFunction(State<T> s, State<T> currState, double[] qvalues, double TDError) {
		learnRate.visit(s, lastOption);
		q.update(s, lastOption, learnRate.get(s,lastOption)*TDError);
		lastQValue = qvalues[currentOption.getID()];
		lastState = currState;
	}
	
	/**
	 * Advances the per-state discount/reward accumulators by one step: registers
	 * {@code s} if unseen (discount 1, reward 0), then folds {@code reward} into
	 * every tracked state and compounds its discount by gamma.
	 *
	 * @param s the state observed this step.
	 * @param reward the reward just received.
	 */
	protected <T> void updateStateCumDiscountReward(State<T> s, double reward) {
		if ( !stateCumDiscountReward.containsKey(s) )
			stateCumDiscountReward.put(s, new double[]{1,0});
		for ( double[] val : stateCumDiscountReward.values() ) {
			// FIX: discount the incoming reward by the discount accumulated so far
			// before compounding gamma. Previously val[1] += reward summed rewards
			// undiscounted, contradicting the gamma^k bootstrap used in the SMDP
			// target (val[1] + val[0]*qval). For a freshly added state both forms
			// agree (coefficient 1), so only multi-step accumulation changes.
			val[1] += val[0]*reward;
			val[0] *= gamma;
		}	
	}
	
	//TODO can this be sped up, e.g. only storing allowed options in the q-table
	/**
	 * Returns the maximum Q-value over the options eligible in {@code s}.
	 * When all options are eligible (or eligibility is unknown), this is simply
	 * the max over {@code qvalues}.
	 *
	 * @param s the state to evaluate.
	 * @param qvalues Q-values for {@code s}, indexed by option ID.
	 * @return the maximum eligible Q-value.
	 */
	protected <T> double getMaxVal(State<T> s, double[] qvalues) {
		List<? extends Option> stateOptions = getOptionSet(s);
		double maxVal;
		if ( stateOptions == null || stateOptions.size() == qvalues.length )
			maxVal = ArrayUtil.max(qvalues);
		else {
			maxVal = Double.NEGATIVE_INFINITY;
			double val;
			for ( Option o : stateOptions ) {
				val = qvalues[o.getID()];
				if ( val > maxVal )
					maxVal = val;
			}
		}
		return maxVal;
	}
	
	//TODO inefficient implementation of determining this
	/**
	 * Collects the options from the pool that are eligible in {@code s}.
	 *
	 * @param s the state to test eligibility against.
	 * @return the non-empty list of eligible options.
	 * @throws RuntimeException if no option in the pool is eligible in {@code s}.
	 */
	protected <T> List<Option> getOptionSet(State<T> s) {
		List<Option> stateOptionSet = new ArrayList<Option>(optionPool.size());
		for ( Option o : optionPool )
			if ( o.isEligible(s) )
				stateOptionSet.add(o);
		
		if ( stateOptionSet.size() == 0 )
			throw new RuntimeException("No eligible options for state: " + s);
		
		return stateOptionSet;
	}

}
