/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov.policy;

import java.util.List;
import java.util.Random;
import java.util.Set;

import net.javlov.Option;
import net.javlov.Policy;
import net.javlov.vf.QFunction;
import net.javlov.State;
import net.javlov.vf.OptionValues;
import net.javlov.vf.Value;

/**
 * Epsilon-greedy (e-greedy) policy. Chooses action with maximum Q-value with probability
 * 1 - e, and a (uniform) random action with probability e. This means that the greedy action
 * will be chosen with probability 1 - e + e/(nr of actions).
 *  
 * @author Matthijs Snel
 *
 */
public class EGreedyPolicy<T> implements Policy<T> {

	/**
	 * The Q-value function used to evaluate options.
	 */
	private QFunction<T> q;
	
	/**
	 * Exploration rate epsilon; a uniformly random option is chosen with this probability.
	 */
	protected double e;
	
	/**
	 * List of allowed options. Index of options in the list should correspond to their
	 * ID.
	 */
	protected List<? extends Option<? super T>> optionPool;
	
	/**
	 * Random number generator used for the explore/exploit decision and for
	 * picking among (possibly tied) options.
	 */
	private Random rng;
	
	/**
	 * Creates an epsilon-greedy policy with the specified Q-function, epsilon, and pool
	 * of allowed options. A new, default-seeded {@link Random} is used as the random
	 * number generator.
	 * 
	 * @param q the Q-value function.
	 * @param epsilon option with maximum Q-value is chosen with probability
	 * 1 - epsilon, and a (uniform) random option with probability epsilon.
	 * @param options list of allowed options. Index of options in the list should
	 * correspond to their ID.
	 */
	public EGreedyPolicy(QFunction<T> q, double epsilon, List<? extends Option<? super T>> options) {
		// delegate to the full constructor to avoid duplicated initialization
		this(q, epsilon, options, new Random());
	}

	/**
	 * Creates an epsilon-greedy policy with the specified Q-function, epsilon, pool
	 * of allowed options, and random number generator.
	 * 
	 * @param q the Q-value function.
	 * @param epsilon option with maximum Q-value is chosen with probability
	 * 1 - epsilon, and a (uniform) random option with probability epsilon.
	 * @param options list of allowed options. Index of options in the list should
	 * correspond to their ID.
	 * @param rng random number generator used to pick options.
	 */
	public EGreedyPolicy(QFunction<T> q, double epsilon, List<? extends Option<? super T>> options, Random rng) {
		setQFunction(q);
		setEpsilon(epsilon);
		optionPool = options;
		this.rng = rng;
	}
	
	/**
	 * Sets the Q-value function this policy queries.
	 * 
	 * @param q the Q-value function.
	 */
	public void setQFunction(QFunction<T> q) {
		this.q = q;
	}

	/**
	 * @return the Q-value function this policy queries.
	 */
	public QFunction<T> getQFunction() {
		return q;
	}
	
	/**
	 * @return the current exploration rate epsilon.
	 */
	public double getEpsilon() {
		return e;
	}

	/**
	 * Sets the exploration rate.
	 * 
	 * @param epsilon probability with which a uniformly random option is chosen.
	 */
	public void setEpsilon(double epsilon) {
		e = epsilon;
	}

	/**
	 * Chooses option with maximum Q-value (greedy option) with probability
	 * 1 - e, and a (uniformly distributed) random option with probability e. 
	 * This means that the greedy option will be chosen with probability
	 * {@code 1 - e + e/(nr of options)}. If there is more than one greedy option, ties
	 * are broken randomly.
	 * 
	 * @param s the state based on which to choose the option. The Q-value function will
	 * be queried first to determine the Q-values of the options for this state, after which
	 * the option will be determined by calling {@link #getOption(State, OptionValues)}.
	 * @return an {@code Option} chosen according to the rule as specified above.
	 */
	@Override
	public Option<T> getOption(State<? extends T> s) {
		return getOption(s, q.getValues(s));
	}
	
	/**
	 * Chooses an option epsilon-greedily from the supplied pre-computed values.
	 * 
	 * @param s the state the values belong to; used only for error reporting.
	 * @param values Q-values of the options for state {@code s}.
	 * @return a greedy option with probability 1 - e (ties broken uniformly),
	 * otherwise a uniformly random option.
	 * @throws IllegalStateException if {@code values} reports no greedy options.
	 */
	@Override
	public Option<T> getOption(State<? extends T> s, OptionValues values) {
		List<? extends Option<?>> opts;
		//exploit: restrict the choice to the greedy (maximum-value) options
		if ( rng.nextDouble() > e ) {
			opts = values.getMaxOptions();
			if ( opts.isEmpty() )
				throw new IllegalStateException("Impossible: no best options for state " + s
						+ " (max=" + values.max() + "): " + values);
		//explore: choose uniformly among all options (including the greedy ones)
		} else {
			opts = values.getOptions();
		}
		//the wildcard list cannot be picked from without an unchecked cast; by this
		//policy's contract all pooled options accept states of type T
		@SuppressWarnings("unchecked")
		Option<T> choice = (Option<T>) opts.get( rng.nextInt(opts.size()) );
		return choice;
	}

	/**
	 * Computes the epsilon-greedy selection probability of every option for the given
	 * values: each greedy option gets {@code (1 - e)/(nr of greedy options) + e/(nr of
	 * options)}, every other option gets {@code e/(nr of options)}.
	 * 
	 * @param s the state the values belong to (not used in the computation).
	 * @param values Q-values of the options for state {@code s}.
	 * @return an array of probabilities, indexed in iteration order of
	 * {@code values.getValues()}.
	 */
	@Override
	public double[] getOptionProbabilities( State<? extends T> s, OptionValues values ) {
		List<? extends Option<?>> maxOpts = values.getMaxOptions();
		double probs[] = new double[values.size()],
				max = values.max(),
				baseprob =  e / values.size(),
				maxprob = (1 - e) / maxOpts.size() + baseprob;
		int i = 0;
		for ( Value v : values.getValues() ) {
			//>= (rather than ==) so every option tied at the maximum counts as greedy,
			//consistent with getMaxOptions() above
			if ( v.get() >= max )
				probs[i++] = maxprob;
			else
				probs[i++] = baseprob;
		}
		return probs;
	}	
		
	/**
	 * No-op: this policy keeps no episode-specific state to initialize.
	 */
	@Override
	public void init() {
	}

	/**
	 * No-op: this policy keeps no episode-specific state to reset.
	 */
	@Override
	public void reset() {
	}
}
