/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov.policy;

import java.util.Collection;
import java.util.List;
import java.util.Random;

import net.javlov.Option;
import net.javlov.Policy;
import net.javlov.vf.OptionValues;
import net.javlov.vf.QFunction;
import net.javlov.vf.Value;
import net.javlov.State;

/**
 * Picks actions from Q-values according to a softmax (Boltzmann) distribution
 * over the values:
 * 
 * {@code p(s,a) = exp(Q(s,a)) / sum_all_actions( exp(Q(s,a)) )}.
 * 
 * The exponentials are computed with the maximum Q-value subtracted first;
 * softmax is invariant under this shift, and it prevents {@code Math.exp}
 * from overflowing to infinity for large Q-values.
 * 
 * @author Matthijs Snel
 *
 */
public class SoftmaxPolicy<T> implements Policy<T> {

	/**
	 * The Q-value function.
	 */
	private QFunction<T> q;
	
	/**
	 * List of allowed actions. Index of actions in the list should correspond to their
	 * ID.
	 */
	protected List<? extends Option<T>> optionPool;
	
	/**
	 * Random number generator.
	 */
	private Random rng;
	
	/**
	 * Constructs a softmax policy over the given option pool.
	 * 
	 * @param qf the Q-value function used to score options
	 * @param options the allowed options; list index should match option ID
	 */
	public SoftmaxPolicy(QFunction<T> qf, List<? extends Option<T>> options) {
		setQFunction(qf);
		optionPool = options;
		rng = new Random();
	}
	
	/**
	 * Sets the Q-value function used to score options.
	 * 
	 * @param q the new Q-value function
	 */
	public void setQFunction(QFunction<T> q) {
		this.q = q;
	}

	/**
	 * @return the Q-value function currently in use
	 */
	public QFunction<T> getQFunction() {
		return q;
	}
	
	/**
	 * Returns the softmax selection probability of each option in the given state.
	 * 
	 * @param s the state (unused here; probabilities depend only on the supplied values)
	 * @param qvalues the Q-values of the options in state {@code s}
	 * @return one probability per option, in the iteration order of {@code qvalues}
	 */
	public double[] getOptionProbabilities( State<? extends T> s, OptionValues qvalues ) {	
		return getSoftmaxValues(qvalues.getValues());
	}

	/**
	 * Samples one option from the softmax distribution over the given Q-values.
	 * 
	 * @param options the options to choose from; must be at least as long as {@code qvalues}
	 * @param qvalues the Q-values that induce the distribution
	 * @return the sampled option
	 */
	protected Option<T> pickSoftmaxOption(List<? extends Option<T>> options, OptionValues qvalues) {
		double[] cum = getCumSoftmaxValues(qvalues.getValues());
		double r = rng.nextDouble();
		// Inverse-transform sampling: pick the first option whose cumulative
		// probability exceeds r.
		for ( int i = 0; i < cum.length; i++ )
			if ( r < cum[i] )
				return options.get(i);
		// Floating-point rounding can leave the final cumulative value slightly
		// below 1.0; fall back to the last option instead of indexing past the
		// end of the list (the original code threw IndexOutOfBoundsException here).
		return options.get(cum.length - 1);
	}

	@Override
	public Option<T> getOption(State<? extends T> s) {
		return pickSoftmaxOption(optionPool, q.getValues(s));
	}
	
	@Override
	public Option<T> getOption(State<? extends T> s, OptionValues qvalues) {
		return pickSoftmaxOption(optionPool, qvalues);
	}

	@Override
	public void init() {
		// No initialisation required for this policy.
	}

	@Override
	public void reset() {
		// No per-episode state to reset.
	}
	
	/**
	 * Computes the softmax distribution over the given values.
	 * 
	 * @param values the raw (Q-)values; may be empty
	 * @return an array of the same length whose entries sum to 1 (empty input
	 *         yields an empty array)
	 */
	public static double[] getSoftmaxValues(double[] values) {
		double[] exp = new double[values.length];
		if ( values.length == 0 )
			return exp;
		// Shift by the maximum for numerical stability: exp(x - max) never
		// overflows, and softmax is invariant under a constant shift.
		double max = values[0];
		for ( int i = 1; i < values.length; i++ )
			if ( values[i] > max )
				max = values[i];
		double sum = 0;
		for ( int i = 0; i < values.length; i++ ) {
			exp[i] = Math.exp(values[i] - max);
			sum += exp[i];
		}
		for ( int i = 0; i < values.length; i++ )
			exp[i] /= sum;
		return exp;
	}
	
	/**
	 * Computes the softmax distribution over the given values, in the
	 * collection's iteration order.
	 * 
	 * @param values the raw (Q-)values
	 * @return an array of the same length whose entries sum to 1
	 */
	public static double[] getSoftmaxValues(Collection<Value> values) {
		return getSoftmaxValues(toDoubleArray(values));
	}
	
	/**
	 * Computes the cumulative softmax distribution over the given values:
	 * entry i is the sum of the softmax probabilities of entries 0..i, so the
	 * last entry is (up to rounding) 1.
	 * 
	 * @param values the raw (Q-)values
	 * @return the cumulative probabilities, in the collection's iteration order
	 */
	public static double[] getCumSoftmaxValues(Collection<Value> values) {
		double[] p = getSoftmaxValues(values);
		for ( int i = 1; i < p.length; i++ )
			p[i] += p[i-1];
		return p;
	}
	
	/**
	 * Unwraps a collection of {@link Value}s into a primitive array, in
	 * iteration order.
	 */
	private static double[] toDoubleArray(Collection<Value> values) {
		double[] raw = new double[values.size()];
		int i = 0;
		for ( Value v : values )
			raw[i++] = v.get();
		return raw;
	}

}
