package nl.rug.ml.dobbelen.learning;

import java.util.Collection;
import java.util.Random;

/**
 * @author Do Bich Ngoc (bichngocdo89@gmail.com)
 *
 */
/**
 * Softmax (Boltzmann) exploration strategy: actions are sampled with
 * probability proportional to {@code exp(Q(a) / temperature)}. The
 * temperature is cooled geometrically every {@code getUpdateStep()}
 * calls, shifting the policy from exploration toward exploitation.
 */
public class SoftMaxExploreStratergy extends ExploreStratergy {

	private static Random random = new Random();

	// Boltzmann temperature: high -> near-uniform exploration, low -> greedy.
	private double temperature = 10000;
	// Number of chooseAction calls made so far; drives the cooling schedule.
	private int numIterations = 0;

	public SoftMaxExploreStratergy() {

	}

	public double getTemperature() {
		return temperature;
	}

	public void setTemperature(double temperature) {
		this.temperature = temperature;
	}

	/**
	 * Samples an action for {@code state} from the Boltzmann distribution
	 * over action values: {@code P(a) = exp(Q(a)/T) / sum_b exp(Q(b)/T)}.
	 *
	 * @param state the state whose candidate actions are considered
	 * @return the sampled action, or {@code null} if the state has no actions
	 */
	private DobbelenAction chooseActionSoftMax(DobbelenState state) {
		Collection<DobbelenAction> actions = stateActionMap.get(state);
		if (actions == null || actions.isEmpty()) {
			return null;
		}

		// Compute the unnormalized Boltzmann weights and their sum.
		double[] probabilities = new double[actions.size()];
		int k = 0;
		double sum = 0;
		for (DobbelenAction action : actions) {
			probabilities[k] = Math.exp(action.getValue() / temperature);
			sum += probabilities[k];
			k++;
		}
		// Normalize so the weights form a probability distribution.
		for (int i = 0; i < probabilities.length; i++) {
			probabilities[i] = probabilities[i] / sum;
		}

		// Inverse-transform sampling over the NORMALIZED probabilities.
		// BUG FIX: the original re-accumulated the raw exp(...) weights here
		// and compared them with r in [0,1), which biased selection heavily
		// toward the first actions instead of following the softmax
		// distribution.
		double r = random.nextDouble();
		double s = 0;
		k = 0;
		DobbelenAction last = null;
		for (DobbelenAction action : actions) {
			s += probabilities[k];
			k++;
			last = action;
			if (s > r) {
				return action;
			}
		}

		// Floating-point rounding can leave s marginally below 1 even though
		// the probabilities sum to 1; fall back to the last action instead of
		// returning null.
		return last;
	}

	/**
	 * Chooses an action via softmax sampling and advances the cooling
	 * schedule: every {@code getUpdateStep()} calls the temperature is
	 * multiplied by {@code getUpdateRate()}.
	 */
	@Override
	public DobbelenAction chooseAction(DobbelenState state) {
		DobbelenAction action = chooseActionSoftMax(state);
		numIterations++;
		if (numIterations % getUpdateStep() == 0) {
			temperature *= getUpdateRate();
		}
		return action;
	}

}
