package nl.rug.ml.dobbelen.learning;

import java.util.Collection;
import java.util.Collections;
import java.util.Random;

/**
 * @author Do Bich Ngoc (bichngocdo89@gmail.com)
 * 
 */
/**
 * Epsilon-greedy exploration strategy: with probability {@code epsilon} a
 * random action is sampled uniformly from the actions known for the state
 * (explore); otherwise the action maximal under
 * {@code DobbelenAction.DOBBELEN_ACTION_COMPARATOR} is chosen (exploit).
 * Epsilon starts at 0.9 and is multiplied by the inherited update rate once
 * every {@code getUpdateStep()} calls to {@link #chooseAction}.
 *
 * <p>NOTE(review): in this class {@code epsilon} is the EXPLORATION
 * probability — the inverse of the textbook convention where epsilon is
 * small. Callers passing a conventional small epsilon would almost always
 * exploit; confirm this is intended before changing.
 *
 * <p>NOTE(review): class name retains the original spelling ("Stratergy")
 * for binary/source compatibility with existing callers.
 *
 * <p>Not thread-safe: {@code epsilon} and {@code numIterators} are mutated
 * without synchronization.
 *
 * @author Do Bich Ngoc (bichngocdo89@gmail.com)
 */
public class EGreedyExploreStratergy extends ExploreStratergy {

	/** Shared RNG for both the explore/exploit coin flip and action sampling. */
	private static Random random = new Random();

	/** Probability of exploring (choosing a uniformly random action). */
	private double epsilon = 0.9;

	/** Number of chooseAction calls so far; drives the epsilon decay schedule. */
	private int numIterators = 0;

	/** Creates a strategy with the default exploration probability (0.9). */
	public EGreedyExploreStratergy() {

	}

	/**
	 * Creates a strategy with the given exploration probability.
	 *
	 * @param epsilon probability of choosing a random action on each decision
	 */
	public EGreedyExploreStratergy(double epsilon) {
		this.epsilon = epsilon;
	}

	/** @return the current exploration probability */
	public double getEpsilon() {
		return epsilon;
	}

	/** @param epsilon the new exploration probability */
	public void setEpsilon(double epsilon) {
		this.epsilon = epsilon;
	}

	/**
	 * Picks an action for {@code state}: uniform random with probability
	 * {@code epsilon}, otherwise the comparator-maximal action.
	 *
	 * @param state the state to act in; must have an entry in stateActionMap
	 * @return the selected action, or {@code null} only when the state's
	 *         action collection is empty and the explore branch was taken
	 */
	private DobbelenAction chooseActionEGreedy(DobbelenState state) {
		Collection<DobbelenAction> actions = stateActionMap.get(state);
		if (random.nextDouble() < epsilon) {
			// Explore: sample uniformly by walking the cumulative distribution.
			double r = random.nextDouble();
			double s = 0;
			DobbelenAction last = null;
			for (DobbelenAction action : actions) {
				last = action;
				s += 1.0 / actions.size();
				if (s > r) {
					return action;
				}
			}
			// Bug fix: floating-point rounding can leave the cumulative sum
			// just below r after the final element, in which case the original
			// code fell through and returned null. Return the last action
			// instead; 'last' is still null only for an empty collection.
			return last;
		}
		// Exploit: highest-valued action per the domain comparator.
		return Collections.max(actions, DobbelenAction.DOBBELEN_ACTION_COMPARATOR);
	}

	/**
	 * Chooses an action epsilon-greedily and advances the decay schedule:
	 * every {@code getUpdateStep()} calls, epsilon is multiplied by
	 * {@code getUpdateRate()}.
	 *
	 * @param state the state to act in
	 * @return the chosen action
	 */
	@Override
	public DobbelenAction chooseAction(DobbelenState state) {
		DobbelenAction action = chooseActionEGreedy(state);
		numIterators++;
		// Anneal epsilon on a fixed step schedule inherited from the base class.
		if (numIterators % getUpdateStep() == 0) {
			epsilon *= getUpdateRate();
		}
		return action;
	}

}
