package roborl;

import java.util.Random;
import java.util.Vector;

/**
 * Epsilon-greedy action selection and Q-learning updates on top of a
 * {@link ValueApproximate} value function and a {@link Rewards} source.
 *
 * <p>All public methods are {@code synchronized}: the robot's event thread
 * and the learning loop may call into this object concurrently.
 */
public class Decision {

	ValueApproximate approximate;
	Rewards rewards;

	// Source of randomness for epsilon-greedy exploration.
	Random random = new Random();

	public Decision(ValueApproximate approximate, Rewards rewards) {
		this.approximate = approximate;
		this.rewards = rewards;
	}

	/**
	 * Picks an action from the approximator's full action set using
	 * epsilon-greedy selection.
	 *
	 * @param state current robot state
	 * @param rnd   exploration probability (epsilon), expected in [0, 1]
	 * @return the chosen action
	 */
	public synchronized Action getAction(RobotState state, double rnd) {
		return getAction(state, rnd, null);
	}

	/**
	 * Epsilon-greedy selection: with probability {@code rnd} returns a
	 * uniformly random action from {@code as}, otherwise the action the
	 * value approximator currently rates best for {@code state}.
	 *
	 * @param state current robot state
	 * @param rnd   exploration probability (epsilon), expected in [0, 1]
	 * @param as    candidate actions; when {@code null}, the approximator's
	 *              full action set is used
	 * @return the chosen action
	 */
	public synchronized Action getAction(RobotState state, double rnd,
			Vector<Action> as) {
		if (as == null)
			as = approximate.actionSet;

		if (random.nextDouble() < rnd) {
			// Explore: pick uniformly at random ("R" marks a random move
			// in the debug trace).
			System.out.print(" R ");
			return as.get(random.nextInt(as.size()));
		} else {
			// Exploit: ask the approximator for its highest-valued action.
			System.out.print("<<" + as + ">> ");
			return approximate.getBestAction(state, as);
		}
	}

	/**
	 * Performs one Q-learning update for the transition
	 * {@code previous -> current} taken via {@code action}:
	 *
	 * <pre>Q(s,a) += rate * (reward + gamma * max_a' Q(s',a') - Q(s,a))</pre>
	 *
	 * @param previous state before the action was taken
	 * @param current  state observed after the action
	 * @param action   action taken in {@code previous}
	 * @param rate     learning rate (alpha)
	 * @param gamma    discount factor
	 * @return the immediate reward observed for the transition
	 */
	public synchronized double learn(RobotState previous, RobotState current,
			Action action, double rate, double gamma) {
		double qOld = approximate.getValue(previous, action);
		double qNext = approximate.getBestValue(current);
		double reward = rewards.getReward(previous, current);
		double qNew = qOld + rate * (reward + gamma * qNext - qOld);
		approximate.updateValue(previous, action, qNew);

		System.out.println("Learn ->" + previous + " " + action + " " + reward);

		return reward;
	}

	public synchronized ValueApproximate getApproximate() {
		return approximate;
	}

	public synchronized void setApproximate(ValueApproximate approximate) {
		this.approximate = approximate;
	}
}
