/**
 * Sarsa(lambda) agent for the MountainCar task.
 *
 * <p>Runs repeated batches of learning episodes, using a bounded circular
 * eligibility trace of the most recent (state, action) pairs. Value estimates
 * and updates are delegated to {@code SuperLearn}; environment dynamics to
 * {@code MountainCar}.
 */
public class SarsaLambda {

	/** Capacity of the eligibility-trace circular buffers. */
	public static int TRACE_LIMIT = 100;
	/** Next write index into the circular trace buffers. */
	public static int TRACE_POINT = 0;
	/** Number of valid entries currently held in the trace (<= TRACE_LIMIT). */
	public static int TRACE_SIZE = 0;
	/** Circular buffer of visited states; parallel to ACTION_TRACE. */
	public static double[][] STATE_TRACE;
	/** Circular buffer of actions taken; parallel to STATE_TRACE. */
	public static int[] ACTION_TRACE;
	/** Discount factor (undiscounted episodic task). */
	public final double GAMMA = 1;
	/** Trace-decay parameter for Sarsa(lambda). */
	public final double LAMBDA = 0.9;
	/** Exploration rate for epsilon-greedy action selection (0 = fully greedy). */
	public final double EPSILON = 0.0;

	/**
	 * Trains the agent over several independent batches and prints summary
	 * statistics: average total steps per batch, average steps in the final
	 * episode, and the per-episode step count averaged over batches.
	 */
	public static void main(String[] arg) {

		SarsaLambda sarsa = new SarsaLambda();
		int numBatches = 50;
		int numberEpisodes = 150;
		// stats[i] accumulates the step count of episode i across all batches.
		double[] stats = new double[numberEpisodes];
		int aveStepsPerBatch = 0;
		int stepsInLastEpisode = 0;
		int aveStepsInLastEpisode = 0;
		for (int j = 0; j < numBatches; j++) {
			// Each batch starts from a freshly initialized learner and trace.
			sarsa.init();
			int stepsInAEpisode = 0;
			for (int i = 0; i < numberEpisodes; i++) {
				stepsInLastEpisode = sarsa.run();
				stats[i] += stepsInLastEpisode;
				stepsInAEpisode += stepsInLastEpisode;
			}
			aveStepsInLastEpisode += stepsInLastEpisode;
			aveStepsPerBatch += stepsInAEpisode;
		}
		// Integer division: averages are reported truncated, as before.
		aveStepsPerBatch = aveStepsPerBatch / numBatches;
		aveStepsInLastEpisode = aveStepsInLastEpisode / numBatches;
		SuperLearn.writeF("graph.csv");
		System.out.println(aveStepsPerBatch);
		System.out.println(aveStepsInLastEpisode);
		System.out.println("---------------------------------------");
		for (int i = 0; i < numberEpisodes; i++) {
			System.out.println(stats[i] / numBatches);
		}
	}

	/** Resets the function approximator and the eligibility trace for a new batch. */
	private void init() {
		SuperLearn.reset();
		newEpisode();

	}

	/** Clears the eligibility trace and reallocates the circular buffers. */
	public void newEpisode() {
		TRACE_LIMIT = 100;
		TRACE_POINT = 0;
		TRACE_SIZE = 0;

		// States are 2-dimensional (MountainCar position and velocity).
		STATE_TRACE = new double[TRACE_LIMIT][2];
		ACTION_TRACE = new int[TRACE_LIMIT];
	}

	/*
	 * This method runs one episode of the Party
	 */

	/**
	 * Runs a single episode until the environment signals termination
	 * (a {@code null} successor state).
	 *
	 * @return the number of steps taken in the episode
	 */
	private int run() {
		double[] state = MountainCar.init();
		double[] nextState = null;
		int action;
		int count = 0;
		while (state != null) {

			action = maxAction(state);
			nextState = MountainCar.transition(state, action);
			double reward = MountainCar.reward(state, action, nextState);
			if (nextState != null) {
				addToTrace(action, state);
				// TD error: delta = r + Q(s', a') - Q(s, a)  (GAMMA == 1 here).
				double sigma = reward - SuperLearn.f(action, state)
						+ SuperLearn.f(maxAction(nextState), nextState);
				backtrace(sigma);
			}
			state = nextState;
			count++;
		}
		return count;
	}

	/**
	 * Appends a (state, action) pair to the circular eligibility trace,
	 * overwriting the oldest entry once the buffer is full.
	 */
	private void addToTrace(int action, double[] state) {
		ACTION_TRACE[TRACE_POINT] = action;
		// Stores the reference; callers must not mutate the array afterwards.
		STATE_TRACE[TRACE_POINT] = state;

		TRACE_POINT++;
		TRACE_POINT %= TRACE_LIMIT;
		TRACE_SIZE += TRACE_SIZE == TRACE_LIMIT ? 0 : 1;

	}

	/**
	 * Propagates the TD error backwards through the trace, decaying the
	 * update by GAMMA * LAMBDA per step into the past.
	 *
	 * @param sigma the TD error computed for the most recent transition
	 */
	private void backtrace(double sigma) {
		int count = 0;
		int index = TRACE_POINT;
		double coefficient = 1;

		while (count < TRACE_SIZE) {
			// Walk backwards through the circular buffer, newest entry first.
			index--;
			index = index == -1 ? TRACE_LIMIT - 1 : index;
			SuperLearn.learn(ACTION_TRACE[index], STATE_TRACE[index], sigma
					* coefficient);

			count++;
			coefficient *= GAMMA * LAMBDA;

		}
	}

	/**
	 * Epsilon-greedy action selection over the three MountainCar actions.
	 * With probability 1 - EPSILON picks the action with the highest
	 * estimated value; otherwise picks uniformly at random.
	 *
	 * @param state the current environment state
	 * @return the chosen action in {0, 1, 2}
	 */
	private int maxAction(double[] state) {
		int action = 0;
		if (Math.random() > EPSILON) {
			double q0 = SuperLearn.f(0, state);
			double q1 = SuperLearn.f(1, state);
			double q2 = SuperLearn.f(2, state);
			if (q0 < q1) {
				action = 1;
				if (q1 < q2) {
					action = 2;
				}
			} else if (q0 < q2) {
				action = 2;
			}
		} else {
			// Bug fix: the cast must cover the whole product. The original
			// "(int) Math.random() * 3" truncated Math.random() to 0 before
			// multiplying, so exploration always returned action 0.
			action = (int) (Math.random() * 3);
		}
		return action;
	}
}