package kibahed.uni.mdp.calculation;

import java.text.DecimalFormat;
import java.util.*;

public class QLearning implements IFMDPAlgorithm {

	/** Number of actions available in every state (the Q-table width). */
	private static final int NUM_ACTIONS = 5;

	private Random random;

	private MDP_Data mdpData;
	/** Exploration probability for the epsilon-greedy behaviour policy. */
	private double epsilon;
	/** Learning rate; decayed as 1/(visits(s)+1) per state. */
	private double alpha;
	/** Number of simulated steps to run. */
	private int iterations;
	/** Per-state visit counter used to decay alpha. */
	private int[] visitCount;
	/** Total number of states (rows * columns). */
	private int size;
	/** Best Q-value per state, filled by {@link #evaluatePolicy()}. */
	private double[] bestQValues;
	/** Greedy action index (0-based) per state, filled by {@link #evaluatePolicy()}. */
	private int[] policy;
	/** State the simulation starts in when {@link #calculate()} is used. */
	private int startPosition;
	/** Q-table: state index -> list of Q-values, one entry per action. */
	private Map<Integer, List<Double>> Q;

	/**
	 * Small demo: learns a policy on a 5x5 grid world and prints it.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		MDP_Data data = new MDP_Data(5, 5, 0.1, 1, 0.9, false);
		QLearning q = new QLearning(1, 10000000);
		q.setMDP_Data(data);
		q.qLearn(0);
		q.evaluatePolicy();
		q.printQValues();
	}

	public QLearning(MDP_Data mdpData, double epsilon, int iterations, int startPosition) {
		this.mdpData = mdpData;
		this.epsilon = epsilon;
		this.iterations = iterations;
		this.startPosition = startPosition;
	}

	public QLearning(double epsilon, int iterations) {
		this.epsilon = epsilon;
		this.iterations = iterations;
	}

	@Override
	public void calculate() {
		qLearn(startPosition);
		evaluatePolicy();
		printQValues();
	}

	public void setMDP_Data(MDP_Data mdpData) {
		this.mdpData = mdpData;
	}

	/**
	 * Runs epsilon-greedy Q-learning for the configured number of iterations,
	 * starting from the given state.
	 *
	 * @param startState index of the state the simulated walk starts in
	 */
	public void qLearn(int startState) {
		size = mdpData.getRows() * mdpData.getCollumns();
		policy = new int[size];
		visitCount = new int[size];
		initQ();
		random = new Random();

		int state = startState;
		for (int i = 0; i < iterations; i++) {
			// epsilon-greedy: explore with probability epsilon, else exploit.
			if (random.nextDouble() < epsilon) {
				state = doRandomStep(state);
			} else {
				state = doGreedyStep(state);
			}
		}
	}

	/** Initializes the Q-table with 0 for every (state, action) pair. */
	private void initQ() {
		Q = new HashMap<Integer, List<Double>>();
		for (int s = 0; s < size; s++) {
			List<Double> l = new ArrayList<Double>();
			for (int a = 0; a < NUM_ACTIONS; a++) {
				l.add(0.);
			}
			Q.put(s, l);
		}
	}

	/**
	 * Exploitation step.
	 *
	 * FIX: the greedy action is now the argmax of the current Q-values, as
	 * epsilon-greedy Q-learning requires. The old code picked the action with
	 * the highest immediate reward, which ignores the learned long-term
	 * estimates entirely (the Q-table never influenced behaviour).
	 *
	 * @return the stochastically sampled successor state
	 */
	private int doGreedyStep(int state) {
		List<Double> qRow = Q.get(state);
		int action = qRow.indexOf(Collections.max(qRow)); // argmax_a Q(s,a)
		return step(state, action);
	}

	/** Exploration step: takes a uniformly random action. */
	private int doRandomStep(int state) {
		return step(state, random.nextInt(NUM_ACTIONS));
	}

	/**
	 * Executes one action, samples the successor state, and applies the
	 * Q-learning update
	 *   Q(s,a) <- alpha * (r + gamma * max_a' Q(s',a')) + (1 - alpha) * Q(s,a)
	 * with alpha decayed as 1/(visits(s)+1). Shared by the greedy and the
	 * random step (the two former copies of this code had drifted apart).
	 *
	 * NOTE(review): the reward is indexed by the *intended* action, not by the
	 * transition that actually occurred — the original code carried a
	 * "// wrong" marker here; confirm against MDP_Data semantics.
	 *
	 * @return the successor state (never negative; see getNextState)
	 */
	private int step(int state, int action) {
		int nextState = getNextState(state, action);
		double r = mdpData.getRewardsPerActionAndState()[state][action];
		double q_k = getQ(state, action);

		visitCount[state]++;
		alpha = 1 / ((double) (visitCount[state] + 1));

		double q_k1 = alpha * (r + mdpData.getDiscount() * maxQ(nextState)) + (1 - alpha) * q_k;
		setQ(state, action, q_k1);

		return nextState;
	}

	/**
	 * Successor state reached by the action with the highest immediate reward
	 * (kept for experimentation; not used by the learning loop).
	 */
	@SuppressWarnings("unused")
	private int getStateWithMaxImmediateReward(int state) {
		// FIX: start from -infinity so all-negative reward rows are handled
		// correctly (the old sentinel was the magic value -10).
		double max = Double.NEGATIVE_INFINITY;
		int bestAction = 0;
		double[] rewards = mdpData.getRewardsPerActionAndState()[state];

		for (int a = 0; a < rewards.length; a++) {
			if (rewards[a] > max) {
				max = rewards[a];
				bestAction = a + 1; // neighbour map is 1-based
			}
		}

		int nextState = mdpData.getActionNeighbourMap(state).get(bestAction);
		// -1 marks a move that would leave the grid: stay in place instead.
		return nextState == -1 ? state : nextState;
	}

	/**
	 * Action index (0-based) with the highest immediate reward in the state.
	 *
	 * FIX: the old initial max was Double.MIN_VALUE, which is the smallest
	 * POSITIVE double — with all-negative rewards every comparison failed and
	 * action 0 was silently returned. Use negative infinity instead.
	 */
	@SuppressWarnings("unused")
	private int getActionWithMaxImmediateReward(int state) {
		double max = Double.NEGATIVE_INFINITY;
		int bestAction = 0;
		double[] rewards = mdpData.getRewardsPerActionAndState()[state];

		for (int a = 0; a < rewards.length; a++) {
			if (rewards[a] > max) {
				max = rewards[a];
				bestAction = a;
			}
		}

		return bestAction;
	}

	/** Highest immediate reward obtainable in the given state. */
	@SuppressWarnings("unused")
	private double getMaxImmediateReward(int state) {
		double max = Double.NEGATIVE_INFINITY; // FIX: was the magic sentinel -10
		for (double reward : mdpData.getRewardsPerActionAndState()[state]) {
			if (reward > max) {
				max = reward;
			}
		}
		return max;
	}

	/** Highest Q-value over all actions in the given state. */
	private double maxQ(int state) {
		return Collections.max(Q.get(state));
	}

	private void setQ(int state, int action, double value) {
		Q.get(state).set(action, value);
	}

	private double getQ(int state, int action) {
		return Q.get(state).get(action);
	}

	/** Derives the greedy policy and the best Q-value per state from the Q-table. */
	public void evaluatePolicy() {
		bestQValues = new double[size];
		for (int s = 0; s < size; s++) {
			double best = maxQ(s); // compute the row maximum once, not twice
			policy[s] = Q.get(s).indexOf(best);
			bestQValues[s] = best;
		}
	}

	/** Prints the evaluated policy and its Q-values as a row/column grid. */
	private void printQValues() {
		DecimalFormat df = new DecimalFormat();
		df.setMaximumFractionDigits(2);

		System.out.println("\n\n\n Policy evaluated with q-Learning");

		for (int i = 0; i < mdpData.getRows(); i++) {
			System.out
					.println("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n");
			for (int j = 0; j < mdpData.getCollumns(); j++) {
				int index = i * mdpData.getCollumns() + j;
				// the actions map is 1-based while policy stores 0-based indices
				System.out.print(" | "
						+ mdpData.getActions().get(policy[index] + 1) + " " +
						df.format(Collections.max(Q.get(index))));
			}
		}
	}

	/**
	 * Samples a successor state for (state, action): the intended move succeeds
	 * with probability p(a) and the remaining mass is split evenly over the
	 * other actions; a stored probability of 0 falls back to a uniform choice.
	 * Transitions marked -1 (would leave the grid) keep the agent in place.
	 */
	private int getNextState(int state, int action) {
		double[] probs = mdpData.getProbabilitiesPerStateAndAction()[state];

		double p_a = probs[action];
		double p_na = (1 - p_a) / (NUM_ACTIONS - 1);
		if (p_a == 0) {
			p_a = 1. / NUM_ACTIONS;
			p_na = 1. / NUM_ACTIONS;
		}

		// Inverse-transform sampling over the NUM_ACTIONS outcomes.
		double r = random.nextDouble();
		double sum = 0;
		int nextState = 0;
		for (int a = 0; a < NUM_ACTIONS; a++) {
			sum += (a == action) ? p_a : p_na;
			if (sum > r) {
				nextState = mdpData.getActionNeighbourMap(state).get(a + 1); // 1-based map
				break;
			}
		}

		return nextState < 0 ? state : nextState;
	}

	@Override
	public int[] getEvaluatedPolicy() {
		return policy;
	}

	@Override
	public double[] getEvaluatedValues() {
		return bestQValues;
	}

}
