package ass2;
import java.util.AbstractMap.SimpleEntry;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Vector;
import java.lang.Math;


public class Predator extends Agent {

	// Q[x][y][a]: action-value table over an 11x11 state grid and 5 actions.
	// (5,5) is the absorbing "caught" state — see move() and initializeQ().
	protected Float[][][] Q;
	protected float Alpha;      // learning rate for the TD updates (Q / Sarsa)
	protected float Gamma;      // discount factor for the TD updates
	protected float Epsilon;    // exploration probability of the e-greedy policy
	protected String Algorithm; // "Q", "Sarsa", "MCOnP" or "MCOffP"
	// Per-(state, action) reward lists collected during the current episode;
	// each list holds the rewards observed from the pair's first visit onward.
	protected HashMap<SimpleEntry<State, Action>, Vector<Float>> Returns;
	// First-visit order of (state, action) pairs this episode (off-policy MC only).
	protected Vector<SimpleEntry<State, Action>> SAHistory;
	// Weighted-return numerator / importance-weight denominator (off-policy MC).
	protected HashMap<SimpleEntry<State, Action>, Float> N;
	protected HashMap<SimpleEntry<State, Action>, Float> D;
	protected float tau; // softmax temperature (only used by softmax())

	/**
	 * Creates a learning predator.
	 *
	 * @param spawn     initial location
	 * @param env       environment the agent acts in
	 * @param alpha     learning rate (TD methods)
	 * @param gamma     discount factor (TD methods)
	 * @param epsilon   exploration rate of the e-greedy behavior policy
	 * @param algorithm one of "Q", "Sarsa", "MCOnP", "MCOffP"
	 */
	public Predator(Location spawn, Environment env, float alpha, float gamma, float epsilon, String algorithm) {
		super(spawn, env);
		Alpha = alpha;
		Gamma = gamma;
		Epsilon = epsilon;
		Algorithm = algorithm;
		initializeQ();
		Returns = new HashMap<SimpleEntry<State, Action>, Vector<Float>>();
		SAHistory = new Vector<SimpleEntry<State, Action>>();
		N = new HashMap<SimpleEntry<State, Action>, Float>();
		D = new HashMap<SimpleEntry<State, Action>, Float>();
		tau = 1f;
	}

	/**
	 * Initializes Q optimistically to 15 everywhere except the absorbing
	 * state (5,5), which gets 0 (no future reward once the prey is caught).
	 */
	private void initializeQ() {
		Q = new Float[11][11][5];
		for (int x = 0; x < 11; x++) {
			for (int y = 0; y < 11; y++) {
				for (int move = 0; move < 5; move++) {
					// Optimistic initial values encourage exploration.
					Q[x][y][move] = (x == 5 && y == 5) ? 0f : 15f;
				}
			}
		}
	}

	/**
	 * Performs one step using the configured algorithm.
	 * Does nothing in the absorbing state (5,5) — the episode is over.
	 */
	@Override
	void move() {
		State s = Environment.getState();
		if (s.x == 5 && s.y == 5)
			return;
		if (Algorithm.equals("Q"))
			qMove();
		else if (Algorithm.equals("Sarsa"))
			sarsaMove();
		else if (Algorithm.equals("MCOnP"))
			mcOnPMove();
		else if (Algorithm.equals("MCOffP"))
			mcOffPMove();
	}

	/**
	 * On-policy Monte Carlo step: act e-greedily, then append the observed
	 * reward to the return list of every (state, action) pair visited so far
	 * this episode, so each list ends up holding the pair's reward-to-go.
	 * Averaging into Q happens in finishEpisode().
	 * NOTE(review): returns are accumulated undiscounted (Gamma is not used
	 * here) — confirm that is intended.
	 */
	private void mcOnPMove() {
		State s = Environment.getState();
		Action a = Tools.int2Action(eGreedy(s));
		updateLocation(a);
		float r = Environment.getReward();
		SimpleEntry<State, Action> SApair = new SimpleEntry<State, Action>(s, a);
		if (!Returns.containsKey(SApair))
			Returns.put(SApair, new Vector<Float>());
		// Every previously visited pair receives this reward as part of its return.
		for (Vector<Float> pairReturns : Returns.values())
			pairReturns.add(r);
	}

	/**
	 * Off-policy Monte Carlo step. Behavior policy is e-greedy, estimation
	 * policy is greedy. Whenever the behavior action deviates from the greedy
	 * one, the episode bookkeeping is reset: only the trailing all-greedy
	 * segment of the episode is usable for the importance-weighted update in
	 * finishEpisode(). Rewards are then accumulated as in mcOnPMove().
	 */
	private void mcOffPMove() {
		State s = Environment.getState();
		int moveNr_Behavior = eGreedy(s);
		int moveNr_Estimation = greedy(s);
		if (moveNr_Behavior != moveNr_Estimation) {
			// Behavior diverged from the estimation policy: discard the
			// history collected so far and start over from this step.
			Returns = new HashMap<SimpleEntry<State, Action>, Vector<Float>>();
			SAHistory = new Vector<SimpleEntry<State, Action>>();
		}
		Action a = Tools.int2Action(moveNr_Behavior);
		updateLocation(a);
		float r = Environment.getReward();
		SimpleEntry<State, Action> SApair = new SimpleEntry<State, Action>(s, a);
		if (!Returns.containsKey(SApair)) {
			Returns.put(SApair, new Vector<Float>());
			SAHistory.add(SApair); // remember first-visit order for finishEpisode()
		}
		for (Vector<Float> pairReturns : Returns.values())
			pairReturns.add(r);
	}

	/**
	 * Episode post-processing for the Monte Carlo algorithms (no-op for the
	 * TD algorithms): folds the collected returns into Q and clears all
	 * per-episode bookkeeping.
	 */
	public void finishEpisode() {
		if (Algorithm.equals("MCOnP")) {
			for (SimpleEntry<State, Action> SApair : Returns.keySet()) {
				Vector<Float> SAreturns = Returns.get(SApair);
				if (SAreturns.isEmpty())
					continue;
				float total = 0;
				for (Float r : SAreturns)
					total += r;
				// Q(s,a) <- average observed return for the pair.
				Q[SApair.getKey().x][SApair.getKey().y][Tools.action2int(SApair.getValue())] = total / SAreturns.size();
			}
			Returns = new HashMap<SimpleEntry<State, Action>, Vector<Float>>();
		} else if (Algorithm.equals("MCOffP")) {
			for (int i = 0; i < SAHistory.size(); i++) {
				SimpleEntry<State, Action> SApair = SAHistory.elementAt(i);
				// Importance weight: product of 1/pi'(s_k, a_k) over the steps
				// from the pair's first visit to the end of the episode (the
				// e-greedy behavior policy takes the greedy action with
				// probability 1 - epsilon; j is only a step counter).
				float w = 1;
				for (int j = i; j < SAHistory.size(); j++)
					w *= 1 / (1 - Environment.epsilon);
				if (N.containsKey(SApair)) { // a pair present in N is also in D
					// .get(0) = R_t: the first reward recorded after the first
					// visit, since the history was reset on any non-greedy action.
					N.put(SApair, N.get(SApair) + w * Returns.get(SApair).get(0));
					D.put(SApair, D.get(SApair) + w);
				} else {
					N.put(SApair, w * Returns.get(SApair).get(0));
					D.put(SApair, w);
				}
				// Weighted importance-sampling estimate.
				Q[SApair.getKey().x][SApair.getKey().y][Tools.action2int(SApair.getValue())] = N.get(SApair) / D.get(SApair);
			}
			Returns = new HashMap<SimpleEntry<State, Action>, Vector<Float>>();
			N = new HashMap<SimpleEntry<State, Action>, Float>();
			D = new HashMap<SimpleEntry<State, Action>, Float>();
			SAHistory = new Vector<SimpleEntry<State, Action>>();
		}
	}

	/** One Q-learning step: e-greedy action, then an off-policy max backup. */
	private void qMove() {
		State s = Environment.getState();
		int moveNr = eGreedy(s); // softmax(s) is an alternative selector
		updateLocation(Tools.int2Action(moveNr));

		State newState = Environment.getState();
		int reward = Environment.getReward();
		float oldQvalue = Q[s.x][s.y][moveNr];
		float maxNewStateValue = Tools.max(Q[newState.x][newState.y]);
		// Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
		Q[s.x][s.y][moveNr] = oldQvalue + Alpha * (reward + Gamma * maxNewStateValue - oldQvalue);
	}

	/**
	 * One Sarsa step: e-greedy action, then an on-policy backup.
	 * NOTE(review): the next action used in the backup is a fresh e-greedy
	 * draw that is NOT stored, so the action actually executed on the next
	 * step is sampled again — textbook Sarsa reuses the same draw. Confirm
	 * this is intended before changing it.
	 */
	private void sarsaMove() {
		State s = Environment.getState();
		int moveNr = eGreedy(s);
		updateLocation(Tools.int2Action(moveNr));
		State newState = Environment.getState();
		int reward = Environment.getReward();
		float oldQvalue = Q[s.x][s.y][moveNr];
		float nextStateValue = Q[newState.x][newState.y][eGreedy(newState)];
		// Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))
		Q[s.x][s.y][moveNr] = oldQvalue + Alpha * (reward + Gamma * nextStateValue - oldQvalue);
	}

	/**
	 * Collects every action whose Q-value ties for the maximum in state s.
	 * Compares primitive floats: the previous code compared boxed Float
	 * references with ==, which is object identity for non-cached Floats and
	 * silently dropped tied actions from the candidate list, so tie-breaking
	 * among equal Q-values (e.g. at initialization) was not uniform.
	 */
	private Vector<Integer> maxValueActions(State s) {
		Vector<Integer> maxMoves = new Vector<Integer>();
		float maxQValue = Float.NEGATIVE_INFINITY;
		for (int action = 0; action < 5; action++) {
			float qValue = Q[s.x][s.y][action];
			if (qValue > maxQValue) {
				maxMoves.clear();
				maxMoves.add(action);
				maxQValue = qValue;
			} else if (qValue == maxQValue) {
				maxMoves.add(action);
			}
		}
		return maxMoves;
	}

	/**
	 * Epsilon-greedy selection: with probability 1 - Epsilon a uniformly
	 * random maximizing action, otherwise a uniformly random action.
	 *
	 * @return an action index in [0, 5)
	 */
	private int eGreedy(State s) {
		float randomFloat = Tools.RandomGenerator.nextFloat();
		if (randomFloat > Epsilon) {
			Vector<Integer> maxMoves = maxValueActions(s);
			return maxMoves.get(Tools.RandomGenerator.nextInt(maxMoves.size()));
		}
		return Tools.RandomGenerator.nextInt(5); // explore
	}

	/**
	 * Greedy selection: a uniformly random action among those maximizing Q.
	 *
	 * @return an action index in [0, 5)
	 */
	private int greedy(State s) {
		Vector<Integer> maxMoves = maxValueActions(s);
		return maxMoves.get(Tools.RandomGenerator.nextInt(maxMoves.size()));
	}

	/**
	 * Boltzmann (softmax) action selection with temperature tau. Currently
	 * not called by move(); kept as an alternative to eGreedy() (see qMove()).
	 *
	 * @return an action index in [0, 5)
	 */
	private int softmax(State s) {
		Vector<Float> probabilities = new Vector<Float>();
		float total = 0;
		for (int action = 0; action < 5; action++) {
			float v = (float) Math.exp(Q[s.x][s.y][action] / tau);
			total += v;
			probabilities.add(v);
		}
		// Normalise
		for (int action = 0; action < 5; action++)
			probabilities.set(action, probabilities.get(action) / total);
		// Select by inverse-CDF sampling.
		float randomFloat = Tools.RandomGenerator.nextFloat();
		float inc = 0;
		for (int action = 0; action < 5; action++) {
			inc += probabilities.get(action);
			if (inc > randomFloat)
				return action;
		}
		// Float rounding can leave inc marginally below 1: fall back to the
		// last valid action (the previous code returned the out-of-range
		// index 5 here).
		return 4;
	}
}
