package a3;

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.TreeSet;
import java.util.Vector;

import org.apache.commons.math.optimization.GoalType;
import org.apache.commons.math.optimization.OptimizationException;
import org.apache.commons.math.optimization.RealPointValuePair;
import org.apache.commons.math.optimization.linear.LinearConstraint;
import org.apache.commons.math.optimization.linear.LinearObjectiveFunction;

import org.apache.commons.math.optimization.linear.Relationship;
import org.apache.commons.math.optimization.linear.SimplexSolver;



/**
 * Base class for a reinforcement-learning agent on a toroidal grid world.
 *
 * Two learning algorithms are supported, selected by the {@code Algorithm}
 * string passed to the constructor:
 * <ul>
 *   <li>{@code "Q"} — standard e-greedy Q-learning, and</li>
 *   <li>{@code "miniMax"} — minimax-Q (Littman), where the mixed policy pi and
 *       state value V are obtained by solving a small linear program over the
 *       joint action-value table Q(s, a, o).</li>
 * </ul>
 * Subclasses supply the reward signal via {@link #getReward()}.
 */
public abstract class Agent {
	protected Location Location;                       // current grid position (wraps at board edges)
	protected Environment Environment;                 // the world this agent acts in
	protected Vector<Action> PossibleActions;          // the four cardinal moves; "stay" (0,0) is appended per decision
	protected Q q;                                     // Q(s,a) table used by plain Q-learning
	protected float Alpha;                             // Q-learning step size
	protected float Gamma;                             // discount factor
	protected float Epsilon;                           // exploration probability
	protected String Algorithm;                        // "Q" or "miniMax"
	protected SimpleEntry<State, Action> lastSApair;   // state/action chosen by the most recent planAction()
	protected MiniMax miniMax;                         // minimax-Q bookkeeping: pi(s,a), V(s), Q(s,a,o), own alpha
	int ID;

	/**
	 * @param spawn     initial location on the board
	 * @param env       environment the agent lives in
	 * @param alpha     Q-learning step size
	 * @param gamma     discount factor
	 * @param epsilon   exploration rate for the e-greedy / minimax policies
	 * @param algorithm learning algorithm, {@code "Q"} or {@code "miniMax"}
	 * @param initQ     initial value for unseen Q(s,a) entries
	 * @param ID        agent identifier, forwarded into {@link State}
	 */
	public Agent(Location spawn, Environment env, float alpha, float gamma, float epsilon, String algorithm, float initQ, int ID) {
		this.ID = ID;
		Location = spawn;
		Environment = env;
		q = new Q(initQ);
		Algorithm = algorithm;
		Gamma = gamma;
		Epsilon = epsilon;
		Alpha = alpha;
		miniMax = new MiniMax();
		// The four cardinal moves. The no-op action (0,0) is deliberately NOT
		// stored here; callers of getActions() append it themselves so that
		// PossibleActions stays movement-only.
		PossibleActions = new Vector<Action>();
		PossibleActions.add(new Action( 1,  0));
		PossibleActions.add(new Action( 0,  1));
		PossibleActions.add(new Action(-1,  0));
		PossibleActions.add(new Action( 0, -1));
	}

	/**
	 * Chooses the next action for the current state using the configured
	 * algorithm, or {@code null} for an unrecognized algorithm string.
	 */
	public Action planAction() {
		State s = getState();
		if (Algorithm.equals("Q"))
			return planActionQ(s);
		else if (Algorithm.equals("miniMax"))
			return planActionMinMax(s);
		return null;
	}

	/**
	 * Minimax-Q action selection: with probability 1-Epsilon, roulette-wheel
	 * samples an action from the learned mixed policy pi(s, .); otherwise
	 * picks uniformly at random. The chosen (state, action) pair is recorded
	 * in {@code lastSApair} for the subsequent update step.
	 */
	private Action planActionMinMax(State s) {
		Action actiontoTake = null;
		Vector<Action> actions = getActions();
		actions.add(new Action(0, 0)); // allow standing still
		float randomExploreFloat = Tools.RandomGenerator.nextFloat();
		if (randomExploreFloat > Epsilon) {
			// Roulette-wheel sampling over pi(s, a).
			float randomActionFloat = Tools.RandomGenerator.nextFloat();
			float cumulative = 0;
			for (Action a : actions) {
				SimpleEntry<State, Action> sa = new SimpleEntry<State, Action>(s, a);
				cumulative += miniMax.getPi(sa);
				if (cumulative > randomActionFloat) {
					actiontoTake = a;
					break;
				}
			}
		} else {
			actiontoTake = actions.get(Tools.RandomGenerator.nextInt(actions.size()));
		}
		// pi may sum to less than the drawn value (e.g. the LP has never run
		// for s, or rounding), so fall back to a uniformly random action.
		if (actiontoTake == null)
			actiontoTake = actions.get(Tools.RandomGenerator.nextInt(actions.size()));
		lastSApair = new SimpleEntry<State, Action>(s.clone(), actiontoTake);
		return actiontoTake;
	}

	/**
	 * Q-learning action selection: e-greedy over Q(s, .). Records the chosen
	 * (state, action) pair for the subsequent update step.
	 */
	private Action planActionQ(State s) {
		Action a = eGreedy(s);
		lastSApair = new SimpleEntry<State, Action>(s.clone(), a);
		return a;
	}

	Location getLocation() {
		return Location;
	}

	public void setLocation(Location newloc) {
		Location = newloc;
	}

	/**
	 * Executes the action chosen by the last planAction() call, wrapping the
	 * position torus-style at the board edges. Must be called after a
	 * planAction*() method has populated {@code lastSApair}.
	 */
	public void takeAction() {
		Action a = lastSApair.getValue();
		Location.x = Tools.mod((Location.x + a.x), Environment.BoardSize);
		Location.y = Tools.mod((Location.y + a.y), Environment.BoardSize);
	}

	/**
	 * Runs the learning update for the configured algorithm.
	 * @return the reward received this step (0 for an unrecognized algorithm)
	 */
	public int update() {
		if (Algorithm.equals("Q")) {
			return updateQ();
		} else if (Algorithm.equals("miniMax")) {
			return updateMiniMax();
		}
		return 0;
	}

	/**
	 * Minimax-Q update: refreshes Q(s, a, o) with the observed reward and the
	 * current V of the successor state, then re-solves the linear program
	 *
	 *   maximize V' over the probability simplex pi, subject to
	 *   sum_a pi(a) * Q(s, a, o) >= V'  for every opponent action o,
	 *
	 * storing the resulting mixed policy pi(s, .) and value V(s).
	 * @return the reward received this step
	 */
	private int updateMiniMax() {
		int reward = getReward();
		State s = lastSApair.getKey();
		Action a = lastSApair.getValue();
		Action o = Environment.getPrey().lastSApair.getValue();
		Sao sao = new Sao(s, a, o);
		Float oldSaoValue = miniMax.getQ(sao);
		// Standard temporal-difference step using miniMax's own (decaying) alpha.
		float newSaoValue = (1f - miniMax.alpha) * oldSaoValue
				+ miniMax.alpha * (reward + Gamma * miniMax.getV(getState()));
		miniMax.setQ(sao, newSaoValue);

		Vector<Action> actions = getActions();
		actions.add(new Action(0, 0));
		Vector<Action> oppActions = getActions();
		oppActions.add(new Action(0, 0));
		int n = actions.size(); // 4 moves + stay
		// Qa[i][j] = Q(s, myAction j, oppAction i).
		double[][] Qa = new double[n][n];
		for (int i = 0; i < n; i++) {
			Action oppA = oppActions.get(i);
			for (int j = 0; j < n; j++) {
				Qa[i][j] = miniMax.getQ(new Sao(s, actions.get(j), oppA));
			}
		}

		// LP variables: pi(a_0)..pi(a_{n-1}), plus the value variable V in slot n.
		double[] objective = new double[n + 1];
		objective[n] = 1; // optimize V only
		LinearObjectiveFunction f = new LinearObjectiveFunction(objective, 0);
		Collection<LinearConstraint> constraints = new ArrayList<LinearConstraint>();
		// For every opponent reply: expected payoff of pi must be at least V.
		for (int i = 0; i < n; i++) {
			double[] row = new double[n + 1];
			System.arraycopy(Qa[i], 0, row, 0, n);
			row[n] = -1;
			constraints.add(new LinearConstraint(row, Relationship.GEQ, 0));
		}
		// pi is a probability distribution: components sum to 1 ...
		double[] simplexRow = new double[n + 1];
		for (int j = 0; j < n; j++)
			simplexRow[j] = 1;
		constraints.add(new LinearConstraint(simplexRow, Relationship.EQ, 1));
		// ... and each component is non-negative.
		for (int j = 0; j < n; j++) {
			double[] nonNeg = new double[n + 1];
			nonNeg[j] = 1;
			constraints.add(new LinearConstraint(nonNeg, Relationship.GEQ, 0));
		}

		RealPointValuePair solution;
		try {
			// GoalType.MINIMIZE matches the original formulation; the GEQ rows
			// above bound V from below by every opponent response.
			solution = new SimplexSolver().optimize(f, constraints, GoalType.MINIMIZE, true);
			double[] point = solution.getPoint();
			for (int j = 0; j < n; j++) {
				miniMax.setPi(new SimpleEntry<State, Action>(s, actions.get(j)), (float) point[j]);
			}
			System.out.println(point[0] + " " + point[1] + " " + point[2] + " " + point[3]
					+ " " + point[4] + " " + point[n] + " SET V TO " + (float) point[n]);
			miniMax.setV(s, (float) point[n]);
		} catch (OptimizationException e) {
			// Best-effort: if the LP is infeasible or the solver fails, keep the
			// previous pi/V for this state and continue learning.
		}

		miniMax.decay();
		return reward;
	}

	/** Reward signal for the current step; supplied by concrete agents. */
	abstract int getReward();

	/**
	 * One tabular Q-learning step:
	 * Q(s,a) += Alpha * (r + Gamma * max_a' Q(s',a') - Q(s,a)).
	 * @return the reward received this step
	 */
	private int updateQ() {
		int reward = getReward();
		float oldQvalue = q.get(lastSApair);
		float maxNewStateValue = getMax(getState());
		float newQvalue = oldQvalue + Alpha * ((float) reward + Gamma * maxNewStateValue - oldQvalue);
		q.set(lastSApair, newQvalue);
		return reward;
	}

	/**
	 * @return a fresh copy of the four movement actions; callers may mutate it
	 *         (e.g. to append the no-op action) without affecting this agent.
	 */
	public Vector<Action> getActions() {
		@SuppressWarnings("unchecked") // Vector.clone() returns Object; contents are Actions by construction
		Vector<Action> v = (Vector<Action>) PossibleActions.clone();
		return v;
	}

	/** Builds the agent's view of the current world state. */
	State getState() {
		return new State(Environment, ID);
	}

	/**
	 * e-greedy selection over Q(s, .): with probability 1-Epsilon returns a
	 * uniformly random member of the argmax set, otherwise a uniformly random
	 * action (including the no-op).
	 */
	protected Action eGreedy(State s) {
		Vector<Action> actions = getActions();
		Vector<Action> maxActions = new Vector<Action>();
		actions.add(new Action(0, 0));
		Float maxQValue = null;
		for (Action a : actions) {
			Float QValue = q.get(new SimpleEntry<State, Action>(s, a));
			if (maxQValue == null) {
				maxQValue = QValue;
				maxActions.add(a);
			} else if (QValue.floatValue() == maxQValue.floatValue()) {
				// Tie with the current best: extend the argmax set.
				maxActions.add(a);
			} else if (QValue > maxQValue) {
				// Strictly better: restart the argmax set.
				maxActions = new Vector<Action>();
				maxQValue = QValue;
				maxActions.add(a);
			}
		}
		Action bestAction;
		float randomFloat = Tools.RandomGenerator.nextFloat();
		if (randomFloat > Epsilon) {
			bestAction = maxActions.get(Tools.RandomGenerator.nextInt(maxActions.size()));
		} else {
			bestAction = actions.get(Tools.RandomGenerator.nextInt(actions.size()));
		}
		return bestAction;
	}

	/** @return max over all actions (including no-op) of Q(s, a). */
	public Float getMax(State s) {
		Float maxvalue = null;
		Vector<Action> actions = getActions();
		actions.add(new Action(0, 0));
		for (Action a : actions) {
			Float value = q.get(new SimpleEntry<State, Action>(s, a));
			if (maxvalue == null || value > maxvalue)
				maxvalue = value;
		}
		return maxvalue;
	}

	/**
	 * Purely greedy selection: a uniformly random member of the argmax set of
	 * Q(s, .), no exploration.
	 */
	protected Action greedy(State s) {
		Vector<Action> actions = getActions();
		Vector<Action> maxActions = new Vector<Action>();
		actions.add(new Action(0, 0));
		Float maxQValue = null;
		for (Action a : actions) {
			Float QValue = q.get(new SimpleEntry<State, Action>(s, a));
			if (maxQValue == null) {
				maxQValue = QValue;
				maxActions.add(a);
			} else if (QValue.floatValue() == maxQValue.floatValue()) {
				// BUG FIX: the original compared boxed Floats with '==' (reference
				// equality), so value-equal Q entries were never recognized as
				// ties and the random tie-break rarely applied.
				maxActions.add(a);
			} else if (QValue > maxQValue) {
				maxActions = new Vector<Action>();
				maxQValue = QValue;
				maxActions.add(a);
			}
		}
		return maxActions.get(Tools.RandomGenerator.nextInt(maxActions.size()));
	}

}
