package tetris.agent;

import tetris.simulator.State;


public class Agent {

	/** Feature weights and board-evaluation helpers learned by the genetic algorithm. */
	public GeneticAIFinder ga = new GeneticAIFinder();

	/**
	 * Greedy one-ply lookahead: evaluates every legal placement of the current
	 * piece and returns the index of the highest-scoring one.
	 *
	 * @param s          current board state (not mutated; evaluated on a copy)
	 * @param legalMoves nx2 matrix of the n possible actions for the current piece;
	 *                   each row is the orientation and column to place the piece
	 * @return index into {@code legalMoves} of the action to execute
	 *         (0 if {@code legalMoves} is empty)
	 */
	public int chooseAction(State s, int[][] legalMoves)
	{
		// Work on a copy so the caller's state is never touched.
		State sCopy = new State(s);

		// BUG FIX: scores are routinely negative (most weights are negative),
		// so the running best must start at -infinity. Starting at 0 made the
		// agent silently pick action 0 whenever every move scored below zero.
		double bestReward = Double.NEGATIVE_INFINITY;
		int bestAction = 0;

		for (int i = 0; i < legalMoves.length; i++) {
			double reward = getScore(sCopy, i);
			if (reward > bestReward) {
				bestReward = reward;
				bestAction = i;
			}
		}
		return bestAction;
	}

	/**
	 * Loads the best policy found so far into {@code ga}'s weight fields.
	 * The string below is the top chromosome from a GA run (fitness 4662);
	 * a shorter 7-weight run (fitness 2936) was:
	 * 0.93, 32.26, -36.01, 1.42, -11.83, -17.5, -0.64
	 */
	public void setWeights()
	{
		String str = "9.5, 34.13, -25.9, 0.57, -8.64, -20.26, -8.11, 7.81, -2.39, -4.49";
		// parseDouble tolerates the leading space left by split(","), but trim
		// keeps the intent explicit.
		String[] vals = str.split(",");
		int i = 0;
		ga.wHeight    = Double.parseDouble(vals[i++].trim());
		ga.wArea      = Double.parseDouble(vals[i++].trim());
		ga.wHoles     = Double.parseDouble(vals[i++].trim());
		ga.wBlockades = Double.parseDouble(vals[i++].trim());
		ga.wFlat      = Double.parseDouble(vals[i++].trim());
		ga.wWell      = Double.parseDouble(vals[i++].trim());
		ga.wLines     = Double.parseDouble(vals[i++].trim());
		ga.wWall      = Double.parseDouble(vals[i++].trim());
		ga.wBlock     = Double.parseDouble(vals[i++].trim());
		ga.wFree      = Double.parseDouble(vals[i++].trim());
	}

	/** Prints the current weight vector (debug aid), in the same order as setWeights(). */
	public void printWeights() {
		String s = " " + ga.wHeight
				+ " " + ga.wArea
				+ " " + ga.wHoles
				+ " " + ga.wBlockades
				+ " " + ga.wFlat
				+ " " + ga.wWell
				+ " " + ga.wLines
				+ " " + ga.wWall
				+ " " + ga.wBlock
				+ " " + ga.wFree;
		System.out.println(s);
	}

	/**
	 * Scores the board that results from applying {@code action} to a copy of
	 * {@code s}, as a weighted sum of board features (height, filled area,
	 * holes, blockades, flatness, wells, cleared lines, wall/block adjacency,
	 * free edges).
	 *
	 * @param s      state to evaluate from (not mutated)
	 * @param action index of the legal move to simulate
	 * @return weighted heuristic score; higher is better
	 */
	public double getScore(State s, int action) {
		State newState = new State(s);
		newState.makeMove(action);

		// Cache the column-height array and compute max height and total filled
		// area in a single pass (the original re-called getTop() per element).
		int[] top = newState.getTop();
		int maxTop = 0;
		int sumArea = 0;
		for (int h : top) {
			if (h > maxTop) {
				maxTop = h;
			}
			sumArea += h;
		}
		double metricHeight = State.ROWS - maxTop;
		double metricArea = State.ROWS * State.COLS - sumArea;

		// Deliberate toggle: comment this out when evaluating freshly learned
		// weights instead of the stored best policy.
		setWeights();

		int[] adjacent = s.findAdjacency2(newState);
		int[] holesNBlockades = newState.findHolesNBlockades();
		int[] flatNwell = newState.findFlatnWell();
		int lines = newState.findLines(maxTop);

		double score = metricHeight * ga.wHeight
				+ metricArea * ga.wArea
				+ holesNBlockades[0] * ga.wHoles
				+ holesNBlockades[1] * ga.wBlockades
				+ flatNwell[0] * ga.wFlat
				+ flatNwell[1] * ga.wWell
				+ lines * ga.wLines
				+ adjacent[0] * ga.wWall
				+ adjacent[1] * ga.wBlock
				+ adjacent[2] * ga.wFree;
		return score;
	}
}
