package tetris.agent;

import tetris.simulator.State;


public class Agent {

	//implement this function to have a working system
	//Inputs:
	//- s is the current state of the board
	//- legalMoves is a nx2 matrix of the n possible actions for the current piece.
	//	An action is the orientation & column to place the current piece
	//Outputs:
	//- index n of the action to execute in legalMoves
	public int chooseAction(State s, int[][] legalMoves)
	{
		// Greedy one-step lookahead: simulate each legal move on a copy of the
		// board and keep the action with the highest heuristic reward.
		// Start at -infinity (not 0) so that even when every move scores <= 0
		// we still pick the best of the bad options rather than defaulting to
		// action 0 unexamined. Ties keep the earliest index.
		double bestReward = Double.NEGATIVE_INFINITY;
		int bestAction = 0;

		for (int i = 0; i < legalMoves.length; i++) {
			// oneStepReward copies the state internally, so passing s directly
			// is safe — the caller's state is never mutated.
			double reward = oneStepReward(s, i);
			if (reward > bestReward) {
				bestReward = reward;
				bestAction = i;
			}
		}
		return bestAction;
	}

	// Heuristic reward for applying the given action (index into the current
	// piece's legal moves) to a copy of state s. Combines two "bigger is
	// better" metrics of the resulting board:
	//  - metricHeight: free rows above the tallest column (penalizes height)
	//  - metricArea:   free cells above the column tops (penalizes bulk)
	// weighted 0.9 / 0.1 in favor of keeping the stack low.
	private double oneStepReward(State s, int action) {
		State newState = new State(s); // simulate on a copy; s stays untouched
		newState.makeMove(action);

		// Hoist the column-heights array — the original re-fetched it on
		// every loop iteration.
		int[] top = newState.getTop();

		int maxTop = 0;
		for (int t : top) {
			if (t > maxTop) {
				maxTop = t;
			}
		}
		double metricHeight = State.ROWS - maxTop;

		int sumArea = 0;
		for (int t : top) {
			sumArea += t;
		}
		double metricArea = State.ROWS * State.COLS - sumArea;

		return metricHeight * 0.9 + metricArea * 0.1;
	}
}
