package team6.ai;

import team6.*;
import team6.gamegui.GameState;
import team6.gametree.*;

/**
 * An AI that adapts its search depth to the opponent's skill.
 * It starts with a shallow (depth-1) game tree and deepens the search by two
 * plies each time the opponent's play shows measurable improvement, up to
 * {@code maxSearchDepth}.
 */
public class LearnBot extends LearningAI {
	/**
	 * The maximum depth to build the tree to
	 */
	int maxSearchDepth=5;
	/**
	 * The current depth to build the tree to.  Must be odd and maxes out maxSearchDepth.
	 */
	int depthOfSearch=1;
	/**
	 * The tree that is rebuilt for each move.  It is depthOfSearch deep.
	 */
	UtilityTree gameTree;
	/**
	 * double[]:  Constants for the utilityFunction
	 * Format is: 1st constant is value of pieces
	 * 2nd constant is value of opponent moves (usually negative)
	 * 3rd constant is value of corners
 	 * Constants for this AI: {20, -3, 300}
	 */
	double[] constants = {20.0, -3.0, 300.0};
	/**
	 * The utility from the opponent on the last check.
	 * Initialized to Double.NEGATIVE_INFINITY as a "not yet measured" sentinel
	 * (a value utilityFunction can never produce).
	 */
	double prevUtility1=Double.NEGATIVE_INFINITY;
	/**
	 * The utility from the opponent two checks ago.
	 * Initialized to Double.NEGATIVE_INFINITY as a "not yet measured" sentinel.
	 */
	double prevUtility2=Double.NEGATIVE_INFINITY;

    /**
     * Constructs a LearnBot
     * @param player The player (Black or White) to be represented by LearnBot
     * @throws java.lang.IllegalArgumentException Player should not be Empty
     */
	public LearnBot(Player player)
			throws IllegalArgumentException {
		super(player);
		gameTree = new UtilityTree(depthOfSearch, constants);
		System.out.print(this.descriptionOfAI());
	}

	/**
	 * Varies the depth of the utilityTree's search based on player behavior.
	 * Starts at 1 and progresses to maxSearchDepth.
	 * Formula is as follows:
	 * If (Utility Before Moving One Turn Ago < Utility Before Moving Two Turns Ago - (Value of Number of Pieces * Total Pieces)/33.3
	 * Note that the human player's utility is negative, hence the formula uses - Value of Pieces
	 * Then Increase Depth of Search and therefore difficulty
	 *
	 * Implementation is otherwise the same as MinMax AI.
	 * @param boardState The board state that the AI must move in
	 * @param lastMove Either the actual last move, or a dummy last move that points
	 * to the player who's turn it is.
	 * @return The {row, column} coordinates of the move chosen by the game tree.
	 */
	public int[] makeMove(Player[][] boardState, int[] lastMove) {
		GameState state = new GameState(boardState, lastMove);
		state.setPlayer(lastMove);
		// Shift the utility history: the opponent's position is re-evaluated
		// each turn so we can compare it against the measurement before it.
		prevUtility2=prevUtility1;
		prevUtility1=gameTree.utilityFunction(state);
		// The opponent improved by more than ~3% of the piece value on the
		// board (their utility is negative, so a drop means they got better):
		// deepen the search in response.
        if (prevUtility1 < prevUtility2 - constants[0]*state.numTotPieces()/33.3)
        	incDepth();
        gameTree.changeDepth(depthOfSearch);
		gameTree.buildTree(state);

		int[] bestMove = gameTree.bestMove();
		state.moveNoGui(bestMove[0], bestMove[1]);
		state.switchTurn();

        /* "Learn" here.
         * As time goes on, the computer will learn to think ahead.
         * The more time that passes, the farther it will begin to think.
         */
		System.out.println("I am currently thinking " + depthOfSearch + " moves ahead");

		// Return the move already computed above rather than asking the
		// tree to recompute it a second time.
		return bestMove;
	}

	/**
	 * Increases the depth of the search until it reaches maxSearchDepth.
	 * Does nothing during the first two moves, while either utility sample
	 * still holds its "not yet measured" sentinel.
	 */
	private void incDepth() {
		// Depth steps by 2 to remain odd (the tree requires an odd depth).
		if(depthOfSearch<maxSearchDepth && prevUtility1!=Double.NEGATIVE_INFINITY && prevUtility2!=Double.NEGATIVE_INFINITY) {
			depthOfSearch+=2;
			System.out.println("Opponent has proven able. Increasing brain activity.");
		}
	}

    /**
     * Returns a description of the AI as a string
     * @return Returns a string describing the AI.
     */
    public String descriptionOfAI() {
        return "LearnBot bases its depth of search on current player's performance.\n" +
        		"If the AI consistently performs badly or the player performs well the depth is increased until it is\n" +
        		"maxSearchDepth, which is 5.  If the AI isn't being difficult enough, you aren't beating it badly enough.\n\n" +
        		"The AI decides to increase its difficulty based on the following formula:\n" +
        		"If (Utility Before Moving One Turn Ago < Utility Before Moving Two Turns Ago - (Value of Number of Pieces * Total Pieces)/33.3\n" +
        		"Increase depth of search (increase difficulty)\n\n" +
        		"Skips increasing difficulty on the first two moves\n" +
        		"This means the AI will increase difficulty if the player manages to decrease their utility the value of 3% the total pieces.\n" +
        		"Since human player utility is negative, decreasing utility is evidence of an increase in skill.\n" +
        		"Because corners are so highly valued, taking a corner will most of the time cause the AI to increase difficulty\n";
    }
}
