package team3;

import project2.AI.Player;

/**An AI that tries to learn what its opponent is doing.
 * <p>
 * Uses a learning goodness function which changes itself based on the
 * last move made by an enemy. Also utilizes a variable depth search of the min
 * max tree, to take advantage of the fact that the end-of-game search space is
 * much smaller than the middle game search space.
 * @see team3.LearningFunction
 */
public class LearningAI extends project2.LearningAI
{
    /** Width/height of the (square) Othello board. */
    private static final int BOARD_SIZE = 8;

    /** Board as it looked when the opponent made their last move; used to
     * validate that move and to feed the learning function. */
    private BoardState previousState;
    /** Goodness (evaluation) function that adapts itself to the opponent's play. */
    private LearningFunction goodness;

    /**Constructs a Learning AI.
     * @param player The player the AI should play as.
     */
    public LearningAI(Player player)
    {
        super(player);
        previousState = BoardState.getStartingBoard();
        goodness = new LearningFunction(getPlayer());
    }

    /**Updates the learning function, then uses it to find the best available
     * move.
     * @param boardState Current state of the board
     * @param lastMove Move made by the evil enemies of <code>getPlayer()</code>
     * @return A legal move, or BoardState.INVALID_MOVE
     */
    public int[] makeMove(Player[][] boardState, int[] lastMove)
    {
        //The following protects our Learning AI from possible
        //  differences in how another team represents invalid moves.
        //This primarily prevents issues such as undesired null pointer exceptions.
        AI.Player otherPlayer = getPlayer() == AI.Player.BLACK ? AI.Player.WHITE : AI.Player.BLACK;
        if (!previousState.moveIsValid(otherPlayer, lastMove))
            lastMove = BoardState.INVALID_MOVE;
        //previousState is the state the enemy made his move on
        goodness.updateGoodness(lastMove, previousState);

        BoardState currentState = new BoardState(boardState);
        MinMaxTree minMax = new MinMaxTree(currentState, getPlayer());
        int depth = getDepth(currentState);

        int[] move = minMax.getBestMove(depth, goodness);

        //Update previous state with what the enemy will be faced with next
        previousState = currentState.makeMove(getPlayer(), move);
        //Debug/trace output of the reasoning behind this move.
        System.out.println(getPlayer() + ": " + goodness.getReason());

        return move;
    }

    /** Calculates the ideal depth at which to explore the board,
     * given that towards the end of the game, state can be explored much deeper
     * than at start/middle, due to computation constraints.
     * <p>
     * NOTE(review): the thresholds are intentionally non-monotonic — the early
     * game (few legal moves) affords depth 4, the midgame (large branching
     * factor) only depth 3, and the endgame progressively deeper. These values
     * look hand-tuned; confirm before rearranging them.
     * @param state Board state whose remaining moves determine the depth.
     * @return The depth at which to evaluate the min max tree
     */
    private int getDepth(BoardState state)
    {
        int movesLeft = getMovesLeft(state);
        if (movesLeft > 50)
            return 4;
        if (movesLeft > 15)
            return 3;
        if (movesLeft > 10)
            return 5;
        if (movesLeft > 8)
            return 6;
        return movesLeft; //at end, explore all the way to the bottom
    }

    /**Calculates roughly how many moves remain in the game.
     * <p>
     * Performs the calculation by determining how many tokens are on the board,
     * and subtracting that from a full board: does not account for early
     * end of game, or cases where a player has no legal move other than passing.
     * @param state Board State to evaluate for
     * @return Estimated number of turns remaining in the game.
     */
    private int getMovesLeft(BoardState state)
    {
        //Hoisted out of the loop: the original called getState() once per cell.
        Player[][] grid = state.getState();
        int count = 0;
        for (int row = 0; row < BOARD_SIZE; row++)
            for (int col = 0; col < BOARD_SIZE; col++)
                if (grid[row][col] != AI.Player.EMPTY)
                    count++;
        return BOARD_SIZE * BOARD_SIZE - count;
    }

    /**@return A description of the reasoning used behind the last move made by the AI.
     */
    public String descriptionOfAI()
    {
        return goodness.getReason();
    }
}
