package edu.columbia.aicheckers.agent;


import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Set;

import javax.swing.tree.DefaultTreeModel;
import edu.columbia.aicheckers.model.Color;
import edu.columbia.aicheckers.model.IBoardState;
import edu.columbia.aicheckers.model.Move;

/**
 * An Intelligent Agent that implements the Minimax algorithm
 * @author C.M.R.
 *
 */

public class MinimaxAgent implements IAgent
{
	/** Rolling window of the last few moves this agent chose (capped at 4; kept for stalemate bookkeeping). */
	private Queue<Move> pastMoves = new LinkedList<Move>();

	/** Maximum look-ahead depth (in plies) for the minimax search. */
	int depth;

	/**
	 * Creates an agent that searches the game tree to the given depth.
	 * @param depth maximum look-ahead depth in plies; 0 means evaluate the immediate successor states only
	 */
	public MinimaxAgent(int depth)
	{
		this.depth = depth;
	}


	/**
	 * Creates a minimax tree of utilities rooted at the given board state.
	 * @param board the state to expand
	 * @param myColor the color this agent plays
	 * @return the root wrapper with children populated and utilities assigned
	 */
	private BoardStateWrapper makeMinimaxTree(IBoardState board, Color myColor)
	{
		BoardStateWrapper bsw = new BoardStateWrapper(board);
		depthFirstSearch(bsw, myColor, depth, 0);
		return bsw;
	}

	/**
	 * Recursively expands the game tree below {@code root} and assigns minimax utilities.
	 * <p>
	 * At the depth limit (or when the side to move has no legal moves) the node's
	 * utility is the static evaluation from {@link #getUtility}. Otherwise the node
	 * takes the MAXIMUM of its children's utilities when it is this agent's turn to
	 * move, and the MINIMUM when it is the opponent's turn — true minimax. (The
	 * previous implementation summed the children's utilities and ignored whose
	 * turn it was, which is not minimax.)
	 *
	 * @param root     wrapper around the state to expand; its utility and children are filled in
	 * @param myColor  the color this agent plays (the maximizing side)
	 * @param maxDepth depth limit for the search
	 * @param depth    current depth of {@code root} (0 at the search root)
	 */
	public void depthFirstSearch(BoardStateWrapper root, Color myColor, int maxDepth, int depth)
	{
		if(depth == maxDepth)
		{
			root.utility = getUtility(root.s, myColor, depth);
			return;
		}

		IBoardState state = root.s;
		boolean myMove = (state.getCurrentPly() == myColor);
		Set<Move> moves = state.getAvailableMoves(state.getCurrentPly());

		if(moves.isEmpty())
		{
			// No legal moves for the side to play: fall back to the static
			// evaluation rather than silently leaving the utility at 0.
			root.utility = getUtility(state, myColor, depth);
			return;
		}

		// Maximize on our own turn, minimize on the opponent's.
		int best = myMove ? Integer.MIN_VALUE : Integer.MAX_VALUE;
		for (Move m : moves)
		{
			IBoardState next = state.simulateMove(m);

			BoardStateWrapper child = new BoardStateWrapper(next);
			child.m = m;
			root.children.add(child);
			depthFirstSearch(child, myColor, maxDepth, depth + 1);

			best = myMove ? Math.max(best, child.utility)
			              : Math.min(best, child.utility);
		}
		root.utility = best;
	}

	/**
	 * Picks the move with the highest minimax utility for {@code myColor}.
	 *
	 * @param state          the current board state
	 * @param myColor        the color this agent plays
	 * @param checkStalemate retained for interface compatibility (currently unused)
	 * @return the best available move, or {@code null} if there are no legal moves
	 */
	public Move moveSelection(IBoardState state, Color myColor, boolean checkStalemate)
	{
		Set<Move> moves = state.getAvailableMoves(myColor);
		if(moves.isEmpty())
		{
			return null;
		}

		// NEGATIVE_INFINITY, not Double.MIN_VALUE: MIN_VALUE is the smallest
		// *positive* double, so it is not a valid "worse than anything" sentinel.
		double maxUtility = Double.NEGATIVE_INFINITY;
		Move bestMove = null;
		for(Move m : moves)
		{
			IBoardState next = state.simulateMove(m);
			BoardStateWrapper w = new BoardStateWrapper(next);
			w.m = m;

			depthFirstSearch(w, myColor, depth, 0);

			if(w.utility > maxUtility)
			{
				maxUtility = w.utility;
				bestMove = m;
			}
		}

		// Remember the last few chosen moves (window of 4).
		pastMoves.add(bestMove);
		if(pastMoves.size() > 4)
		{
			pastMoves.remove();
		}

		return bestMove;
	}

	/**
	 * Search-tree node: a board state plus the move that produced it, its
	 * minimax utility, and its expanded children.
	 */
	private	class BoardStateWrapper
	{
		public int utility = 0;

		IBoardState s;
		Move m;

		ArrayList<BoardStateWrapper> children = new ArrayList<BoardStateWrapper>();

		public BoardStateWrapper(IBoardState board)
		{
			this.s = board;
		}
	}


	/**
	 * Returns the static utility of a board state for the given color.
	 * Defined as the checker-count difference, shifted by +12 so the value is
	 * always &gt;= 0 (12 means the material is even).
	 *
	 * @param state   the board state to evaluate
	 * @param myColor the color from whose perspective to evaluate
	 * @param depth   retained for interface compatibility (currently unused)
	 * @return the non-negative material utility
	 */
	public static int getUtility(IBoardState state, Color myColor, int depth)
	{
		//Defines the utility of the BoardState as the difference in the number of checkers normalized to be >= zero
		//A utility of 12 means that the difference in number of checkers is zero
		int utility = state.getNumCheckers(myColor) - state.getNumCheckers(Color.getOponnentColor(myColor)) + 12;

		return utility;
	}



	/**
	 * Performs the agent's ply: runs the minimax search and returns the best move.
	 * @param board   the current board state
	 * @param myColor the color this agent plays
	 * @return the chosen move, or {@code null} if no move is available
	 */
	public Move doPly(IBoardState board, Color myColor)
	{
		return moveSelection(board, myColor, true);
	}

}