/*
 * Game project submission for:
 *  Class CSE 486 A of Miami University, Fall 2012
 *  
 * Created by:
 *  Reuben Smith (smithre5)
 *  Michael Jacobs (jacobsm)
 *  Jiang Nuo (jiangn)
 */

/* MIT License
 * 
 * Copyright (c) 2012 Reuben Smith, Michael Jacobs, Jiang Nuo, Miami University
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights 
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
 * copies of the Software, and to permit persons to whom the Software is 
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in 
 * all copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 
 * THE SOFTWARE.
 */

package breakthrough;

import game.GamePlayer;
import game.GameState;

/**
 * An extremely intelligent Breakthrough AI. You can tell by the use of Xi.
 * 
 * @author smithre5, jacobsm, jiangn
 */
public class DefensiveXiBreakthroughPlayer extends XiBreakthroughPlayer
{
    /** Total number of evaluation features this player scores. */
    public static final int FEATURE_COUNT = 9;

    /*
     * Indices into the inherited {@code features} list. Each entry is an
     * int[2] holding { weight, measured value }.
     */
    public static final int FEATURE_NUM_PLAYER_PIECES        = 0;
    public static final int FEATURE_NUM_OPPONENT_PIECES      = 1;
    public static final int FEATURE_OPPONENT_DIST_FROM_GOAL  = 2;
    public static final int FEATURE_PLAYER_DIST_FROM_GOAL    = 3;
    public static final int FEATURE_COVER_COMPARISON         = 4;
    public static final int FEATURE_SQUARES_OWNED_COMPARISON = 5;
    public static final int FEATURE_NUM_CONFLICTS            = 6;
    public static final int FEATURE_PENETRATION_COUNT        = 7; // TODO needs to be weighted heavily NEGATIVE
    public static final int FEATURE_SPREAD                   = 8; // we want spread to be a small number

    // Search-control constants. These intentionally shadow the superclass's
    // values so this player can be tuned independently.
    protected static final boolean DEBUG              = false;

    protected static final boolean SEARCH_ADJUST      = false;
    protected static final double  SEARCH_ADJUST_UP   = 0.25;
    protected static final double  SEARCH_ADJUST_DOWN = 1.05;
    protected static final int     SEARCH_DEPTH_START = SEARCH_ADJUST ? 0 : 4;
    protected static final int     SEARCH_DEPTH_STOP  = 16;
    protected static final boolean SEARCH_PRUNE       = true;

    /** Rough expected game length in moves, used for time budgeting. */
    private static final int AVERAGE_MOVES = 50;

    /** Score for a won state; a lost state scores {@code -WIN_SCORE}. */
    protected static final int WIN_SCORE = Integer.MAX_VALUE;

    /**
     * Utility class encapsulating points of interest for getStateScore.
     * Declared static because it never references the enclosing player
     * instance (avoids carrying a hidden outer-instance reference).
     */
    protected static class EvalVars
    {
        /** True once this side is detected to have already won. */
        public boolean hasWon;

        /** Direction of travel toward the goal row: +1 for HOME, -1 for AWAY. */
        public int  goalDelta;
        /** Minimum row distance of any of this side's pieces from its goal row. */
        public int  goalDistance;
        /** The row this side is trying to reach. */
        public int  goalRow;

        public int  pieces;           // number of pieces this side has on the board
        public int  conflict;         // diagonal contacts with enemy pieces (capture threats)
        public int  cover;            // diagonal support by friendly pieces
        public int  squaresOwned;     // NOTE: currently never populated -- see populateEvalVars
        public int  penetrationCount; // enemy pieces behind this side's main defensive row
        public int  spread;           // row distance between rear-most and front-most pieces

        /** Board symbol identifying this side's pieces. */
        public char symbol;


        /**
         * Fills variables with appropriate initial values for the side given.
         * 
         * @param side
         *            the side to be evaluated
         */
        public EvalVars(GameState.Who side)
        {
            if (GameState.Who.HOME == side) {
                goalDelta = +1;
                goalRow = BreakthroughState.N - 1;
                symbol = BreakthroughState.homeSym;
            }
            else {
                goalDelta = -1;
                goalRow = 0;
                symbol = BreakthroughState.awaySym;
            }

            hasWon = false;
            goalDistance = BreakthroughState.N; // worst case; shrinks as pieces are scanned
            pieces = 0;
            conflict = 0;
            cover = 0;
            squaresOwned = 0;
            penetrationCount = 0;
            spread = 0;
        }
    }

    /**
     * Creates a defensive player configured with the given AI parameters.
     *
     * @param aiParams
     *            search/evaluation parameters, including the feature weights
     */
    public DefensiveXiBreakthroughPlayer(XiBreakthroughPlayer.AIParams aiParams)
    {
        super(aiParams);
    }

    /* METHODS *************************************************************** */

    /**
     * Evaluates the given state and scores it as the weighted sum of the
     * evaluation features, from the perspective of the side to move.
     * Terminal states short-circuit to +/-WIN_SCORE.
     * 
     * @param state
     *            the state to be evaluated
     * @return the given state's score
     */
    protected int getStateScore(final BreakthroughState state)
    {
        EvalVars player = new EvalVars(state.getWho());
        EvalVars opponent = new EvalVars(state.getWho() == GameState.Who.HOME ? GameState.Who.AWAY : GameState.Who.HOME);

        populateEvalVars(state, player, opponent);

        if (player.hasWon) {
            return WIN_SCORE;
        }
        else if (opponent.hasWon) {
            return -WIN_SCORE;
        }

        // features.get(i)[0] is the weight; [1] receives the measured value.
        // Signs are folded into the values so all weights can stay positive
        // where that reads more naturally.
        features.get( FEATURE_NUM_PLAYER_PIECES        )[1] = player.pieces;
        features.get( FEATURE_NUM_OPPONENT_PIECES      )[1] = -opponent.pieces;
        features.get( FEATURE_OPPONENT_DIST_FROM_GOAL  )[1] = opponent.goalDistance * opponent.goalDistance;
        features.get( FEATURE_PLAYER_DIST_FROM_GOAL    )[1] = -(player.goalDistance * player.goalDistance);
        features.get( FEATURE_COVER_COMPARISON         )[1] = player.cover - opponent.cover;
        features.get( FEATURE_SQUARES_OWNED_COMPARISON )[1] = player.squaresOwned - opponent.squaresOwned; // always 0 for now -- see populateEvalVars
        features.get( FEATURE_NUM_CONFLICTS            )[1] = player.conflict; // opponent.conflict omitted on purpose: it is the same number
        features.get( FEATURE_PENETRATION_COUNT        )[1] = player.penetrationCount;
        features.get( FEATURE_SPREAD                   )[1] = player.spread;

        int score = 0;
        for (int i = features.size(); --i >= 0;) {
            score += features.get(i)[0] * features.get(i)[1];
        }

        return score;
    }


    /**
     * Examines the given state in a single board scan and fills the
     * evaluation variables for both sides. This is the data-gathering pass
     * behind {@link #getStateScore}.
     *
     * NOTE: {@code squaresOwned} is intentionally left at 0 for both sides.
     * The per-piece column scan that computed it made evaluation far too
     * slow; re-enable it only with a cheaper formulation.
     * 
     * @param state the state examined
     * @param player the current player's perspective
     * @param opponent the opponent's perspective
     * @return true if population finished successfully; false otherwise
     */
    protected boolean populateEvalVars(final BreakthroughState state, EvalVars player, EvalVars opponent)
    {
        if (state == null || player == null || opponent == null) {
            return false;
        }

        int[] playerRowOccupancy = new int[BreakthroughState.N];

        // Rear-most/front-most rows holding a player piece; -1 means "none
        // seen yet". The two may end up inverted relative to the direction of
        // play, but only |frontLine - backLine| is used, so that is harmless.
        int rowSpreadBackLine = -1;
        int rowSpreadFrontLine = 0;

        for (int row = 0; row < BreakthroughState.N; ++row) {
            for (int col = 0; col < BreakthroughState.N; ++col) {

                /////// Player ///////
                if (state.board[row][col] == player.symbol) {

                    // Spread bookkeeping
                    if (rowSpreadBackLine == -1)
                        rowSpreadBackLine = row;
                    rowSpreadFrontLine = row;

                    // Pieces
                    ++player.pieces;

                    // Penetration bookkeeping
                    ++playerRowOccupancy[row];

                    // Check has won
                    if (player.goalRow == row) {
                        player.hasWon = true;
                        return true;
                    }

                    // Cover and conflict: inspect the two squares diagonally
                    // ahead of this pawn (the only squares it can capture on
                    // or be supported from).
                    int nextRow = row + player.goalDelta;
                    int rowTest = BreakthroughState.N - nextRow - 1;
                    if (rowTest >= 0 && rowTest < BreakthroughState.N) { // i.e. nextRow is on the board
                        int leftCol = col - 1,
                            rightCol = col + 1;

                        if (leftCol >= 0) {
                            if (state.board[nextRow][leftCol] == player.symbol) {
                                ++player.cover;
                            }
                            else if (state.board[nextRow][leftCol] == opponent.symbol) {
                                ++player.conflict;
                                ++opponent.conflict;
                            }
                        }
                        if (rightCol < BreakthroughState.N) {
                            if (state.board[nextRow][rightCol] == player.symbol) {
                                ++player.cover;
                            }
                            else if (state.board[nextRow][rightCol] == opponent.symbol) {
                                ++player.conflict;
                                ++opponent.conflict;
                            }
                        }
                    }

                    // Goal distance. BUG FIX: measure the row distance to the
                    // goal row -- the original compared goalRow against the
                    // piece's COLUMN, corrupting both distance features.
                    int thisPieceGoalDistance = Math.abs(player.goalRow - row);
                    if (thisPieceGoalDistance < player.goalDistance)
                        player.goalDistance = thisPieceGoalDistance;
                }
                ///// OPPONENT //////
                else if (state.board[row][col] == opponent.symbol) {

                    // Pieces
                    ++opponent.pieces;

                    // Check has won
                    if (opponent.goalRow == row) {
                        opponent.hasWon = true;
                        return true;
                    }

                    // Cover only: conflicts are symmetric and were already
                    // counted for both sides in the player branch.
                    int nextRow = row + opponent.goalDelta;
                    int rowTest = BreakthroughState.N - nextRow - 1;
                    if (rowTest >= 0 && rowTest < BreakthroughState.N) {
                        int leftCol = col - 1,
                            rightCol = col + 1;

                        if (leftCol >= 0) {
                            if (state.board[nextRow][leftCol] == opponent.symbol) {
                                ++opponent.cover;
                            }
                        }
                        if (rightCol < BreakthroughState.N) {
                            if (state.board[nextRow][rightCol] == opponent.symbol) {
                                ++opponent.cover;
                            }
                        }
                    }

                    // Goal distance. BUG FIX: row distance, not column (see
                    // the matching fix in the player branch above).
                    int thisPieceGoalDistance = Math.abs(opponent.goalRow - row);
                    if (thisPieceGoalDistance < opponent.goalDistance)
                        opponent.goalDistance = thisPieceGoalDistance;
                }
            }
        }

        //// More Penetration
        // Find the player's most populated row -- the de facto defensive line.
        int mostPopulatedRow = 0;
        int mostPopRowPopulation = 0;
        for (int i = 0; i < playerRowOccupancy.length; ++i) {
            if (playerRowOccupancy[i] > mostPopRowPopulation) {
                mostPopRowPopulation = playerRowOccupancy[i];
                mostPopulatedRow = i;
            }
        }

        // Count opposing pieces between the player's home edge and the most
        // populated row (inclusive): those pieces have penetrated the line.
        for (int row = BreakthroughState.N - player.goalRow - 1; true; row += player.goalDelta) {

            for (int col = 0; col < BreakthroughState.N; ++col) {

                if (state.board[row][col] == opponent.symbol)
                    ++player.penetrationCount;

            }

            if (row == mostPopulatedRow)
                break;

        }
        ////

        // Compute row spread (guard the -1 "no pieces" sentinel so a wiped-out
        // player does not register a spurious spread of 1).
        player.spread = (rowSpreadBackLine == -1)
                ? 0
                : Math.abs(rowSpreadFrontLine - rowSpreadBackLine);

        // A side with no pieces left has lost.
        if (player.pieces == 0) {
            opponent.hasWon = true;
        }
        else if (opponent.pieces == 0) {
            player.hasWon = true;
        }

        return true;
    }


    /**
     * Runs the player.
     * 
     * @param args
     *            command-line arguments to the player
     */
    public static void main(String[] args)
    {
    	/** ######## Feature Weight Determination Plan -- Mike's Unfinished Portion of the project. #############
		########################################################################################################
		Right now I have 9 best-guess feature weights hard coded below in the int[] named featureWeights.
		This many features can not be run in any realistic amount of time even by a high-end PC.
		
		We need to find a strongly performing set of features to turn on and the right weights to give those features.
		It is extremely unlikely we will stumble upon a near-optimal or even semi-optimal set of inputs by making more human guesses.
		
		One way we can find strongly performing feature weights is that we can do hill climbing starting with a randomly generated set of 
		feature weight inputs. These feature weight inputs can be generated within a threshold to guide the search a bit. 
		This will get us to the first hill top we find. If we are lucky enough to be starting at the base of a high enough hill this may be good enough 
		to beat another person's best-guess feature weights, but that likely won't be the highest hill top we can find.
		
		We need to find the right place to start in order to guarantee reaching a high hilltop. 
		
		To find a better hillside to climb than the hill top we are currently at, we could move around horizontally - choosing one input to change at a time and 
		trying to hill climb with different values for that variable until our ascent reaches some standard we can set to say either we have found a hill top or 
		it is time to stop looking for a hilltop by horizontal movement on this variable in this set of variables (this criteria is described in more detail below).
		Then, when one of those criteria has been met, we can temporarily fix the input element we were altering and start altering another element, looking for
		ascent on another hill or the same hill in the same way. 
		
			There would have to be limits enforced to stop the search for a higher hill in the case that changing a given input 
			in a given set of inputs is not producing results. This could be in the form of a time limit or maximum number of trials before 
			beginning to climb with another input.
		
		However, making significant progress with this is likely futile with 9 variables. This is akin to saying that trying to find a hill top by 
		semi-random movement in a 9 dimensional world is greatly based on luck or small improvements. Any significant amount of improvement would likely require a very 
		long amount of runtime. This would likely plateau the performance of our AI at an even smaller rate of success than we would be able to achieve. 
		
		In this project it would likely be better to try different combinations of weights for 2-4 features, setting the rest to 0. We could make human decisions of which
		weights to turn on or off or we could loosely approximate the set of feature weights by doing hill climbing with our set of features turned on in the same way again. 
		
			It is notable that this would require turning off the data collection for each of the unweighted features as well, but this is only a trivial task to implement.
		
		We could then save the input that generates output that we are willing to call a hilltop. One possible way to identify a hilltop is that we could look for a set of inputs where
		the number of games won decreases whenever altering any of the input weights a small amount both up or down. A high-end pc may be able to find a hill-top that is this well defined
		but it also may not. I don't know HOW much speed will be needed to handle 3 variable hill climbing - let alone 4 or 5 variables. If that is the case then this hill top criterion is too 
		stringent and will likely have to be dealt with more laxly either by decreasing the number of feature weights input or by accepting a looser definition of "finding a hilltop."
		

		--------NOTE: I think this is a description of the use of an algorithm within a description of the algorithm itself.
		
		###########################################################
		Steps
		
		1. We can't hill climb to find the right set of inputs to turn on. It would add a huge amount of runtime and it would 
			probably need to be run for about a week to make some progress.
		
			What I'm going to do is pick a set of three values to hill climb with. The hill top
		
		
		###########################################################
		######################################################################################################################
		###########################################################
		###########################################################
		
		
		############################################################
		######### Old Ideas I need To Look At Again#################
		
		value at a time, 
		searching for a highest hill by using steepest ascent.
		
		How high we get depends on where we start our climb.
		
		We can do the hill climb for these inputs with 
		
		To find the right starting point, we can hill climb the input values. For a scoring of each set of values to climb the hill,
		we could use the criteria:
			1. The score of the feature weights playing against the last weights used vs. the score of the highest scoring weights found so far. 
				This search for the highest weight 
				
				We could run this many times to produce a sample set. 
				We could even automate it to run random sets of feature weights (generated within a threshold set by guess work (or even more hill climbing.))  against other random sets (within the same threshold). 
				
					I'm pretty sure the use of the last weights against the current generation of 
				
				The first weights (inputs) we will run it with will be random inputs produced by a random number generator  below playing against a random set of weight inputs, each determined within a continuous threshold . 
			
			The game can not take more than a threshold of 150% more than an average of 8 to 10 to 12 (8-10-12) seconds (420seconds / 52-42-35 moves) 
				-- The proper move time-length of these three choices needs to be found by comparing the win ratios of each of these three amount of moves/time-lengths.
			2. 
		
		r we can 
		
		############################################################# */

        // Best-guess weights, indexed by the FEATURE_* constants above.
        int[] featureWeights = new int[]{
                                            1, //FEATURE_NUM_PLAYER_PIECES = 0;
                                            1, //FEATURE_NUM_OPPONENT_PIECES = 1;
                                            2, //FEATURE_OPPONENT_DIST_FROM_GOAL = 2;
                                            2, //FEATURE_PLAYER_DIST_FROM_GOAL = 3;
                                            1, //FEATURE_COVER_COMPARISON = 4;
                                            5, //FEATURE_SQUARES_OWNED_COMPARISON = 5;
                                            2, //FEATURE_NUM_CONFLICTS = 6;              //Will this cause us to take engagements that we won't win just to decrease the number of conflicts -- I don't think so
                                          -10, //FEATURE_PENETRATION_COUNT = 7;          //Needs to be weighted heavily NEGATIVE
                                           -5  //FEATURE_SPREAD = 8                      //we want spread to be a small number
                                        };

        XiBreakthroughPlayer.AIParams hardDifficulty = new XiBreakthroughPlayer.AIParams(0, featureWeights);
        GamePlayer p = new DefensiveXiBreakthroughPlayer(hardDifficulty);
        p.compete(args);
    }
}

