from captureAgents import CaptureAgent
from captureAgents import AgentFactory
from baselineAgents import *
import distanceCalculator
import random, time, util
from game import Directions
from math import *
import copy
import keyboardAgents
import game
import time
import itertools
from itertools import *
from util import nearestPoint

NUM_KEYBOARD_AGENTS = 0

class CustomAgents(AgentFactory):
  "Custom Agents"

  def __init__(self, isRed, first='offense', second='defense', rest='offense'):
    AgentFactory.__init__(self, isRed)
    # Agent types handed out for the first two requests; every later
    # request falls back to `rest`.
    self.agents = [first, second]
    self.rest = rest

  def getAgent(self, index):
    # Serve from the queue while it lasts, then use the fallback type.
    if self.agents:
      agentStr = self.agents.pop(0)
    else:
      agentStr = self.rest
    return self.choose(agentStr, index)

  def choose(self, agentStr, index):
    """Instantiate the agent class named by agentStr for this index."""
    if agentStr == 'keys':
      # At most two human-controlled agents are supported.
      global NUM_KEYBOARD_AGENTS
      NUM_KEYBOARD_AGENTS += 1
      if NUM_KEYBOARD_AGENTS == 1:
        return keyboardAgents.KeyboardAgent(index)
      if NUM_KEYBOARD_AGENTS == 2:
        return keyboardAgents.KeyboardAgent2(index)
      raise Exception('Max of two keyboard agents supported')
    if agentStr == 'offense':
      return OffensiveReflexAgent(index)
    if agentStr == 'defense':
      return DefensiveReflexAgent(index)
    if agentStr == 'custom':
      return CustomAgent(index)
    raise Exception("No staff agent identified by " + agentStr)

class CustomAgent(CaptureAgent):
  """
  A base class for reflex agents that chooses score-maximizing actions
  """
  def __init__( self, index, timeForComputing = .1 ):
    """
    Set up the behaviour state machine, cached paths and search
    parameters.

    index: this agent's index in the game.
    timeForComputing: budget forwarded to CaptureAgent.
    """
    # BUG FIX: forward the caller's timeForComputing instead of the
    # hard-coded .1 that silently ignored the parameter.
    CaptureAgent.__init__( self, index, timeForComputing = timeForComputing )
    self.lastAction = Directions.STOP
    self.depth = 7            # lookahead depth for maxiMax
    self.minimaxDepth = 1     # ply depth for miniMax
    self.gamma = 0.8          # per-level discount used by maxiMax
    # Tile classification: 0 = dead_end, 1 = tunnel, 2 = corner, 3 = open
    self.positionCharacDictionary = {'dead_end' : 0, 'tunnel' : 1, 'corner' : 2, 'open' : 3}
    self.currStateCharac = self.positionCharacDictionary['dead_end'] 
    
    # High-level behaviour states driven by chooseAction.
    self.stateDictionary = {'spawnToCentroidPathFinding' : 1, 'spawnToCentroidPathFollow': 2, 
                                'positionTSPPathFinding' : 3, 'positionTSPPathFollow' : 4, 
                                'positionToCentroidFinding' : 5, 'positionToCentroidFollow' : 6}
    self.currState = self.stateDictionary['spawnToCentroidPathFinding']
    
    # (0,0) doubles as a "not yet known" sentinel; the real spawn
    # position is recorded on the first chooseAction call.
    self.spawnPosition = (0,0)
    
    self.pathFromSpawnToCentroidFound = False
    self.pathFromPositionTSPFound = False
    self.pathFromPositionToCentroidFound = False
    
    # Cached action paths and the replay index into each of them.
    self.currentPathFromSpawnToCentroid = []
    self.currentPathTSP = []
    self.currentPathFromPositionToCentroid = []
    
    self.spawnToCentroidListIndex = 0
    self.positionTSPIndex = 0
    self.positionToCentroidListIndex = 0
    
    self.usingSpawnToCentroid = False
    self.usingTSP = False
    self.usingPositionToCentroid = False
    
    self.radius = 3  # local food-sweep radius for travelMatrixGen
    
    # BUG FIX: chooseAction's fallback path reads and appends to these,
    # but they were never initialized anywhere (AttributeError on first
    # use).
    self.visitedPosChooseAction = []
    self.centroidChanged = False
    
  def setStateCharac(self, actions):
    """
    Classify the current tile from its legal-action list and store the
    code in self.currStateCharac: 2 actions = dead end, 3 actions =
    tunnel (straight through) or corner, otherwise open.
    """
    nActions = len(actions)
    if nActions == 2:
      charac = 'dead_end'
    elif nActions == 3:
      straightNS = (Directions.NORTH in actions) and (Directions.SOUTH in actions)
      straightEW = (Directions.EAST in actions) and (Directions.WEST in actions)
      charac = 'tunnel' if (straightNS or straightEW) else 'corner'
    else:
      charac = 'open'
    self.currStateCharac = self.positionCharacDictionary[charac]

  def chooseAction(self, gameState): 
    """
    Pick an action by running a small state machine:

      spawnToCentroidPathFinding -> plan spawn -> random food-cluster
                                    centroid
      spawnToCentroidPathFollow  -> replay that path one step per turn
      positionTSPPathFinding     -> plan a local food sweep around us
      positionTSPPathFollow      -> replay the sweep, replanning when a
                                    pellet is about to be eaten
      positionToCentroidFinding  -> plan a path toward a (biased-random)
                                    centroid
      positionToCentroidFollow   -> replay that path

    Each *Finding state recurses once so the freshly planned path is
    followed on the same turn.
    """
    curPos = gameState.getAgentState(self.index).getPosition()

    # (0,0) is the "unknown" sentinel: record the spawn point once.
    if(self.spawnPosition == (0,0)):
        self.spawnPosition = gameState.getAgentState(self.index).getPosition()

    # Back at spawn with a stale path: we were eaten, replan from scratch.
    if(self.spawnPosition == curPos and not len(self.currentPathFromSpawnToCentroid) == 0):
        self.currState = self.stateDictionary['spawnToCentroidPathFinding']

    if(self.currState == self.stateDictionary['spawnToCentroidPathFinding']):
        # Aim at a random k-means centroid, loosening the arrival
        # tolerance (2 -> 5) if no path is found; as a last resort plan
        # to the closest pellet instead.
        centroidToTarget = random.choice(self.getKMeansCentroids(gameState))
        self.currentPathFromSpawnToCentroid = self.findCentroidPath(gameState, centroidToTarget, 2)
        if(self.currentPathFromSpawnToCentroid == None):
            self.currentPathFromSpawnToCentroid = self.findCentroidPath(gameState, centroidToTarget, 5)
            if(self.currentPathFromSpawnToCentroid == None):
                closestFood = self.findClosestFood(gameState)
                self.currentPathFromPositionToCentroid = self.findCentroidPath(gameState, closestFood, 1)
        self.spawnToCentroidListIndex = 0
        print("imhere in 0")
        self.currState = self.stateDictionary['spawnToCentroidPathFollow']
        return self.chooseAction(gameState)
    elif(self.currState == self.stateDictionary['spawnToCentroidPathFollow']):
        # BUG FIX: both planning attempts above can fail and leave the
        # path None/empty; move on to the TSP state instead of crashing
        # on the indexing below.
        if not self.currentPathFromSpawnToCentroid:
            self.currState = self.stateDictionary['positionTSPPathFinding']
            return random.choice(gameState.getLegalActions(self.index))
        newAction = self.currentPathFromSpawnToCentroid[self.spawnToCentroidListIndex]
        self.spawnToCentroidListIndex = self.spawnToCentroidListIndex + 1
        # NOTE(review): hands over two steps before the path end,
        # presumably to start the food sweep early -- confirm.
        if(self.spawnToCentroidListIndex == len(self.currentPathFromSpawnToCentroid)-2):
            self.spawnToCentroidListIndex = 0
            # BUG FIX: the original looked the new state up without
            # assigning it, so the agent could never leave this state.
            self.currState = self.stateDictionary['positionTSPPathFinding']
        print("im in 1")
        return newAction 
    elif(self.currState == self.stateDictionary['positionTSPPathFinding']):
        # Plan a local sweep of the food within self.radius of us.
        self.currentPathTSP = self.travelMatrixGen(gameState, self.radius)
        self.positionTSPIndex = 0
        self.currState = self.stateDictionary['positionTSPPathFollow']
        if(len(self.currentPathTSP) == 0):
            # Nothing nearby: head for a centroid instead.
            self.positionTSPIndex = 0
            self.currState = self.stateDictionary['positionToCentroidFinding']
        print("im in 2")
        return self.chooseAction(gameState)
    elif(self.currState == self.stateDictionary['positionTSPPathFollow']):
        newAction = self.currentPathTSP[self.positionTSPIndex]
        self.positionTSPIndex = self.positionTSPIndex + 1
        
        # If the step we are about to take eats a pellet, replan the
        # sweep so the route reflects the changed food grid.
        succ = self.getSuccessor(gameState, newAction)
        nextPos = succ.getAgentPosition(self.index)
        oldFood = self.getFood(gameState)
        if oldFood[int(nextPos[0])][int(nextPos[1])]:
            self.currState = self.stateDictionary['positionTSPPathFinding']
            return self.chooseAction(gameState)
        if(self.positionTSPIndex == len(self.currentPathTSP)):
            self.positionTSPIndex = 0
            self.currState = self.stateDictionary['positionToCentroidFinding']
        return newAction
    elif(self.currState == self.stateDictionary['positionToCentroidFinding']):
        # Pick the nearest centroid and bias the random draw toward it
        # by appending it a second time before choosing.
        centroidList = self.getKMeansCentroids(gameState)
        minDistance = 100000
        centroidToTarget = (0,0)
        for centroid in centroidList:
            if(minDistance > self.getMazeDistance(curPos, centroid)):
                minDistance = self.getMazeDistance(curPos, centroid)
                centroidToTarget = centroid
        centroidList.append(centroidToTarget)
        self.currentPathFromPositionToCentroid = self.findCentroidPath(gameState, random.choice(centroidList), 2)
        if(self.currentPathFromPositionToCentroid == None):
            self.positionToCentroidListIndex = 0
            self.currState = self.stateDictionary['positionTSPPathFinding']
            return random.choice(gameState.getLegalActions(self.index))
        # If already at/near the centroid, go straight for the closest
        # food to avoid thrashing around the cluster center.
        if(len(self.currentPathFromPositionToCentroid) == 0):
            closestFood = self.findClosestFood(gameState)
            self.currentPathFromPositionToCentroid = self.findCentroidPath(gameState, closestFood, 1)
            if(self.currentPathFromPositionToCentroid == None):
                self.positionToCentroidListIndex = 0
                self.currState = self.stateDictionary['positionTSPPathFinding']
                return random.choice(gameState.getLegalActions(self.index))
        self.positionToCentroidListIndex = 0
        self.currState = self.stateDictionary['positionToCentroidFollow']
        # BUG FIX: this branch fell through without returning and ran
        # leftover dead code below (which read uninitialized
        # attributes); recurse like the other *Finding states so the
        # new path is followed immediately.
        return self.chooseAction(gameState)
    elif(self.currState == self.stateDictionary['positionToCentroidFollow']):
        newAction = self.currentPathFromPositionToCentroid[self.positionToCentroidListIndex]
        self.positionToCentroidListIndex = self.positionToCentroidListIndex + 1
        if(self.positionToCentroidListIndex == len(self.currentPathFromPositionToCentroid)):
            self.positionToCentroidListIndex = 0
            self.currState = self.stateDictionary['positionTSPPathFinding']
        return newAction 

    # Defensive default; every state above returns, so this should be
    # unreachable. (The large block of dead, commented-out legacy code
    # that previously followed the state machine was removed.)
    return random.choice(gameState.getLegalActions(self.index))

  def evaluationFunction(self, currentGameState):
    """
    Linear evaluation of a state from this agent's point of view.

    Combines (while we are a Pacman) the summed and minimum maze
    distances to visible opponents, the reciprocal Manhattan distance
    to the nearest opponent pellet, the number of remaining pellets,
    and the game score. Higher is better.
    """
    curPos = currentGameState.getAgentState(self.index).getPosition()

    # Distances to visible opponents; they only matter while we are a
    # Pacman on their side.
    enemies = [currentGameState.getAgentState(i) for i in self.getOpponents(currentGameState)]
    visibleEnemies = [e for e in enemies if e.getPosition() != None]
    ghostWeight = 0
    minGhostDist = 1000
    if currentGameState.getAgentState(self.index).isPacman:
        for enemy in visibleEnemies:
            dist = self.getMazeDistance(curPos, enemy.getPosition())
            if dist < minGhostDist:
                minGhostDist = dist
            ghostWeight = ghostWeight + dist
    if len(visibleEnemies) == 0 or not currentGameState.getAgentState(self.index).isPacman:
        minGhostDist = 0

    # Scan the opponents' food grid: count remaining pellets (a pellet
    # on our own square nets to zero) and track the largest reciprocal
    # Manhattan distance, i.e. the proximity of the nearest pellet.
    # (Was: `sumOfOldFood`/`minOfOldFood`; also shadowed builtin `sum`
    # and built an unused pellet list every call.)
    foodGrid = self.getFood(currentGameState)
    foodCount = 0
    bestFoodProximity = -1000
    for colIndex, col in enumerate(foodGrid):
        for rowIndex, hasFood in enumerate(col):
            if hasFood is True:
                manhattan = abs(colIndex - curPos[0]) + abs(rowIndex - curPos[1])
                if manhattan == 0:
                    foodCount = foodCount - 1
                elif bestFoodProximity < 1.0 / manhattan:
                    bestFoodProximity = 1.0 / manhattan
                foodCount = foodCount + 1

    curScore = currentGameState.getScore()

    return (400 * ghostWeight + 700 * minGhostDist
            + 100 * bestFoodProximity - 1000 * foodCount
            + 10000 * curScore)
  
  def miniMax(self, gameState, agentListIndex, currentDepth, agentList):
    """
    Plain minimax over the agents in agentList (this agent maximizes,
    visible opponents minimize). Depth is counted in full plies: it
    increases each time the turn wraps back around to this agent.

    Returns (value, action).
    """
    agentIndex = agentList[agentListIndex]
    if agentIndex == self.index:
        if currentDepth == self.minimaxDepth:
            return self.evaluationFunction(gameState), Directions.STOP

    newAgentListIndex = (agentListIndex + 1) % len(agentList)
    newAgentNumber = agentList[newAgentListIndex]
    if(newAgentNumber == self.index):
        nextDepth = currentDepth+1
    else:
        nextDepth = currentDepth

    if agentIndex == self.index:
        # Maximizing node: our own move; standing still is never
        # considered.
        # BUG FIX: bestAction was initialized to a large integer, which
        # leaked a non-action to the caller when no move improved on
        # the initial bestValue; default to STOP instead.
        bestAction = Directions.STOP
        bestValue = -1000000000000000000000
        for action in gameState.getLegalActions(agentIndex):
            if(action == Directions.STOP):
                continue
            newValue, newAction = self.miniMax(gameState.generateSuccessor(agentIndex, action), newAgentListIndex, nextDepth, agentList) 
            if (newValue > bestValue):
                bestValue = newValue
                bestAction = action
    else:
        # Minimizing node: a visible opponent.
        bestAction = Directions.STOP
        bestValue = 1000000000000000000000000
        for action in gameState.getLegalActions(agentIndex):
            newValue, newAction = self.miniMax(gameState.generateSuccessor(agentIndex, action), newAgentListIndex, nextDepth, agentList) 
            if (newValue < bestValue):
                bestValue = newValue
                bestAction = action

    return bestValue, bestAction
  
  def maxiMax(self, gameState, agentIndex, currentDepth, visitedPos, lastAction):
    """
    Depth-limited, self-only lookahead ("maximax"): expands only this
    agent's own moves, never revisits a position already on the current
    path, and discounts deeper values by self.gamma. Standing on an
    opponent pellet is treated as a terminal win (large bonus).

    visitedPos: positions already traversed on this search path.
    Returns (value, action).
    """
    curPos = gameState.getAgentState(self.index).getPosition()
    if currentDepth == self.depth:
        return self.evaluationFunction(gameState), Directions.STOP

    # Terminal: the current square holds opponent food.
    oldFood = self.getFood(gameState)
    if oldFood[int(curPos[0])][int(curPos[1])] == True:
        return 100000 + self.evaluationFunction(gameState), lastAction

    bestAction = None
    bestValue = -100000
    for action in gameState.getLegalActions(agentIndex):
        if(action == Directions.STOP):
            continue
        succ = self.getSuccessor(gameState, action)
        nextPos = succ.getAgentPosition(self.index)
        if (nextPos in visitedPos):
            continue
        # FIX (perf): the path list is only ever appended to and its
        # entries are never mutated, so a shallow copy is equivalent to
        # the old copy.deepcopy and far cheaper per expanded node.
        newvPos = list(visitedPos)
        newvPos.append(nextPos)

        newValue, newAction = self.maxiMax(succ, self.index, currentDepth+1, newvPos, action) 
        if (self.gamma*newValue > bestValue):
            bestValue = self.gamma*newValue
            bestAction = action

    # Every move was pruned (STOP or already visited): fall back to a
    # random legal move. (Was a numeric sentinel comparison.)
    if bestAction is None:
        bestAction = random.choice(gameState.getLegalActions(agentIndex))

    return bestValue, bestAction
    
  def findClosestFood(self, gameState):
    """
    Return the (col, row) of the opponent pellet with the smallest
    Manhattan distance to our current position, or (0,0) if no pellet
    exists.
    """
    curPos = gameState.getAgentState(self.index).getPosition()
    foodGrid = self.getFood(gameState)

    bestDistance = 100000
    bestFood = (0,0)
    for colIndex, col in enumerate(foodGrid):
        for rowIndex, hasFood in enumerate(col):
            if hasFood is True:
                distance = abs(colIndex - curPos[0]) + abs(rowIndex - curPos[1])
                if distance < bestDistance:
                    bestDistance = distance
                    bestFood = (colIndex, rowIndex)
    return bestFood
    
  def getKMeansCentroids(self, gameState):
    """
    Run three rounds of k-means (k = 3, maze-distance metric) over the
    opponents' food pellets and return the centroids that own at least
    one pellet.
    """
    foodGrid = self.getFood(gameState)

    # Collect pellet coordinates.
    pellets = []
    for colIndex, col in enumerate(foodGrid):
        for rowIndex, hasFood in enumerate(col):
            if hasFood is True:
                pellets.append((colIndex, rowIndex))

    # Seed the centroids toward the opponents' side of the board.
    # NOTE(review): the corner seeds can lie on walls/out of bounds;
    # getMazeDistance is assumed to tolerate them -- confirm.
    midpoint = (int(foodGrid.width/2), int(foodGrid.height/2))
    if not(self.red):
        centroids = [midpoint, (0, int(foodGrid.height)), (0, 0)]
    else:
        centroids = [midpoint,
                     (int(foodGrid.width), int(foodGrid.height)),
                     (int(foodGrid.width), 0)]

    counts = [0, 0, 0]
    for _ in range(3):
        counts = [0, 0, 0]
        sums = [(0,0), (0,0), (0,0)]
        # Assignment step: each pellet joins its nearest centroid.
        for pellet in pellets:
            bestDist = 100000
            owner = 0
            for idx, centroid in enumerate(centroids):
                d = self.getMazeDistance(centroid, pellet)
                if d < bestDist:
                    owner = idx
                    bestDist = d
            counts[owner] = counts[owner] + 1
            sums[owner] = (sums[owner][0] + pellet[0], sums[owner][1] + pellet[1])
        # Update step: move every non-empty centroid to its cluster mean.
        for idx in range(len(centroids)):
            if counts[idx] != 0:
                centroids[idx] = (int(sums[idx][0]/counts[idx]), int(sums[idx][1]/counts[idx]))

    # Drop centroids that ended up owning no pellets.
    return [c for idx, c in enumerate(centroids) if counts[idx] != 0]
    
  def findExitPath(self, gameState):
    """
    Plan a path to the pellet of OUR OWN food closest to us (Manhattan
    distance), breaking ties in favor of the pellet nearest to the
    closest opponent pellet.

    Returns the action list, or None (implicitly) if the search
    exhausts, exactly like findCentroidPath.
    """
    curPos = gameState.getAgentState(self.index).getPosition()
    ourFood = self.getFoodYouAreDefending(gameState)

    # DEDUP: the first scan duplicated findClosestFood verbatim; the
    # result is only used as the tie-breaking anchor below.
    closestYummyFood = self.findClosestFood(gameState)

    closestFood = (0,0)
    minOurFoodDist = 100000
    anchorDist = 0
    for colIndex, col in enumerate(ourFood):
        for rowIndex, value in enumerate(col):
            if value is True:
                dist = abs(colIndex - curPos[0]) + abs(rowIndex - curPos[1])
                if minOurFoodDist > dist:
                    minOurFoodDist = dist
                    anchorDist = self.getMazeDistance((colIndex,rowIndex), closestYummyFood)
                    closestFood = (colIndex,rowIndex)
                elif minOurFoodDist == dist:
                    # Tie: prefer the pellet nearer the opponent pellet.
                    if anchorDist > self.getMazeDistance((colIndex,rowIndex), closestYummyFood):
                        anchorDist = self.getMazeDistance((colIndex,rowIndex), closestYummyFood)
                        closestFood = (colIndex, rowIndex)

    # DEDUP: the search loop here was a verbatim copy of
    # findCentroidPath with goal test `distance == 0`, which for
    # non-negative integer maze distances is `distance < 1`; delegate
    # instead of duplicating the search.
    return self.findCentroidPath(gameState, closestFood, 1)
   
  def findOpponentFoodCentroid(self, gameState):
    """
    Return the integer centroid (mean column, mean row) of all opponent
    food pellets.

    NOTE(review): raises ZeroDivisionError when no opponent food
    remains; presumably only called while food exists -- confirm.
    """
    # FIX: dropped the unused pellet list and unused local that the
    # original rebuilt on every call.
    oldFood = self.getFood(gameState)
    pelletCount = 0
    sumCol = 0
    sumRow = 0
    for colIndex, col in enumerate(oldFood):
        for rowIndex, value in enumerate(col):
            if value is True:
                pelletCount = pelletCount + 1
                sumCol = sumCol + colIndex
                sumRow = sumRow + rowIndex

    return (int(sumCol/pelletCount), int(sumRow/pelletCount))
   
  def findCentroidPath(self, gameState, opponentFoodCentroid, tolerance):
    """
    A*-style search from the current position to any square within
    `tolerance` maze-distance of opponentFoodCentroid.

    Returns the list of actions to get there, or None (implicitly) if
    the queue empties without reaching the goal.
    """
    curPos = gameState.getAgentState(self.index).getPosition()
    "Search the node that has the lowest combined cost and heuristic first."
    from util import PriorityQueue
    import copy 
            
    # Each queue entry is (state, actions-so-far, cost-so-far); the
    # push priority adds the maze-distance heuristic to the goal.
    statePriorityQueue=PriorityQueue()
    cloud=[]  # closed set of already-expanded positions
    manDist=0+self.getMazeDistance(curPos, opponentFoodCentroid)
    statePriorityQueue.push((gameState,[],0),manDist)
    while not statePriorityQueue.isEmpty():
        currState=statePriorityQueue.pop()
        # Skip nodes whose position has already been expanded.
        if currState[0].getAgentState(self.index).getPosition() in cloud:
            continue;
        
        cloud.append(currState[0].getAgentState(self.index).getPosition())
        # Goal test: close enough to the target.
        if self.getMazeDistance(currState[0].getAgentState(self.index).getPosition(), opponentFoodCentroid) < tolerance:
            return currState[1]
        for action in currState[0].getLegalActions(self.index):
           newState = self.getSuccessor(currState[0], action)
           if newState.getAgentState(self.index).getPosition() not in cloud:
                # Extend the action path without aliasing the parent's list.
                w=copy.deepcopy(currState[1])
                w.append(action)
                # Priority = accumulated step cost + heuristic to goal;
                # the stored cost omits the heuristic term.
                newCost=currState[2]+self.getMazeDistance(currState[0].getAgentState(self.index).getPosition(), newState.getAgentState(self.index).getPosition())+self.getMazeDistance(newState.getAgentState(self.index).getPosition(), opponentFoodCentroid)
                statePriorityQueue.push((newState,w,currState[2]+self.getMazeDistance(currState[0].getAgentState(self.index).getPosition(), newState.getAgentState(self.index).getPosition())),newCost)
  
  def getSuccessor(self, gameState, action):
    """
    Generate the successor state reached by taking `action`, stepping a
    second time whenever the agent ends up between grid cells.
    """
    successor = gameState.generateSuccessor(self.index, action)
    position = successor.getAgentState(self.index).getPosition()
    if position == nearestPoint(position):
      return successor
    # Only half a grid position was covered -- repeat the move so the
    # agent lands on an actual grid cell.
    return successor.generateSuccessor(self.index, action)
   
  def travelMatrixGen(self, gameState, radius):
    """
    Plan a short food-collecting tour near the agent.

    Collects up to 5 food cells inside a `radius`-sized box around the
    agent, brute-forces every visiting order for the minimum total maze
    distance, then stitches together the A* legs (findCentroidPath)
    between consecutive stops.  Returns the concatenated action list.
    """
    curPos = gameState.getAgentState(self.index).getPosition()
    yummyFood = self.getFood(gameState)

    # Clamp the search box to the grid.  The upper bounds are used as
    # half-open range() limits, so they may legitimately equal
    # width/height (the original clamp to width-1/height-1 silently
    # dropped the last column/row).
    colBoundLower = max(int(curPos[0] - radius), 0)
    colBoundUpper = min(int(curPos[0] + radius), yummyFood.width)
    rowBoundLower = max(int(curPos[1] - radius), 0)
    rowBoundUpper = min(int(curPos[1] + radius), yummyFood.height)

    # Gather at most 5 food cells so the brute-force tour below stays
    # within 5! = 120 permutations.
    listOfFood = []
    listUpperBound = 5
    for col in range(colBoundLower, colBoundUpper):
      tmpCol = yummyFood[col]
      for row in range(rowBoundLower, rowBoundUpper):
        if tmpCol[row] and len(listOfFood) < listUpperBound:
          listOfFood.append((col, row))

    # Naive TSP over the (at most 5) stops: try every visiting order.
    minPath = []
    pathDistance = float('inf')
    for permutation in itertools.permutations(listOfFood):
      start = curPos
      distance = 0
      for point in permutation:
        distance += self.getMazeDistance(start, point)
        start = point
      if distance < pathDistance:
        pathDistance = distance
        minPath = permutation

    # Stitch the legs together, widening the tolerance when A* cannot
    # get close enough to a stop.
    succ = gameState
    listOfActions = []
    for item in minPath:
      newListOfActions = None
      for tolerance in (1, 2, 5):
        newListOfActions = self.findCentroidPath(succ, item, tolerance)
        if newListOfActions is not None:
          break
      if newListOfActions is None:
        # Unreachable even with the loosest tolerance; skip the stop
        # instead of crashing on iterating None.
        continue
      listOfActions.extend(newListOfActions)
      for action in newListOfActions:
        succ = self.getSuccessor(succ, action)

    return listOfActions
   
  def evaluate(self, gameState, action):
    """
    Score `action` from `gameState` as the dot product of the extracted
    features with their weights.
    """
    return self.getFeatures(gameState, action) * self.getWeights(gameState, action)

  def getFeatures(self, gameState, action):
    """
    Extract features for taking `action`: the successor's score and the
    maze distance to the closest remaining food.
    """
    features = util.Counter()
    successor = self.getSuccessor(gameState, action)
    features['successorScore'] = self.getScore(successor)

    # Distance to the nearest food in the successor state.
    remainingFood = self.getFood(successor).asList()
    if remainingFood:  # should always hold, but guard anyway
      myPos = successor.getAgentState(self.index).getPosition()
      features['distanceToFood'] = min(
          self.getMazeDistance(myPos, food) for food in remainingFood)
    return features

  def getWeights(self, gameState, action):
    """
    Feature weights: strongly reward score, strongly penalize distance
    to food.  Independent of the state and action arguments.
    """
    return dict(successorScore=100, distanceToFood=-100)
  
#def getFeatures(self, gameState, action):
#    """
#    Returns a counter of features for the state
#    """
#    features = util.Counter()
#    successor = self.getSuccessor(gameState, action)
#    features['successorScore'] = self.getScore(successor)
#    return features

#  def getWeights(self, gameState, action):
#    """
#    Normally, weights do not depend on the gamestate.  They can be either
#    a counter or a dictionary.
#    """
#    return {'successorScore': 1.0}
