# multiAgents.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

from game import Agent, Directions
from util import manhattanDistance
import random
import util


class ReflexAgent(Agent):
  """
    A reflex agent picks whichever action currently looks best according to
    a one-step state evaluation function; it performs no deeper lookahead.

    The code below is provided as a guide.  You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
  """


  def getAction(self, gameState):
    """
    Choose among the highest-scoring legal actions, breaking ties uniformly
    at random.

    Returns some Directions.X for some X in the set
    {North, South, West, East, Stop}.
    """
    legalMoves = gameState.getLegalActions()

    # Score every legal move with the one-step evaluation function.
    scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
    bestScore = max(scores)
    bestIndices = [i for i, score in enumerate(scores) if score == bestScore]

    # Pick randomly among the best so repeated ties don't cause loops.
    return legalMoves[random.choice(bestIndices)]

  def evaluationFunction(self, currentGameState, action):
    """
    One-step lookahead evaluation: score the successor state reached by
    taking `action` from `currentGameState`; higher numbers are better.

    Strategy: chase whichever of (nearest capsule, nearest food) --
    "nearest" measured from the CURRENT position -- this move brings us
    closer to, with a heavy penalty for landing on a ghost and a zero
    score for stopping.
    """
    successorGameState = currentGameState.generatePacmanSuccessor(action)
    newPos = successorGameState.getPacmanPosition()
    newFood = successorGameState.getFood()
    newGhostStates = successorGameState.getGhostStates()

    oldPos = currentGameState.getPacmanPosition()
    # Upper bound on any Manhattan distance on the board, so that a
    # smaller distance yields a larger score.
    maxDist = newFood.width + newFood.height

    def approachScore(targets):
      # Reward closing in on the target that was nearest BEFORE moving;
      # 0 when no targets remain.
      ranked = [(manhattanDistance(oldPos, pos), pos) for pos in targets]
      if not ranked:
        return 0
      nearestPos = min(ranked)[1]
      return maxDist - manhattanDistance(newPos, nearestPos)

    capsuleScore = approachScore(currentGameState.getCapsules())
    foodScore = approachScore(currentGameState.getFood().asList())

    # Landing exactly on a ghost whose scare timer is <= 40 is treated as
    # fatal.  NOTE(review): 40 is presumably the maximum scare time, which
    # would make this trigger for every ghost -- confirm against pacman.py.
    steppedOnGhost = any(
        ghost.scaredTimer <= 40
        and manhattanDistance(newPos, ghost.configuration.pos) == 0
        for ghost in newGhostStates)

    if steppedOnGhost:
      return -1000
    if action == Directions.STOP:
      return 0
    if foodScore < capsuleScore + 2:
      # Capsule is (nearly) as attractive as food: prefer the capsule.
      return int(capsuleScore)
    return int(foodScore)

def scoreEvaluationFunction(currentGameState):
  """
    Default evaluation function: simply the score of the state, exactly as
    displayed in the Pacman GUI.

    Meant for use with adversarial search agents (not reflex agents).
  """
  return currentGameState.getScore()

class MultiAgentSearchAgent(Agent):
  """
    Common base for all the multi-agent searchers; anything defined here is
    available to MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents.  Please do not
    remove anything, however.

    Note: this is an abstract class: one that should not be instantiated.  It's
    only partially specified, and designed to be extended.  Agent (game.py)
    is another abstract class.
  """

  def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
    # Command-line options arrive as strings; convert/resolve them here.
    self.depth = int(depth)
    # Look the evaluation function up by name in this module's globals.
    self.evaluationFunction = util.lookup(evalFn, globals())
    self.index = 0 # Pacman is always agent index 0

class MinimaxAgent(MultiAgentSearchAgent):
  """
    Your minimax agent (question 2)
  """

  def getAction(self, gameState):
    """
      Returns the minimax action from the current gameState using self.depth
      and self.evaluationFunction.

      The search explores self.depth full rounds of play, one ply per agent
      (Pacman maximizes, every ghost minimizes), for a total ply budget of
      self.depth * numAgents; the root move itself consumes one ply.

      Useful GameState (pacman.py) methods:
        gameState.getLegalActions(agentIndex) -- legal actions for an agent
          (agentIndex=0 is Pacman, ghosts are >= 1)
        gameState.generateSuccessor(agentIndex, action) -- successor state
        gameState.getNumAgents() -- total number of agents in the game
    """
    numAgents = gameState.getNumAgents()  # Pacman + all ghosts
    legalMoves = gameState.getLegalActions(self.index)
    # Value each root move by searching its successor with the remaining
    # ply budget.
    values = [self.minimaxActionsValue(gameState.generateSuccessor(self.index, move),
                                       move, self.index,
                                       (self.depth * numAgents) - 1, numAgents)
              for move in legalMoves]
    # Ties resolve to the earliest legal move, as before.
    return legalMoves[values.index(max(values))]

  def minimaxActionsValue(self, currentGameState, nextMove, index, depth, numAgents):
    """
      Minimax value of currentGameState, reached when agent `index` took
      `nextMove`; `depth` is the number of plies left to search.  Agent
      indices cycle Pacman (0), ghost 1, ..., ghost numAgents-1.
    """
    index += 1
    if index >= numAgents: index = 0  # wrap back around to Pacman
    # Terminal test FIRST: the original generated every successor before
    # checking depth/win/lose, expanding states it then threw away at
    # every cutoff leaf.  Values returned are identical.
    if depth < 1 or currentGameState.isWin() or currentGameState.isLose():
      return self.evaluationFunction(currentGameState)
    legalMoves = currentGameState.getLegalActions(index)
    if index == 0:
      # Pacman: maximize over his moves.
      valueAction = float("-infinity")
      for move in legalMoves:
        valueAction = max(valueAction,
                          self.minimaxActionsValue(currentGameState.generateSuccessor(index, move),
                                                   move, index, depth - 1, numAgents))
    else:
      # Ghosts: adversarial, minimize.
      valueAction = float("infinity")
      for move in legalMoves:
        valueAction = min(valueAction,
                          self.minimaxActionsValue(currentGameState.generateSuccessor(index, move),
                                                   move, index, depth - 1, numAgents))
    return valueAction
            
class AlphaBetaAgent(MultiAgentSearchAgent):
  """
    Your minimax agent with alpha-beta pruning (question 3)
  """

  def getAction(self, gameState):
    """
      Returns the minimax action using self.depth and self.evaluationFunction.

      Values travel as (score, action) tuples so the chosen action falls out
      of the root call directly.  NOTE(review): comparing these tuples also
      compares the action string when scores tie, so tie-breaking depends on
      direction names -- presumably acceptable, but worth confirming.
    """
    numAgents = gameState.getNumAgents() # get the number of agents in the game
    # (The original also fetched and STOP-pruned Pacman's legal moves here,
    # but never used them: the root expansion happens inside
    # maxAlphaBetaValue.  That dead code has been removed.)
    currentGameState = gameState
    nextMove = Directions.STOP # placeholder action attached to leaf values
    index = self.index # Pacman is agent 0
    depth = (self.depth*numAgents) # one ply per agent per search level
    alpha = (float("-infinity"),Directions.STOP) # best value MAX can force so far
    beta = (float("infinity"),Directions.STOP) # best value MIN can force so far
    valueAction = self.alphaBetaActionsValue(currentGameState, nextMove, index, depth, numAgents, alpha, beta)
    return valueAction[1] # the action paired with the best root value

  def alphaBetaActionsValue(self, currentGameState, nextMove, index, depth, numAgents, alphaValueAction, betaValueAction):
    """
      Dispatch: evaluate terminal states, otherwise branch to the max layer
      (Pacman, index 0) or a min layer (ghosts).  The index is incremented
      by the callers (index+1), so only the wrap-around happens here.
    """
    if index >= numAgents: index = 0 #reset the index every time it reach the last agent
    if depth < 1 or currentGameState.isWin() or currentGameState.isLose(): #terminal node: ply budget spent or game over
        valueAction = (self.evaluationFunction(currentGameState),nextMove)
        return valueAction 
    elif index == 0: #Pacman maximizes, ghosts minimize
        valueAction = self.maxAlphaBetaValue(currentGameState, nextMove, index, depth, numAgents, alphaValueAction, betaValueAction)
    else:
        valueAction = self.minAlphaBetaValue(currentGameState, nextMove, index, depth, numAgents, alphaValueAction, betaValueAction)
    return valueAction  #return the (value, action) pair

  def maxAlphaBetaValue(self, currentGameState, nextMove, index, depth, numAgents, alphaValueAction, betaValueAction):
    """
      MAX layer (Pacman): best (value, action) over the children, pruning
      once the value reaches beta.  STOP is excluded from Pacman's options.
    """
    valueAction = (float("-infinity"),Directions.STOP) #worst case for MAX
    legalMoves = currentGameState.getLegalActions(index) #legal actions for the current agent
    if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP) #never consider standing still
    successorGameStatesAction = [(currentGameState.generateSuccessor(index, proxMove),proxMove) for proxMove in legalMoves] #children of this node
    for successorGameState,succMove in successorGameStatesAction:
        maxValue = self.alphaBetaActionsValue(successorGameState, succMove, index+1, depth-1, numAgents, alphaValueAction, betaValueAction)
        maxValueAction=(maxValue[0],succMove) #re-pair the child's value with the move that produced it
        valueAction = max(valueAction, maxValueAction)
        if (valueAction[0] >= betaValueAction[0]): #value already >= what MIN allows above: prune remaining children
            return valueAction
        else:
            alphaValueAction = max(valueAction, alphaValueAction) #tighten alpha for the remaining children
    return valueAction
  
  def minAlphaBetaValue(self, currentGameState, nextMove, index, depth, numAgents, alphaValueAction, betaValueAction):
    """
      MIN layer (a ghost): worst (value, action) over the children, pruning
      once the value drops to alpha.  (Ghosts never have STOP as a legal
      move, so the STOP removal below is a harmless no-op.)
    """
    valueAction = (float("infinity"),Directions.STOP) #worst case for MIN
    legalMoves = currentGameState.getLegalActions(index) #legal actions for the current agent
    if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP) #mirror of the MAX-layer pruning
    successorGameStatesAction = [(currentGameState.generateSuccessor(index, proxMove),proxMove) for proxMove in legalMoves] #children of this node
    for successorGameState,succMove in successorGameStatesAction:
        minValue = self.alphaBetaActionsValue(successorGameState, succMove, index+1, depth-1, numAgents, alphaValueAction, betaValueAction)
        minValueAction=(minValue[0],succMove) #re-pair the child's value with the move that produced it
        valueAction = min(valueAction, minValueAction)
        if (valueAction[0] <= alphaValueAction[0]): #value already <= what MAX guarantees above: prune remaining children
            return valueAction
        else:
            betaValueAction = min(valueAction, betaValueAction) #tighten beta for the remaining children
    return valueAction

class ExpectimaxAgent(MultiAgentSearchAgent):
  """
    Your expectimax agent (question 4)
  """

  def getAction(self, gameState):
    """
      Returns the expectimax action using self.depth and self.evaluationFunction

      All ghosts should be modeled as choosing uniformly at random from their
      legal moves.
    """
    numAgents = gameState.getNumAgents()  # Pacman + all ghosts
    legalMoves = gameState.getLegalActions(self.index)
    # Value each root move with an expectimax search; the total ply budget
    # is self.depth * numAgents and the root move itself consumes one ply.
    values = [self.expectimaxActionsValue(gameState.generateSuccessor(self.index, move),
                                          move, self.index,
                                          (self.depth * numAgents) - 1, numAgents)
              for move in legalMoves]
    # Ties resolve to the earliest legal move, as before.
    return legalMoves[values.index(max(values))]

  def expectimaxActionsValue(self, currentGameState, nextMove, index, depth, numAgents):
    """
      Expectimax value of currentGameState, reached when agent `index` took
      `nextMove`; `depth` is the number of plies left.  Pacman (index 0)
      maximizes; each ghost layer is the mean over its legal moves (uniform
      random ghost model).
    """
    index += 1
    if index >= numAgents: index = 0  # wrap back around to Pacman
    # Terminal test FIRST: the original generated every successor before
    # checking depth/win/lose, expanding states it then threw away at
    # every cutoff leaf.
    if depth < 1 or currentGameState.isWin() or currentGameState.isLose():
      return self.evaluationFunction(currentGameState)
    legalMoves = currentGameState.getLegalActions(index)
    childValues = [self.expectimaxActionsValue(currentGameState.generateSuccessor(index, move),
                                               move, index, depth - 1, numAgents)
                   for move in legalMoves]
    if index == 0:
      # Pacman maximizes.
      return max(childValues)
    # Uniform expectation over ghost moves.  float() prevents Python 2
    # integer division from silently truncating the mean when every child
    # value is an int (e.g. with scoreEvaluationFunction).
    return sum(childValues) / float(len(childValues))
    

def betterEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION:

      Starts from the game score and adds weighted "attraction" terms built
      from inverse Manhattan distances (1/d, with 2 substituted when d == 0):
        +6 * capsule term   (max + sum of inverse capsule distances)
        +2 * food term      (max + sum of inverse food distances)
        -2 * ghost term     (min + sum of inverse ghost distances; repulsive)
        +10 * scared-ghost term, only while some nearby ghost is scared
      If Pacman is on the same square as a (non-expired-scare) ghost, a large
      negative score (-1000 + game score) is returned instead.
    """
    "*** YOUR CODE HERE ***"
    
    currPos = currentGameState.getPacmanPosition()
    #currFood = currentGameState.getFood()
    currFoodList = currentGameState.getFood().asList()
    currCapsules = currentGameState.getCapsules()
    currGhostStates = currentGameState.getGhostStates()
    
    # Base everything on the displayed game score, then add shaping terms.
    gameScore = currentGameState.getScore()
    score = gameScore
    scoreFood = float(0)
    scoreCapsule= float(0)
    scoreGhost = float(0)
    #maxDist = (currFood.width + currFood.height)
    #maxDist = (currFood.width * currFood.height)
    
    # Capsule attraction: inverse distance to every capsule; a capsule on
    # Pacman's own square contributes 2 (stronger than any 1/d with d >= 1).
    lisCapsulesDistPos = []
    for currCapsulesPos in currCapsules:
        #capsulesDist = mazeDistance(currPos, currCapsulesPos, currentGameState)
        capsulesDist = manhattanDistance(currPos, currCapsulesPos)
        #lisCapsulesDistPos.append(maxDist-capsulesDist)
        if capsulesDist > 0:
            lisCapsulesDistPos.append(float(1)/capsulesDist)
        else:
            lisCapsulesDistPos.append(float(2))
    if len(lisCapsulesDistPos) > 0:
        # max + sum: the closest capsule is counted twice, emphasizing it.
        scoreCapsule += max(lisCapsulesDistPos)
        scoreCapsule += sum(lisCapsulesDistPos)
    
    # Food attraction, built the same way as the capsule term.
    listFoodDistPos = []
    for currFoodPos in currFoodList:
        #foodDist = mazeDistance(currPos, currFoodPos, currentGameState)
        foodDist = manhattanDistance(currPos, currFoodPos)
        #listFoodDistPos.append(maxDist-foodDist)
        if foodDist > 0:
            listFoodDistPos.append(float(1)/foodDist)
        else:
            listFoodDistPos.append(float(2))
    if len(listFoodDistPos) > 0:
        scoreFood += max(listFoodDistPos)
        scoreFood += sum(listFoodDistPos)
    
    # Ghost terms: repulsion from all ghosts, plus an attraction toward
    # scared ghosts that are worth hunting.
    scaredPacman=False
    scaredGhost=False
    scaredGhostScore=0
    scaredGhostScoreList=[]
    listGhostDistPos = []
    for currGhost in currGhostStates:
        currGhostPos = currGhost.configuration.pos
        #distGhost = mazeDistance(currPos, currGhostPos, currentGameState)
        distGhost = manhattanDistance(currPos, currGhostPos)
        #listGhostDistPos.append(distGhost)
        if distGhost > 0:
            listGhostDistPos.append(float(1)/distGhost)
        else:
            listGhostDistPos.append(float(2))
        # Sharing a square with a ghost whose scare timer is <= 40 is fatal.
        # NOTE(review): 40 is presumably the maximum scare time, making this
        # trigger for every ghost at distance 0 -- confirm against pacman.py.
        if currGhost.scaredTimer <= 40 and distGhost==0:
        #if currGhost.scaredTimer <= 5 and distGhost<=5:
        #if currGhost.scaredTimer <= distGhost:
            scaredPacman=True
        # A ghost scared for at least 5 more moves within distance 20 is a
        # hunting target.
        if currGhost.scaredTimer >= 5 and distGhost<=20:
            scaredGhost=True
            if distGhost > 0:
                scaredGhostScoreList.append(float(1)/distGhost)
            else:
                scaredGhostScoreList.append(float(2))
    if len(scaredGhostScoreList) > 0:
        scaredGhostScore +=  max(scaredGhostScoreList)
        scaredGhostScore +=  sum(scaredGhostScoreList)
            
    if len(listGhostDistPos) > 0:
        # min + sum: unlike the food/capsule terms this adds the FARTHEST
        # ghost's inverse distance (min of 1/d) to the total repulsion.
        scoreGhost += min(listGhostDistPos)
        scoreGhost += sum(listGhostDistPos)
        
    if scaredPacman:
        # Standing on a live ghost: treat as (near) certain death.
        score += -100000
    elif scaredGhost:
        # Hunt mode: weight scared ghosts above capsules and food.
        score += (scaredGhostScore*10)+(scoreCapsule*6)+(scoreFood*2)-(scoreGhost*2)
    else:
        score += (scoreCapsule*6)+(scoreFood*2)-(scoreGhost*2)
    return score

def betterMazeEvaluationFunction(currentGameState):
    """
      Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
      evaluation function (question 5).

      DESCRIPTION:

      Variant of betterEvaluationFunction (originally intended to use maze
      distances instead of Manhattan distances -- those calls remain
      commented out).  Same inverse-distance attraction/repulsion terms
      with different weights (+6 capsules, +4 food, -2 ghosts, +1 scared
      ghosts), plus extra bonus/penalty adjustments based on the closest
      capsule and closest food, and a -100000 penalty when Pacman shares a
      square with a ghost.
    """
    "*** YOUR CODE HERE ***"
    currPos = currentGameState.getPacmanPosition()
    #currFood = currentGameState.getFood()
    currFoodList = currentGameState.getFood().asList()
    currCapsules = currentGameState.getCapsules()
    currGhostStates = currentGameState.getGhostStates()
    
    # Base everything on the displayed game score, then add shaping terms.
    gameScore = currentGameState.getScore()
    score = gameScore
    #score = float(0)
    scoreFood = float(0)
    scoreCapsule= float(0)
    scoreGhost = float(0)
    #maxDist = (currFood.width + currFood.height)
    #maxDist = (currFood.width * currFood.height)
    
    #print currentGameState._closedDistFood
    
    # Capsule attraction: inverse distance to every capsule; a capsule on
    # Pacman's own square contributes 2.
    lisCapsulesDistPos = []
    noCapsules = False
    closestCapsule = float(0)
    # NOTE(review): despite its name, noCapsules means "Pacman is NOT
    # standing on a capsule", not "there are no capsules" -- confirm intent.
    if not currPos in currCapsules:
        noCapsules = True
    for currCapsulesPos in currCapsules:
        #capsulesDist = mazeDistance(currPos, currCapsulesPos, currentGameState)
        capsulesDist = manhattanDistance(currPos, currCapsulesPos)
        #lisCapsulesDistPos.append(maxDist-capsulesDist)
        if capsulesDist > 0:
            lisCapsulesDistPos.append(float(1)/capsulesDist)
        else:
            lisCapsulesDistPos.append(float(2))
    if len(lisCapsulesDistPos) > 0:
        scoreCapsule += max(lisCapsulesDistPos)
        scoreCapsule += sum(lisCapsulesDistPos)
        # Largest inverse distance = nearest capsule.
        closestCapsule = max(lisCapsulesDistPos)
    
    # Food attraction, built the same way as the capsule term.
    listFoodDistPos = []
    noFood = False
    closestFood = float(0)
    # NOTE(review): likewise, noFood means "Pacman is NOT on a food square".
    if not currPos in currFoodList:
        noFood = True
    for currFoodPos in currFoodList:
        #if not (currPos,currFoodPos) in currentGameState._closedDistFood:
            #foodDist = mazeDistance(currPos, currFoodPos, currentGameState)
        foodDist = manhattanDistance(currPos, currFoodPos)
            #currentGameState._closedDistFood[(currPos,currFoodPos)] = foodDist
        #else:
            #foodDist = currentGameState._closedDistFood[(currPos,currFoodPos)]
        #listFoodDistPos.append(maxDist-foodDist)
        if foodDist > 0:
            listFoodDistPos.append(float(1)/foodDist)
        else:
            listFoodDistPos.append(float(2))
    if len(listFoodDistPos) > 0:
        scoreFood += max(listFoodDistPos)
        scoreFood += sum(listFoodDistPos)
        # Largest inverse distance = nearest food pellet.
        closestFood = max(listFoodDistPos)
    
    # Ghost terms: repulsion from all ghosts, attraction toward huntable
    # (scared) ghosts, and detection of a ghost on Pacman's square.
    scaredPacman=False
    scaredGhost=False
    scaredGhostScore=0
    scaredGhostScoreList=[]
    listGhostDistPos = []
    noGhost = True
    closestGhost = float(0)
    for currGhost in currGhostStates:
        currGhostPos = currGhost.configuration.pos
        #distGhost = mazeDistance(currPos, currGhostPos, currentGameState)
        distGhost = manhattanDistance(currPos, currGhostPos)
        #listGhostDistPos.append(distGhost)
        if distGhost > 0:
            listGhostDistPos.append(float(1)/distGhost)
        else:
            listGhostDistPos.append(float(2))
        # Sharing a square with a ghost whose scare timer is <= 40 is fatal.
        # NOTE(review): 40 is presumably the max scare time, so this fires
        # for every ghost at distance 0 -- confirm against pacman.py.
        if currGhost.scaredTimer <= 40 and distGhost==0:
        #if currGhost.scaredTimer <= 5 and distGhost<=5:
        #if currGhost.scaredTimer <= distGhost:
            scaredPacman=True
        # A ghost scared for at least 5 more moves within distance 20 is a
        # hunting target.
        if currGhost.scaredTimer >= 5 and distGhost<=20:
            scaredGhost=True
            if distGhost > 0:
                scaredGhostScoreList.append(float(1)/distGhost)
            else:
                scaredGhostScoreList.append(float(2))
        # noGhost == False means some ghost occupies Pacman's exact position.
        if currPos == currGhostPos:
            noGhost = False
    if len(scaredGhostScoreList) > 0:
        scaredGhostScore +=  max(scaredGhostScoreList)
        scaredGhostScore +=  sum(scaredGhostScoreList)
            
    if len(listGhostDistPos) > 0:
        # min + sum, as in betterEvaluationFunction.
        scoreGhost += min(listGhostDistPos)
        scoreGhost += sum(listGhostDistPos)
        if not noGhost:
            closestGhost += max(listGhostDistPos)
        
    if scaredPacman or not noGhost:
        # On top of a ghost: treat as (near) certain death.
        score += -100000
    elif scaredGhost:
        score += (scaredGhostScore*1)+(scoreCapsule*6)+(scoreFood*4)-(scoreGhost*2)
    else:
        score += (scoreCapsule*6)+(scoreFood*4)-(scoreGhost*2)
        
    # Closest-capsule adjustment: subtract when standing ON a capsule,
    # add otherwise.  NOTE(review): the sign convention here is surprising;
    # verify it matches the intended incentive.
    if not noCapsules:
        score -= (closestCapsule*6)
    else:
        score += (closestCapsule*6)
        
    # Same adjustment for the closest food pellet.
    if not noFood:
        score -= (closestFood*4)
    else:
        score += (closestFood*4)
        
    return score

# Abbreviation so the command line / autograder can select this evaluation
# function with `-a evalFn=better`.
better = betterEvaluationFunction

class ContestAgent(MultiAgentSearchAgent):
    """
      Your agent for the mini-contest.

      Thin wrapper: constructs an inner search agent (looked up by class
      name via `evalAg`) and delegates every getAction call to it.
    """
    
    # Command lines tried while tuning (kept for reproducibility):
    #-l contestClassic -p ExpectimaxAgent -a depth=3 -a evalFn=better -g DirectionalGhost -q
    #-l contestClassic -p ExpectimaxAgent -a depth=3 -a evalFn=betterMazeEvaluationFunction -g DirectionalGhost -n 10 -q
    #-l contestClassic -p ContestAgent -g DirectionalGhost -q -n 1
    
    #-l contestClassic -p ExpectimaxAgent -a depth=3 -a evalFn=betterMazeEvaluationFunction -g DirectionalGhost
    #-l contestClassic -p ExpectimaxAgent -a depth=3 -a evalFn=better -g DirectionalGhost
    
    #-l contestClassic -p AlphaBetaAgent -a depth=3 -a evalFn=betterMazeEvaluationFunction -g DirectionalGhost
    #-l contestClassic -p AlphaBetaAgent -a depth=3 -a evalFn=betterMazeEvaluationFunction -g DirectionalGhost -n 10 -q      -> Best I think
    
    #-l contestClassic -p ContestAgent -g DirectionalGhost -a depth=3 -a evalFn=betterMazeEvaluationFunction -a evalAg=AlphaBetaAgent
    #-l contestClassic -p ContestAgent -g DirectionalGhost -a evalAg=ExpectimaxAgent -a depth=4 -a evalFn=betterEvaluationFunction 
    
    #-l contestClassic -p ContestAgent -g DirectionalGhost -q -n 1
    
    #def __init__(self, evalAg = 'AlphaBetaAgent', depth = '5', evalFn = 'betterMazeEvaluationFunction'):
    def __init__(self, evalAg = 'ExpectimaxPolicyIterationAgent', depth = '3', evalFn = 'betterMazeEvaluationFunction'):
        # evalAg: class name of the inner search agent, resolved from this
        #         module's globals and instantiated with (evalFn, depth).
        # depth / evalFn: forwarded to the inner agent's constructor.
        self.index = 0 # Pacman is always agent index 0
        #self.searchType = AlphaBetaAgent(evalFn, depth)
        self.evaluationFunction = util.lookup(evalFn, globals())
        self.searchType = util.lookup(evalAg, globals())(evalFn, depth)
        self.depth = int(depth)
        # Memoization caches for distance computations (populated elsewhere,
        # if at all -- the mazeDistance experiments remain commented out).
        self._closedDistFood = {}
        self._closedDistCapsule = {}
        self._closedDistGhost = {}
        #self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
        
        #import time
        #self.starttime = time.time()
        #print "DFS: ", depthFirstSearch(iniPos, endPos, gameState), time.time() - starttime
        
    def getAction(self, gameState):
        """
          Returns an action.  You can use any method you want and search to any depth you want.
          Just remember that the mini-contest is timed, so you have to trade off speed and computation.
        
          Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
          just make a beeline straight towards Pacman (or away from him if they're scared!)
        """
        "*** YOUR CODE HERE ***"
        # Delegate entirely to the wrapped search agent.
        return self.searchType.getAction(gameState)
        #util.raiseNotDefined()

class ExpectimaxPolicyIterationAgent(MultiAgentSearchAgent):
    """
      Your expectimax agent for PolicyIteration

      Runs a depth-limited expectimax-style search (a max layer for Pacman,
      an averaging layer for each ghost) while maintaining value/policy
      iteration bookkeeping on the agent itself: per-state values, per
      (s, a, s') transition weights and rewards are cached across calls.
    """
    
    def __init__(self, evalAg = 'ExpectimaxPolicyIterationAgent', depth = '3', evalFn = 'betterMazeEvaluationFunction'):
        # evalAg is accepted for signature compatibility with how ContestAgent
        # constructs agents via util.lookup, but is not used here (see the
        # commented-out searchType line below).
        self.index = 0 # Pacman is always agent index 0
        self.evaluationFunction = util.lookup(evalFn, globals())
        #self.searchType = util.lookup(evalAg, globals())(evalFn, depth)
        self.depth = int(depth)
        # Maze-distance caches; not referenced by this class's visible
        # methods (kept for parity with ContestAgent -- TODO confirm).
        self._closedDistFood = {}
        self._closedDistCapsule = {}
        self._closedDistGhost = {}
        '''
        Constructor
        '''
        # Instrumentation counters for the iteration helpers below.
        self.rewardruns = 0
        self.policyruns = 0
        self.actionruns = 0
        self.actions = [] #self.actions = ["CW","CCW"] #ex: Directions.EAST #legal moves
        self.nodes = [] #self.nodes = ["A","B","C"] #ex: (successorGameState,nextMove) #the node with its action
        self.gama = 0.5 #self.gama = 1.0 #value decay
        # MDP-style bookkeeping, all keyed by (state, action, nextState)
        # triples or by state, as shown in the inline examples.
        self.TransactionsValue = {} #self.T[("A","CW","B")] = 1.0 #probability of s' given s and a. for a transaction function.
        self.TransactionsFunctions = [] #self.T = ("A","CW","B") #transactions functions with s, a, s'
        self.Reward = {} #self.R[("A","CW","B")] = 2.0 #first value of a timestep for a transaction function
        self.Values = {} #self.V["A"] = 2.2 #value of the state
        self.Vstars = {} #self.Vstar["A"] = 2.329 #optimal value of the state
        self.policies = {} #self.policies["A"] = "CW" #an action for a node

    def getAction(self, gameState):
        """
          Returns the expectimax action using self.depth and self.evaluationFunction
        
          All ghosts should be modeled as choosing uniformly at random from their
          legal moves.
        """
        "*** YOUR CODE HERE ***"
        
        # The triple-quoted block below is an earlier, abandoned version the
        # author kept for reference; it is a bare string literal and never runs.
        """
        numAgents = gameState.getNumAgents() # get the number of agents in the game
        legalMoves = gameState.getLegalActions(self.index) #get the legal actions available fot the current agent
        #if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP)
        
        successorGameStatesAction=[]
        print "legalMoves: ", legalMoves
        for nextMove in legalMoves:
            print "nextMove: ", nextMove
            node = (gameState.generateSuccessor(self.index, nextMove),nextMove)
            self.actions.append(nextMove)
            self.nodes.append(node)
            successorGameStatesAction.append(node)
        #successorGameStatesAction = [(gameState.generateSuccessor(self.index, nextMove),nextMove) for nextMove in legalMoves] #get the firts nodes for the root
        
        valueAction = []
        for successorGameState,succMove in successorGameStatesAction:
            valueAction.append(self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, self.index, ((self.depth*numAgents)-1), numAgents))
        #valueAction = [self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, self.index, ((self.depth*numAgents)-1), numAgents) for successorGameState,succMove in successorGameStatesAction] #call expctimax for each one of the firsts nodes of the root
        
        bestAction = legalMoves[valueAction.index(max(valueAction))] #get the best move for the expctimax function
        return bestAction #return the best move based on the expctimax value
        """
        
        import time
        starttime = time.time()
        
        #self.actions = [action for action in Directions]
        # NOTE(review): in game.py, Directions.LEFT/RIGHT/REVERSE are
        # direction-mapping dicts, not single moves, so this list mixes
        # types.  self.actions is not read again in this class's live code;
        # confirm before relying on it.
        self.actions = [Directions.EAST,Directions.LEFT,Directions.NORTH,Directions.REVERSE,Directions.RIGHT,Directions.SOUTH,Directions.STOP,Directions.WEST]
        numAgents = gameState.getNumAgents() # get the number of agents in the game
        actualMove = Directions.STOP #get the first move like the stop action of the pacman at the start of the game
        index = self.index #get the index of pacman =0
        depth = (self.depth*numAgents) #get the total depth like the depth of the search * the number of agents on the game (each agent will be a layer
        
        #legalMoves = gameState.getLegalActions(index) #get the legal actions available fot the current agent
        #successorGameStatesAction = [(gameState.generateSuccessor(index, proxMove),proxMove) for proxMove in legalMoves] #get the nexts gamestates based on the possible moves for the current gamestate
        #valueAction = self.getValueIteration(gameState,actualMove,successorGameStatesAction,index)
        
        # The recursion returns a (value, move) pair; the move component is
        # the action taken at the top max layer.
        valueAction = self.expectimaxPolicyIterationActionsValue(gameState, actualMove, index, depth, numAgents) #call the expectimaxPolicyIterationActionsValue method
        #return valueAction #return the best move
        print "valueAction: ", valueAction, "Time: ", time.time() - starttime
        return valueAction[1] #return the best move
    
    def expectimaxPolicyIterationActionsValue(self, currentGameState, nextMove, index, depth, numAgents):
        # Dispatcher for one search node: terminal test, then max layer for
        # Pacman (index 0) or averaging layer for a ghost.  Returns a
        # (value, move) pair where move is the action that LED INTO this node.
        
        # The triple-quoted block below is a dead earlier version (bare
        # string literal, never executed).
        """
        def expectimaxPolicyIterationActionsValue(self, currentGameState, nextMove, index, depth, numAgents):
        index += 1 
        if index >= numAgents: index = 0 #reset the index every time it reach the last agent
        legalMoves = currentGameState.getLegalActions(index) #get the legal actions available fot the current agent
        #if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP)
        successorGameStatesAction=[]
        print "nextMove: ", nextMove, "legalMoves: ", legalMoves
        for nextAction in legalMoves:
            print "nextAction: ", nextAction
            node = (currentGameState.generateSuccessor(self.index, nextAction),nextAction)
            self.actions.append(nextAction)
            self.nodes.append(node)
            successorGameStatesAction.append(node)
        #successorGameStatesAction = [(currentGameState.generateSuccessor(index, nextMove),nextMove) for nextMove in legalMoves] #get the nexts gamestates based on the possible moves for the current gamestate
        
        if depth < 1 or currentGameState.isWin() or currentGameState.isLose(): #if it's the end of the game, or if reach the end of the limit depth (it's the terminal node), get the evaluationFunction and the action
            valueAction = self.evaluationFunction(currentGameState)
        else:
            valueActions = []
            for successorGameState,succMove in successorGameStatesAction:
                valueActions.append(self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, index, depth-1, numAgents))
            
            if index == 0: #else, get the max value for pacman, or the expectation value for the ghosts
                #valueAction = max([self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, index, depth-1, numAgents) for successorGameState,succMove in successorGameStatesAction])
                valueAction = max(valueActions)
            else:
                #valueActions = [self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, index, depth-1, numAgents) for successorGameState,succMove in successorGameStatesAction]
                valueAction = sum(valueActions)/len(valueActions)
            
        return valueAction  #and return the value with its move
        """
        
        if index >= numAgents: index = 0 #reset the index every time it reach the last agent
        if depth < 1 or currentGameState.isWin() or currentGameState.isLose(): #if it's the end of the game, or if reach the end of the limit depth (it's the terminal node), get the evaluationFunction and the action
            # Terminal node: evaluate the state, cache its value, and pair it
            # with the incoming move.
            value = self.evaluationFunction(currentGameState)
            valueAction = (value,nextMove)
            self.Values[currentGameState]=value
            return valueAction 
        elif index == 0: #else, get the max value for pacman, or the average value for the ghosts
            valueAction = self.maxExpectimaxPolicyIterationValue(currentGameState, nextMove, index, depth, numAgents)
        else:
            valueAction = self.averageExpectimaxPolicyIterationValue(currentGameState, nextMove, index, depth, numAgents)
        return valueAction  #and return the value with its move
      
    def maxExpectimaxPolicyIterationValue(self, currentGameState, nextMove, index, depth, numAgents):
        # Max layer (Pacman): expand every legal successor, refresh the cached
        # value of this state via a value-iteration update, and return the
        # best per-successor score paired with the incoming move.
        legalMoves = currentGameState.getLegalActions(index) #get the legal actions available fot the current agent
        #if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP) #remove the stop action from the lega moves
        successorGameStatesAction = [(currentGameState.generateSuccessor(index, proxMove),proxMove) for proxMove in legalMoves] #get the nexts gamestates based on the possible moves for the current gamestate
        
        #valueIteration = self.getValueIteration(currentGameState,nextMove,successorGameStatesAction,index)
        #self.Values[currentGameState]=valueIteration[1]
        
        valueActions = []
        for successorGameState,succMove in successorGameStatesAction:
            
            # getValueIteration returns a (rewardSum, move) pair; its value
            # component overwrites this state's cached value on every pass.
            valueIteration = self.getValueIteration(successorGameState,succMove,successorGameStatesAction,index)
            self.Values[currentGameState]=valueIteration[0]
            
            # NOTE(review): the recursive result below is never appended (see
            # the commented line) -- the recursion is kept only for its side
            # effects on the self.Values / self.Reward caches.
            value = self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, index+1, depth-1, numAgents)
            #valueActions.append(value[0])
            valueActions.append(valueIteration[0])
        valueAction = (max(valueActions),nextMove)
        return valueAction
    
    def averageExpectimaxPolicyIterationValue(self, currentGameState, nextMove, index, depth, numAgents):
        # Ghost layer: identical to the max layer above except the scores are
        # averaged (uniform-random ghost model) instead of maximized.
        legalMoves = currentGameState.getLegalActions(index) #get the legal actions available fot the current agent
        #if Directions.STOP in legalMoves: legalMoves.remove(Directions.STOP) #remove the stop action from the lega moves
        successorGameStatesAction = [(currentGameState.generateSuccessor(index, proxMove),proxMove) for proxMove in legalMoves] #get the nexts gamestates based on the possible moves for the current gamestate
        valueActions = []
        for successorGameState,succMove in successorGameStatesAction:
            
            valueIteration = self.getValueIteration(successorGameState,succMove,successorGameStatesAction,index)
            self.Values[currentGameState]=valueIteration[0]
            
            # NOTE(review): as in the max layer, the recursive result is
            # discarded; only valueIteration[0] is scored.
            value = self.expectimaxPolicyIterationActionsValue(successorGameState, succMove, index+1, depth-1, numAgents)
            #valueActions.append(value[0])
            valueActions.append(valueIteration[0])
        valueAction = (sum(valueActions)/len(valueActions),nextMove)
        return valueAction
        
        
        
        
        
        
    
    

    def getValueIteration(self, currentGameState,succMove,successorGameStatesAction,index):
        # One value-iteration style update: best (rewardSum, move) pair over
        # all legal actions.  max() compares the tuples lexicographically, so
        # ties on the value fall back to comparing the move strings.
        return max(self.getActionValue(currentGameState,succMove,successorGameStatesAction,index))
    
    def getActionValue(self, currentGameState,succMove,successorGameStatesAction,index):
        # Collects one reward evaluation per legal action.  NOTE(review): the
        # loop variable `action` is never passed on, so each iteration calls
        # getRewardValue with identical arguments.
        actionValues=[]
        legalMoves = currentGameState.getLegalActions(index) #get the legal actions available fot the current agent
        for action in legalMoves:
            self.actionruns += 1
            actionValues.append(self.getRewardValue(currentGameState,succMove,successorGameStatesAction,index))
        return actionValues
    
    def getPolicyIteration(self, currentGameState,succMove,successorGameStatesAction,index):
        # Policy-iteration counterpart of getValueIteration; not called from
        # this class's visible search methods.
        return self.getPolicyValue(currentGameState,succMove,successorGameStatesAction,index)
    
    def getPolicyValue(self, currentGameState,succMove,successorGameStatesAction,index):
        # Evaluates the currently recorded policy action for this state.
        # Raises KeyError if no policy has been stored for currentGameState.
        # Returns an empty list when the stored action is no longer legal.
        action=""
        actionValues=[]
        legalMoves = currentGameState.getLegalActions(index) #get the legal actions available fot the current agent
        if self.policies[currentGameState] in legalMoves:
            self.policyruns += 1
            action = self.policies[currentGameState]
            actionValues.append(self.getRewardValue(currentGameState,succMove,successorGameStatesAction,index))
        return actionValues
    
    def getRewardValue(self,currentGameState,succMove,successorGameStatesAction,index):
        # Bellman-style backup over the supplied successors:
        #   sum over s' of  T(s,a,s') * (R(s,a,s') + gama * V(s'))
        # Unseen (s, a, s') triples get a fixed transition weight of 0.8 and a
        # reward equal to the evaluation of the CURRENT state; both are cached
        # for subsequent visits, as are successor state values.
        rewards = []
        for nodePrime,nodePrimeAction in successorGameStatesAction:
            self.rewardruns += 1
            if (currentGameState,nodePrimeAction,nodePrime) in self.TransactionsFunctions:
                # Known transition: reuse the cached weight and reward.
                transition = self.TransactionsValue[(currentGameState,nodePrimeAction,nodePrime)]
                reward = self.Reward[(currentGameState,nodePrimeAction,nodePrime)]
            else:
                # First visit: register the transition with the default weight.
                self.TransactionsFunctions.append((currentGameState,nodePrimeAction,nodePrime))
                transition = 0.8
                self.TransactionsValue[(currentGameState,nodePrimeAction,nodePrime)] = transition
                reward = self.evaluationFunction(currentGameState)
                self.Reward[(currentGameState,nodePrimeAction,nodePrime)] = reward
            if nodePrime in self.Values:
                value = self.Values[nodePrime]
            else:
                value = self.evaluationFunction(nodePrime)
                self.Values[nodePrime] = value
            rewards.append(transition * (reward + (self.gama * value)))
        return (sum(rewards),succMove)

def mazeDistance(iniPos, endPos, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    # Normalize both endpoints to integer grid coordinates.
    x1, y1 = int(iniPos[0]), int(iniPos[1])
    x2, y2 = int(endPos[0]), int(endPos[1])
    iniPos = (x1, y1)
    endPos = (x2, y2)
    
    walls = gameState.getWalls()
    # BUGFIX: the original concatenated the tuple itself ('...' + iniPos),
    # which raised TypeError instead of the intended assertion message.
    assert not walls[x1][y1], 'iniPos is a wall: ' + str(iniPos)
    assert not walls[x2][y2], 'endPos is a wall: ' + str(endPos)
    
    # BFS yields the shortest path in this unweighted maze; the DFS/UCS/A*
    # alternatives were benchmarked by the author and BFS was kept.
    return len(breadthFirstSearch(iniPos, endPos, gameState))

def depthFirstSearch(iniPos, endPos, gameState):
    """
    Depth-first search over the maze graph from iniPos to endPos.
    Returns the list of directions along the (not necessarily shortest)
    path found, or [] when the start already satisfies the goal.
    Example usage: depthFirstSearch( (2,4), (5,6), gameState)
    Example return: [Directions.South, Directions.West]
    """
    # Goal already reached (or the game is over): nothing to do.
    if gameState.isWin() or gameState.isLose() or iniPos == endPos:
        return []
    visited = set()
    fringe = util.Stack()
    fringe.push((iniPos, []))
    while not fringe.isEmpty():
        pos, path = fringe.pop()
        if gameState.isWin() or gameState.isLose() or pos == endPos:
            return path
        if pos in visited:
            continue
        visited.add(pos)
        # Push each unexplored neighbor with the path extended by the move
        # that reaches it.
        for neighbor, move in getDirectionsToLegalNeighbors(pos, gameState.getWalls()):
            fringe.push((neighbor, path + [move]))
                    
def breadthFirstSearch(iniPos, endPos, gameState):
    """
    Breadth-first search over the maze graph from iniPos to endPos.
    Returns the list of directions along a shortest path, or [] when the
    start already satisfies the goal.
    Example usage: breadthFirstSearch( (2,4), (5,6), gameState)
    Example return: [Directions.South, Directions.West]
    """
    # Goal already reached (or the game is over): nothing to do.
    if gameState.isWin() or gameState.isLose() or iniPos == endPos:
        return []
    visited = set()
    fringe = util.Queue()
    fringe.push((iniPos, []))
    while not fringe.isEmpty():
        pos, path = fringe.pop()
        if gameState.isWin() or gameState.isLose() or pos == endPos:
            return path
        if pos in visited:
            continue
        visited.add(pos)
        # Enqueue each unexplored neighbor with the path extended by the
        # move that reaches it.
        for neighbor, move in getDirectionsToLegalNeighbors(pos, gameState.getWalls()):
            fringe.push((neighbor, path + [move]))

def uniformCostSearch(iniPos, endPos, gameState):
    """
    Search the node of least total cost first.
    return a list of directions to go from point 1 to point 2
    Example usage: uniformCostSearch( (2,4), (5,6), gameState)
    Example return: [Directions.South, Directions.West]
    Returns None when the fringe empties without reaching endPos.
    """
    if gameState.isWin() or gameState.isLose() or iniPos == endPos:
        return []
    closedSet = set()
    fringe = util.PriorityQueue()
    # Node layout: (position, path-so-far, accumulated cost g).
    fringe.push((iniPos, [], 0), 0)
    while not fringe.isEmpty():
        currPos, path, cost = fringe.pop()
        if gameState.isWin() or gameState.isLose() or currPos == endPos:
            return path
        if currPos in closedSet:
            continue
        closedSet.add(currPos)
        for nextPos, nextMove in getDirectionsToLegalNeighbors(currPos, gameState.getWalls()):
            # BUGFIX: the step cost must be measured from the node being
            # expanded (currPos).  The original shadowed currPos with the
            # neighbor and measured the step from there, which could charge
            # 999999 for a perfectly legal move.
            nextCost = getCostOfActions(gameState, currPos, [nextMove])
            fringe.push((nextPos, path + [nextMove], cost + nextCost), cost + nextCost)
    return None  # endPos unreachable from iniPos
                    
def aStarSearch(iniPos, endPos, gameState, heuristic=scoreEvaluationFunction):
    """
    Search the node that has the lowest combined cost and heuristic first.
    return a list of directions to go from point 1 to point 2
    Example usage: aStarSearch( (2,4), (5,6), gameState)
    Example return: [Directions.South, Directions.West]
    Returns None when the fringe empties without reaching endPos.

    NOTE(review): the heuristic is evaluated on the gameState only, so it is
    constant over the search and merely shifts every priority equally; a
    position-aware heuristic would be needed for A* to outperform UCS here.
    The call is preserved to keep the public interface unchanged.
    """
    if gameState.isWin() or gameState.isLose() or iniPos == endPos:
        return []
    closedSet = set()
    fringe = util.PriorityQueue()
    # Node layout: (position, path-so-far, accumulated cost g).
    fringe.push((iniPos, [], 0), 0 + heuristic(gameState))
    while not fringe.isEmpty():
        currPos, path, cost = fringe.pop()
        if gameState.isWin() or gameState.isLose() or currPos == endPos:
            return path
        if currPos in closedSet:
            continue
        closedSet.add(currPos)
        for nextPos, nextMove in getDirectionsToLegalNeighbors(currPos, gameState.getWalls()):
            # BUGFIX: carry the true path cost g(n) in the node (as in
            # uniformCostSearch).  The original re-walked the whole path with
            # getCostOfActions starting from the NEIGHBOR position -- the
            # wrong square -- which produced bogus priorities and was
            # O(path length) per push.
            stepCost = getCostOfActions(gameState, currPos, [nextMove])
            newCost = cost + stepCost
            fringe.push((nextPos, path + [nextMove], newCost), newCost + heuristic(gameState))
    return None  # endPos unreachable from iniPos

def getDirectionsToLegalNeighbors(iniPos, walls):
    """
        A variant of game.Actions.getLegalNeighbors that pairs every legal
        neighboring position with the direction that leads to it:
        [((pos_x,pos_y),direction)]
        Example usage: getDirectionsToLegalNeighbors( (2,4), gameState.getWalls)
        Example return: [((1,2),Directions.North), ((3,4),Directions.South)]
    """
    from game import Actions
    # Round the (possibly fractional) position to its grid cell.
    gridX, gridY = int(iniPos[0] + 0.5), int(iniPos[1] + 0.5)
    result = []
    for direction, (dx, dy) in Actions._directionsAsList:
        nx = gridX + dx
        if nx < 0 or nx == walls.width: continue
        ny = gridY + dy
        if ny < 0 or ny == walls.height: continue
        # Keep only in-bounds cells that are not walls.
        if not walls[nx][ny]:
            result.append(((nx, ny), direction))
    return result

def getCostOfActions(gameState, currPos, actions):
    """
    Returns the cost of a particular sequence of actions starting from
    currPos (unit cost per step).  If those actions include an illegal
    move (a step into a wall), return 999999.
    """
    from game import Actions
    # Idiom fix: compare to None with 'is', not '=='.
    if actions is None: return 999999
    walls = gameState.getWalls()  # hoisted: was re-fetched on every step
    x, y = currPos
    cost = 0
    for action in actions:
        # Figure out the next square and check whether it's legal.
        dx, dy = Actions.directionToVector(action)
        x, y = int(x + dx), int(y + dy)
        if walls[x][y]: return 999999
        cost += 1  # unit cost per step
    return cost