# multiAgents.py
# --------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
# 
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


from util import manhattanDistance
from game import Directions
import random, util

from game import Agent
from pacman import GameState

class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.

    The code below is provided as a guide.  You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
    """

    def getAction(self, gameState: GameState):
        """
        Choose among the best options according to the evaluation function.

        Takes a GameState and returns some Directions.X for some X in the
        set {NORTH, SOUTH, WEST, EAST, STOP}.
        """
        # Collect legal moves and score the successor each one leads to.
        legalMoves = gameState.getLegalActions()
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]

        # Break ties between equally good actions at random so the agent
        # does not deterministically oscillate between two moves.
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices)

        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState: GameState, action):
        """
        Score `action` taken from `currentGameState`; higher is better.

        The score is the negation of a distance-based penalty:
          * 0 if the move eats a food pellet, otherwise the Manhattan
            distance from the new position to the closest remaining pellet;
          * plus 2 ** (2 - d) for every ghost at Manhattan distance d, so
            the ghost penalty grows exponentially as a ghost closes in.
        """
        # Useful information extracted from the proposed successor state.
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()          # grid of boolean food indicators
        newGhostStates = successorGameState.getGhostStates()
        # NOTE(review): ghostState.scaredTimer is available here but is
        # deliberately unused -- scared ghosts are still treated as threats.

        remainingFood = newFood.asList()
        if len(remainingFood) == currentGameState.getFood().count():
            # No pellet was eaten by this move: penalize by the distance to
            # the nearest remaining pellet.  default=inf preserves the
            # original behavior when no food is left on the board.
            penalty = min((manhattanDistance(pt, newPos) for pt in remainingFood),
                          default=float('inf'))
        else:
            # The move eats a pellet -- best possible food outcome.
            penalty = 0

        # Exponential ghost-proximity penalty: adjacent ghosts dominate.
        for ghost in newGhostStates:
            penalty += 2 ** (2 - manhattanDistance(ghost.getPosition(), newPos))

        return -penalty

def scoreEvaluationFunction(currentGameState: GameState):
    """
    The default evaluation function: simply the score of the state -- the
    same number displayed in the Pacman GUI.

    This evaluation function is meant for use with adversarial search
    agents (not reflex agents).
    """
    score = currentGameState.getScore()
    return score

class MultiAgentSearchAgent(Agent):
    """
    Common elements shared by all of the multi-agent searchers; anything
    defined here is available to MinimaxPacmanAgent, AlphaBetaPacmanAgent
    and ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents.  Please do not
    remove anything, however.

    Note: this is an abstract class -- one that should not be instantiated.
    It is only partially specified and designed to be extended.  Agent
    (game.py) is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        # The depth option arrives as a string (e.g. from the command line).
        self.depth = int(depth)
        # Resolve the evaluation function's name into the actual callable.
        self.evaluationFunction = util.lookup(evalFn, globals())
        # Pacman always moves first, so this agent's index is 0.
        self.index = 0

class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState: GameState):
        """
        Return the minimax action from gameState, searching self.depth plies
        and scoring leaf states with self.evaluationFunction.

        Agent 0 is Pacman (the maximizer); agents >= 1 are ghosts (the
        minimizers).  One ply means every agent has moved once, so the
        depth counter advances only when control wraps back to Pacman.

        Useful GameState methods (pacman.py):
          getLegalActions(agentIndex) -- legal actions for that agent
          generateSuccessor(agentIndex, action) -- state after that move
          getNumAgents() -- total number of agents in the game
          isWin() / isLose() -- terminal-state tests
        """

        def minimax(state, depth, agent):
            """Return (value, action) of the best move for `agent`."""
            # Terminal test: game over, or the depth limit has been reached.
            if state.isWin() or state.isLose() or depth == self.depth:
                return self.evaluationFunction(state), None

            # Next agent to move; depth increases when we wrap back to
            # Pacman (agent 0).  The modulo also keeps this correct in the
            # degenerate case where Pacman is the only agent.
            nextAgent = (agent + 1) % state.getNumAgents()
            nextDepth = depth + 1 if nextAgent == 0 else depth

            if agent == 0:
                # MAX node (Pacman): keep the highest-valued action.
                bestVal, bestAction = -float('inf'), None
                for action in state.getLegalActions(agent):
                    val = minimax(state.generateSuccessor(agent, action),
                                  nextDepth, nextAgent)[0]
                    if val > bestVal:
                        bestVal, bestAction = val, action
            else:
                # MIN node (ghost): keep the lowest-valued action.
                bestVal, bestAction = float('inf'), None
                for action in state.getLegalActions(agent):
                    val = minimax(state.generateSuccessor(agent, action),
                                  nextDepth, nextAgent)[0]
                    if val < bestVal:
                        bestVal, bestAction = val, action
            return bestVal, bestAction

        # Run the search from the root (Pacman to move, depth 0) and return
        # the action component of the result.
        return minimax(gameState, 0, 0)[1]

class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction

        TODO: not implemented yet -- currently aborts via
        util.raiseNotDefined().
        """
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()

class ExpectimaxAgent(MultiAgentSearchAgent):
    """
      Your expectimax agent (question 4)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction

        All ghosts should be modeled as choosing uniformly at random from their
        legal moves.

        TODO: not implemented yet -- currently aborts via
        util.raiseNotDefined().
        """
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()

def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: <write something here so we know what you did>

    TODO: not implemented yet -- currently aborts via
    util.raiseNotDefined().
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()

# Abbreviation: a shorter alias for betterEvaluationFunction -- presumably
# referenced by name elsewhere (e.g. a command-line option); do not remove.
better = betterEvaluationFunction
