from copy import deepcopy

def initial_state():
    """Return a fresh 3x3 board; every cell is the empty string."""
    return [['' for _ in range(3)] for _ in range(3)]

def terminal_test(state):
    """Return True when the game is over.

    The game ends either because one player has three in a row
    (get_winner reports a winner) or because no empty cell remains.
    """
    if get_winner(state) is not None:
        return True
    # Draw check: the board is terminal only if every row is full.
    return all('' not in row for row in state)

def get_winner(state):
    """Return 'X' or 'O' if that mark fills a row, column, or diagonal; else None."""
    # Gather all eight winning lines, then scan them uniformly.
    lines = []
    for i in range(3):
        lines.append([state[i][0], state[i][1], state[i][2]])  # row i
        lines.append([state[0][i], state[1][i], state[2][i]])  # column i
    lines.append([state[0][0], state[1][1], state[2][2]])      # main diagonal
    lines.append([state[0][2], state[1][1], state[2][0]])      # anti-diagonal
    for a, b, c in lines:
        if a != '' and a == b == c:
            return a
    return None

def utility(state):
    """Score a terminal board: +1 if X won, -1 if O won, 0 for a draw."""
    scores = {'X': 1, 'O': -1}
    # get_winner returns None for a draw, so .get falls back to 0.
    return scores.get(get_winner(state), 0)

def result(state, action):
    """Return the successor board after the player to move marks `action`.

    `action` is a (row, col) pair; the input `state` is left unmodified.
    """
    row, col = action
    successor = deepcopy(state)
    successor[row][col] = player(state)
    return successor

def player(state):
    """Return the mark ('X' or 'O') of the player whose turn it is.

    X always moves first, so X is to move whenever X has not placed
    more marks than O.
    """
    cells = [cell for row in state for cell in row]
    return 'O' if cells.count('X') > cells.count('O') else 'X'

def actions(state):
    """Return every legal move as a (row, col) tuple, in row-major order."""
    moves = []
    for row in range(3):
        for col in range(3):
            if state[row][col] == '':
                moves.append((row, col))
    return moves


def max_value(state, alpha, beta):
    """Alpha-beta search from a state where MAX (player X) is to move.

    Args:
        state: current board; X must be the player to act.
        alpha: best score MAX is already guaranteed (lower bound).
        beta:  best score MIN is already guaranteed (upper bound).

    Returns:
        (best_action, v): the move that maximizes X's score and that
        score. best_action is None when `state` is terminal.
    """
    # Terminal board: no move to choose, just report the final score.
    if terminal_test(state):
        return None, utility(state)

    best_action = None
    v = float('-inf')

    for move in actions(state):
        # The opponent (MIN) replies to each successor; inherit alpha/beta.
        _, score = min_value(result(state, move), alpha, beta)

        if score > v:
            v = score
            best_action = move

        # Raise MAX's guaranteed lower bound.
        alpha = max(alpha, v)

        # Prune: MIN above would never allow a line worth >= beta.
        if alpha >= beta:
            break

    return best_action, v


def min_value(state, alpha, beta):
    """Alpha-beta search from a state where MIN (player O) is to move.

    Args:
        state: current board; O must be the player to act.
        alpha: best score MAX is already guaranteed (lower bound).
        beta:  best score MIN is already guaranteed (upper bound).

    Returns:
        (best_action, v): the move that minimizes X's score and that
        score. best_action is None when `state` is terminal.
    """
    # Terminal board: no move to choose, just report the final score.
    if terminal_test(state):
        return None, utility(state)

    best_action = None
    v = float('inf')

    for move in actions(state):
        # The opponent (MAX) replies to each successor; inherit alpha/beta.
        _, score = max_value(result(state, move), alpha, beta)

        if score < v:
            v = score
            best_action = move

        # Lower MIN's guaranteed upper bound.
        beta = min(beta, v)

        # Prune: MAX above would never allow a line worth <= alpha.
        if alpha >= beta:
            break

    return best_action, v


def alpha_beta(state):
    """Return the optimal next move for whichever player is to act on `state`.

    The root call starts with the widest possible bounds (-inf, +inf);
    X maximizes the score and O minimizes it.
    """
    search = max_value if player(state) == 'X' else min_value
    best_action, _ = search(state, float('-inf'), float('inf'))
    return best_action