﻿import numpy as np

def possible_moves(state):
    """Return the indices of every empty cell (value 0) on the board."""
    empty_cells = []
    for idx, cell in enumerate(state):
        if cell == 0:
            empty_cells.append(idx)
    return empty_cells

def is_winner(state, player):
    """Return True if `player` (1 or -1) occupies a full row, column or diagonal.

    `state` is a flat list of 9 cells indexed row-major:
        0 1 2
        3 4 5
        6 7 8

    Bug fix: the middle column was written as (14, 7) instead of (1, 4, 7),
    which raised IndexError (state[14]) whenever evaluation reached that line.
    """
    winning_positions = [
        (0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
        (0, 4, 8), (2, 4, 6),             # diagonals
    ]
    return any(all(state[pos] == player for pos in line) for line in winning_positions)

def get_reward(state):
    """Return +1 if player 1 has won, -1 if player -1 has won, else 0.

    A 0 covers both a draw and a game still in progress.
    """
    for player, reward in ((1, 1), (-1, -1)):
        if is_winner(state, player):
            return reward
    return 0

def best_move(state, q_table):
    """Return the legal move with the highest Q-value for `state`.

    Returns None when the board is full (no legal moves).
    Assumes `q_table[tuple(state)]` exists and holds a length-9 array of
    Q-values — callers must initialize the entry first.

    Bug fix: the original indexed the dict as `q_table[tuple(state), x]`,
    i.e. with the composite key `(tuple(state), x)`. The table actually maps
    `tuple(state)` to an array of 9 Q-values, so every lookup raised
    KeyError; the correct access is `q_table[tuple(state)][x]`.
    """
    moves = possible_moves(state)
    if not moves:
        return None
    return max(moves, key=lambda m: q_table[tuple(state)][m])

def train_q_learning(episodes, alpha=0.1, gamma=0.9, epsilon=0.2):
    """Train a tabular Q-learning agent on tic-tac-toe.

    Args:
        episodes: number of self-generated games to play.
        alpha: learning rate for the Q-value update.
        gamma: discount factor for the bootstrapped next-state value.
        epsilon: probability of taking a random (exploratory) move.

    Returns:
        dict mapping tuple(board_state) -> np.ndarray of 9 Q-values.

    NOTE(review): only player 1 ever places a mark — there is no opponent
    turn — so is_winner(state, -1) can never be True during training and the
    agent effectively learns to complete a line on an empty board. Confirm
    whether an opponent policy was intended.

    Bug fix: the original exploitation branch called best_move(), whose
    broken dict indexing raised KeyError on the first greedy step. The greedy
    choice is now taken inline as an argmax over the legal moves, with the
    state's Q-entry initialized before selection (a fresh all-zeros entry
    makes the greedy choice the first legal cell).
    """
    q_table = {}
    for _ in range(episodes):
        state = [0] * 9  # empty tic-tac-toe board
        while True:
            moves = possible_moves(state)
            state_key = tuple(state)
            # Ensure the current state has a Q-entry before acting on it.
            if state_key not in q_table:
                q_table[state_key] = np.zeros(9)

            if np.random.random() < epsilon:  # explore
                move = np.random.choice(moves)
            else:  # exploit: legal move with the highest Q-value
                q_values = q_table[state_key]
                move = max(moves, key=lambda m: q_values[m])

            new_state = state[:]
            new_state[move] = 1  # player 1 is the learner

            reward = get_reward(new_state)
            new_key = tuple(new_state)
            if new_key not in q_table:
                q_table[new_key] = np.zeros(9)

            # Q-learning update: Q(s,a) += alpha * (r + gamma*max_a' Q(s',a') - Q(s,a)).
            # Terminal states are never updated, so their entries stay zero and
            # the bootstrap term correctly contributes nothing at episode end.
            q_table[state_key][move] += alpha * (
                reward + gamma * np.max(q_table[new_key]) - q_table[state_key][move]
            )

            state = new_state
            if reward != 0 or all(x != 0 for x in state):  # win/loss or full board
                break
    return q_table

# Train the agent (module-level side effect: runs 1000 episodes on import)
q_table = train_q_learning(1000)