import numpy as np

# Board dimensions for standard tic-tac-toe (3x3).
board_rows = 3 
board_cols = 3

def get_possible_moves(board):
    """Return every empty cell of *board* as a (row, col) tuple.

    Generalized to read the dimensions from the board itself instead of the
    module-level 3x3 constants, so it works for any rectangular board
    (identical results for the 3x3 boards used in this file).
    """
    rows = len(board)
    cols = len(board[0]) if rows else 0
    return [(i, j) for i in range(rows) for j in range(cols) if board[i][j] == 0]

def get_next_board(board, move, player):
    """Return a copy of *board* with *player*'s mark placed at *move*.

    *move* is a (row, col) tuple; the input board is left untouched.
    """
    successor = np.copy(board)
    successor[move] = player
    return successor

# NOTE: legacy 3x3-only winner check, superseded by the generalized
# get_winner below; kept for reference only.
# def get_winner(board):
#     for i in range(3):
#         if board[i, 0] == board[i, 1] == board[i, 2] != 0:
#             return board[i, 0]
#         if board[0, i] == board[1, i] == board[2, i] != 0:
#             return board[0, i]
#     if board[0, 0] == board[1, 1] == board[2, 2] != 0:
#         return board[0, 0]
#     if board[0, 2] == board[1, 1] == board[2, 0] != 0:
#         return board[0, 2]
#     return 0  

def get_winner(board):
    """Return the player (1 or 2) owning any three-in-a-row on *board*, else 0.

    Each non-empty cell is tested as the *middle* cell of a potential line of
    three (row, column, or either diagonal). Generalized to read the board
    dimensions from the board itself rather than the module globals — same
    results for 3x3, and now also finds any 3-in-a-row on larger boards.
    """
    rows = len(board)
    cols = len(board[0]) if rows else 0
    for i in range(rows):
        for j in range(cols):
            mark = board[i][j]
            if mark == 0:
                continue
            mid_row = 0 < i < rows - 1
            mid_col = 0 < j < cols - 1
            # Both diagonals centred on (i, j).
            if mid_row and mid_col:
                if board[i-1][j-1] == mark == board[i+1][j+1]:
                    return mark
                if board[i-1][j+1] == mark == board[i+1][j-1]:
                    return mark
            # Vertical line centred on (i, j).
            if mid_row and board[i-1][j] == mark == board[i+1][j]:
                return mark
            # Horizontal line centred on (i, j).
            if mid_col and board[i][j-1] == mark == board[i][j+1]:
                return mark
    return 0

# Iteratively compute the value function over the enumerated state space
# (minimax-flavoured: the side to move maximises when it is `which_player`,
# otherwise it minimises).
def value_iteration(states, which_player=1, gamma=0.9, threshold=0.001):
    """Compute a value for every board in *states* from `which_player`'s view.

    Parameters
    ----------
    states : list of board arrays (as produced by generate_states + dedup).
    which_player : 1 or 2 — the player whose perspective the values take.
    gamma : discount factor.
    threshold : convergence tolerance on the max per-sweep value change.

    Returns a numpy array V aligned index-for-index with *states*.

    NOTE(review): terminal states are updated as V = reward + gamma * V each
    sweep, so their values converge to reward / (1 - gamma) rather than the
    bare reward — preserved as-is to keep the original numerics.
    """
    V = np.zeros(len(states))
    # Map each board's string form to its index for successor lookups.
    state_index = {str(state): idx for idx, state in enumerate(states)}

    while True:
        delta = 0.0
        for s, state in enumerate(states):
            v = V[s]
            moves = get_possible_moves(state)
            winner = get_winner(state)  # hoisted: original evaluated it twice
            if not moves or winner != 0:
                # Terminal state: win/loss/draw reward from which_player's view.
                if winner == 0:
                    reward = 0
                elif winner == which_player:
                    reward = 5
                else:
                    reward = -5
                V[s] = reward + gamma * V[s]
            else:
                # Side to move: player 1 moves first, so with an odd number of
                # empty cells it is player 1's turn.
                player = 1 if np.sum(state == 0) % 2 == 1 else 2
                candidates = []
                for move in moves:
                    next_state = get_next_board(state, move, player)
                    next_winner = get_winner(next_state)
                    # Small shaped reward for moves that win/lose immediately.
                    if next_winner == 0:
                        reward = 0
                    elif next_winner == which_player:
                        reward = 1
                    else:
                        reward = -1
                    next_idx = state_index[str(next_state)]
                    candidates.append(reward + gamma * V[next_idx])
                # which_player picks its best move; the opponent its worst.
                V[s] = max(candidates) if player == which_player else min(candidates)

            delta = max(delta, abs(v - V[s]))

        if delta < threshold:
            return V

# Enumerate the reachable state space by playing out every move sequence.
def generate_states(current_state, current_player):
    """Return every board reachable from *current_state* with *current_player* to move.

    Terminal boards (won or full) are returned as singleton lists. The result
    still contains duplicates from transpositions (the caller deduplicates),
    but each node now records itself only ONCE — the original appended
    `current_state` once per legal move, inflating the list with identical
    copies for no benefit.
    """
    if get_winner(current_state) != 0:
        return [current_state]

    moves = get_possible_moves(current_state)
    if not moves:
        return [current_state]

    states = [current_state]
    next_player = 2 if current_player == 1 else 1
    for move in moves:
        next_state = get_next_board(current_state, move, current_player)
        states.extend(generate_states(next_state, next_player))

    return states

# Build the state space: enumerate every reachable board, then deduplicate by
# converting each board to a hashable tuple-of-tuples.
initial_board = np.zeros((board_rows, board_cols), dtype=int)
unique_boards = set(tuple(map(tuple, board)) for board in generate_states(initial_board, 1))
all_states = [np.array(board) for board in unique_boards]
state_index = {str(board): idx for idx, board in enumerate(all_states)}

# Run value iteration once from each player's perspective and persist results.
print(len(all_states))
values1 = value_iteration(all_states)
values2 = value_iteration(all_states, which_player=2)
np.save('values1.npy', values1)
np.save('values2.npy', values2)
np.save('state_index.npy', state_index)
print("Value function computed for all states.")