import numpy as np
import random

# Board dimensions for the five-in-a-row (Gomoku-style) game grid.
# Cell values: 0 = empty, 1 = player one's stone, 2 = player two's stone.
board_rows = 12 
board_cols = 12

def get_possible_moves(board):
    """Return the (row, col) coordinates of every empty cell on *board*."""
    empty_cells = []
    for row in range(board_rows):
        for col in range(board_cols):
            if board[row][col] == 0:
                empty_cells.append((row, col))
    return empty_cells

def get_next_board(board, move, player):
    """Return a copy of *board* with *player*'s stone placed at *move*.

    *move* is a (row, col) tuple; the original board is left untouched.
    """
    successor = np.copy(board)
    successor[move] = player
    return successor

def get_winner(board):
    """Return the id of the player with five identical stones in a row, or 0.

    Every occupied cell is tested as the CENTER of a five-cell window along
    each of the four line directions (main diagonal, anti-diagonal,
    vertical, horizontal); a run of five therefore always has a detectable
    center cell.
    """
    directions = ((1, 1), (1, -1), (1, 0), (0, 1))
    for r in range(board_rows):
        for c in range(board_cols):
            player = board[r][c]
            if player == 0:
                continue
            for dr, dc in directions:
                # Both window endpoints (r±2dr, c±2dc) must lie on the board;
                # for dr==0 or dc==0 that axis is trivially in bounds.
                if not (0 <= r - 2 * dr < board_rows and 0 <= r + 2 * dr < board_rows):
                    continue
                if not (0 <= c - 2 * dc < board_cols and 0 <= c + 2 * dc < board_cols):
                    continue
                if all(board[r + k * dr][c + k * dc] == player for k in (-2, -1, 1, 2)):
                    return player
    return 0

# Generate a single trajectory by uniformly random self-play.
def generate_trajectory(current_state, current_player):
    """Play random moves from *current_state* until a win or a full board.

    Returns the list of visited board states, beginning with
    *current_state* and ending at the terminal state.
    """
    if get_winner(current_state) != 0:
        return [current_state]

    legal_moves = get_possible_moves(current_state)
    if not legal_moves:
        return [current_state]

    # random.randint (not random.choice) kept so the RNG stream is unchanged.
    chosen = legal_moves[random.randint(0, len(legal_moves) - 1)]
    successor = get_next_board(current_state, chosen, current_player)
    opponent = 2 if current_player == 1 else 1
    return [current_state] + generate_trajectory(successor, opponent)

# Monte Carlo value estimation: average discounted returns over random games.
def value_iteration(which_player=1, gamma=0.9, sample_number=10000):
    """Estimate state values for *which_player* via Monte Carlo sampling.

    Plays *sample_number* random games from an empty board. The terminal
    reward (+1 win, -1 loss, 0 draw, from *which_player*'s perspective) is
    propagated backwards along each trajectory, discounted by *gamma* per
    step, then averaged per state.

    Returns (values, visit_counts), both dicts keyed by str(state).
    """
    value_sums = dict()
    visit_counts = {}
    for _ in range(sample_number):
        empty_board = np.zeros((board_rows, board_cols), dtype=int)
        trajectory = generate_trajectory(empty_board, 1)
        winner = get_winner(trajectory[-1])
        if winner == 1:
            reward = 1 if which_player == 1 else -1
        elif winner == 2:
            reward = -1 if which_player == 1 else 1
        else:
            reward = 0
        weight = 1
        # Walk the trajectory backwards, accumulating discounted rewards.
        while trajectory:
            key = str(trajectory.pop())
            value_sums[key] = value_sums.get(key, 0.0) + weight * reward
            visit_counts[key] = visit_counts.get(key, 0) + 1
            weight *= gamma

    # Turn accumulated sums into per-state averages.
    for key, count in visit_counts.items():
        if count != 0:
            value_sums[key] = value_sums[key] / count
    return value_sums, visit_counts

# Online Monte Carlo value estimation: refine value/visit tables by sampling
# random continuations of an already-played trajectory prefix.
def online_value_iteration(traj, values, visits , which_player=1, gamma=0.9, sample_number=1000):
    """Update *values* and *visits* in place from rollouts continuing *traj*.

    Parameters
    ----------
    traj : non-empty list of board states already played; rollouts continue
        from traj[-1].
    values : dict str(state) -> running mean of discounted terminal reward.
    visits : dict str(state) -> visit count.
    which_player : player id (1 or 2) whose perspective defines the reward.
    gamma : per-step discount applied moving back from the terminal state.
    sample_number : number of random rollouts to simulate.

    Returns the (values, visits) dicts (same objects, mutated in place).
    """
    for _ in range(sample_number):
        # NOTE(review): the rollout gives the OPPONENT the first move, which
        # assumes traj[-1] resulted from which_player's own move — confirm
        # against call sites.
        gen_traj = generate_trajectory(traj[-1], current_player= 2 if which_player == 1 else 1)
        # BUG FIX: gen_traj[0] is traj[-1] itself, so the old `traj + gen_traj`
        # counted that state twice per sample (with two different discount
        # weights). Drop the duplicated head of the rollout.
        trajectory = traj + gen_traj[1:]
        winner = get_winner(trajectory[-1])
        if winner == 1:
            reward = 1 if which_player == 1 else -1
        elif winner == 2:
            reward = -1 if which_player == 1 else 1
        else:
            reward = 0
        weight = 1
        # Walk the trajectory backwards, folding the discounted terminal
        # reward into each state's running mean (incremental average).
        while trajectory:
            key = str(trajectory.pop())
            visits[key] = visits.get(key, 0) + 1
            if key not in values:
                values[key] = weight * reward
            else:
                n = visits[key]
                values[key] = (1 - 1 / n) * values[key] + (1 / n) * weight * reward
            weight *= gamma

    return values, visits

# Generate the state space by exhaustively expanding the game tree.
# NOTE(review): only referenced from commented-out code below; the recursion
# branches over every legal move, so the result grows combinatorially with
# the number of empty cells — intractable for an early-game 12x12 board.
def generate_states(current_state, current_player):
    """Recursively enumerate board states reachable from *current_state*.

    Recursion stops at won or full boards. Returns a flat list of states.
    """
    if get_winner(current_state) != 0:
        return [current_state]
    
    states = []
    moves = get_possible_moves(current_state)
    if not moves:
        return [current_state]
    
    # NOTE(review): current_state is appended once PER legal move, so it
    # appears len(moves) times in the result — confirm the duplicates are
    # intended before relying on this list.
    for move in moves:
        states.append(current_state)
        next_state = get_next_board(current_state, move, current_player)
        next_player = 2 if current_player == 1 else 1
        states.extend(generate_states(next_state, next_player))
    
    return states

# initial_board = np.zeros((board_rows, board_cols), dtype=int)
# all_states = list(set(tuple(map(tuple, state)) for state in generate_states(initial_board, 1)))
# all_states = [np.array(state) for state in all_states]
# state_index = {str(state): idx for idx, state in enumerate(all_states)}
def train():
    """Estimate state-value tables for both players (100k random games each)
    and persist the resulting dicts to .npy files."""
    print("Start sample.")
    for player in (1, 2):
        values, visits = value_iteration(which_player=player, sample_number=100000)
        # np.save pickles the dicts into values<player>.npy / visits<player>.npy.
        np.save('values%d.npy' % player, values)
        np.save('visits%d.npy' % player, visits)
        if player == 1:
            print("Value1 complete!")
    print("Value function computed for all trajectory.")

# Guard the entry point so importing this module does not kick off a
# 200,000-game training run as a side effect.
if __name__ == "__main__":
    train()