import torch
import time
import random
from mine import generate_board, place_mines, calculate_numbers, reveal, print_board, check_win
from DQN import DQN

# Run on GPU when available; the model and every state tensor go to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 16x16 board flattened -> 256 inputs; one Q-value per cell -> 256 outputs.
policy_net = DQN(input_dim=256, output_dim=256).to(device)
# NOTE(review): torch.load without weights_only=True unpickles arbitrary
# objects — fine for a trusted local checkpoint, but confirm its provenance.
policy_net.load_state_dict(torch.load("trained/model.pth"))
policy_net.eval()  # inference mode: disables dropout / batch-norm updates

def ai_play():
    """Play one 16x16 Minesweeper game, letting the trained DQN pick each move.

    Prints the board after every step (with a 1 s pause so a human can watch)
    and announces a win when ``check_win`` passes.

    NOTE(review): there is no loss or stall detection — if the net reveals a
    mine or keeps picking already-revealed cells, this loops forever.  Confirm
    the ``reveal``/``check_win`` semantics in ``mine.py`` before relying on it.
    """
    size, num_mines = 16, 40
    board = generate_board(size)
    # Mines are placed after choosing the first move, presumably so that cell
    # is guaranteed safe — confirm in place_mines.
    first_move = (random.randint(0, size - 1), random.randint(0, size - 1))
    board = place_mines(board, first_move, num_mines)
    calculate_numbers(board)
    revealed = [[False] * size for _ in range(size)]

    def _encode(b):
        # Flatten the board into a (1, 256) float tensor on the model's device.
        # (Original used np.array here, but numpy was never imported — torch
        # accepts the nested list directly.)
        return torch.tensor(b, dtype=torch.float32, device=device).flatten().unsqueeze(0)

    state = _encode(board)
    while True:
        print_board(board, revealed)
        time.sleep(1)  # slow the game down for human viewing
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            action = policy_net(state).argmax(dim=1).item()
        # Decode the flat action index into (row, col) as plain Python ints;
        # passing 1x1 tensors into list-indexing helpers is fragile.
        reveal(board, revealed, action // size, action % size)
        state = _encode(board)

        if check_win(board, revealed):
            print("AI wins!")
            break

# Script entry point: play a single AI game when run directly.
if __name__ == "__main__":
    ai_play()