import numpy as np
import torch
import random
import torch.optim as optim
import torch.nn.functional as F
from DQN import DQN, ReplayMemory, select_action
from mine import generate_board, place_mines, calculate_numbers, reveal, check_win

# Parameters
BATCH_SIZE = 128        # transitions sampled per optimization step
GAMMA = 0.99            # discount factor for future rewards
EPS_START = 0.9         # initial epsilon for epsilon-greedy exploration
EPS_END = 0.05          # final epsilon after decay
EPS_DECAY = 200         # decay rate passed to select_action (presumably steps-based — see DQN.select_action)
TARGET_UPDATE = 10      # sync target_net with policy_net every N episodes
MEMORY_CAPACITY = 10000 # max transitions held in replay memory

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 16x16 board flattened to 256 inputs; one Q-value output per cell.
policy_net = DQN(input_dim=256, output_dim=256).to(device)
target_net = DQN(input_dim=256, output_dim=256).to(device)
# Target network starts as an exact copy and is only updated periodically
# (see TARGET_UPDATE) to stabilize the bootstrapped Q-targets.
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()

optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(MEMORY_CAPACITY)

# Global step counter; NOTE(review): this is passed to select_action but
# never incremented in this file — epsilon may never decay. Verify whether
# select_action mutates it or it should be incremented in the loop below.
steps_done = 0

def optimize_model():
    """Run one DQN optimization step on a random replay-memory minibatch.

    Samples BATCH_SIZE transitions, computes the Huber loss between the
    predicted Q(s, a) and the bootstrapped target
    r + GAMMA * max_a' Q_target(s', a'), then applies one RMSprop step
    with gradients clamped element-wise to [-1, 1].  No-op while the
    memory holds fewer than BATCH_SIZE transitions.
    """
    # Fix: `Transition` was referenced but never defined or imported in
    # this file, so the first optimization step raised NameError.  The
    # field order must match the memory.push(...) call in the training
    # loop: (state, action, reward, next_state, done).
    from collections import namedtuple
    Transition = namedtuple(
        "Transition", ("state", "action", "reward", "next_state", "done")
    )

    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose: list of per-step transitions -> one Transition of batches.
    batch = Transition(*zip(*transitions))

    # Mask of transitions whose successor is non-terminal (None marks a
    # terminal state, for which V(terminal) = 0 by definition).
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)

    # Q(s, a) for the actions actually taken in each sampled transition.
    state_action_values = policy_net(state_batch).gather(1, action_batch)

    # max_a' Q_target(s', a') for non-terminal successors; zero elsewhere.
    # no_grad keeps the target network out of the autograd graph.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    with torch.no_grad():
        next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0]
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch

    # Huber loss: less sensitive to outlier TD errors than plain MSE.
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))

    optimizer.zero_grad()
    loss.backward()
    # Clamp gradients for stability.  Fix: guard against parameters with
    # no gradient and drop the deprecated `.data` access.
    for param in policy_net.parameters():
        if param.grad is not None:
            param.grad.clamp_(-1, 1)
    optimizer.step()

def board_to_numeric(board):
    """Convert a Minesweeper board of characters into a grid of ints.

    ' ' (blank) maps to 0, '*' (mine) maps to 1; every other cell is
    assumed to hold a digit character and is parsed with int().
    Returns a new list of lists; the input board is left untouched.
    """
    special = {' ': 0, '*': 1}
    return [
        [special[cell] if cell in special else int(cell) for cell in row]
        for row in board
    ]

# Main training loop: 1000 episodes of self-play on fresh random boards.
for i_episode in range(1000):
    # Fresh 16x16 board with 40 mines; first_move is handed to place_mines
    # (presumably so that cell stays mine-free — verify against mine.py).
    board = generate_board(16)
    first_move = (random.randint(0, 15), random.randint(0, 15))
    board = place_mines(board, first_move, 40)
    calculate_numbers(board)
    revealed = [[False for _ in range(16)] for _ in range(16)]
    # NOTE(review): the state encodes the full solution board (mines and
    # numbers), not the player-visible `revealed` view — the agent sees
    # ground truth. Confirm this is intended.
    numeric_board = board_to_numeric(board)
    state = np.array(numeric_board).flatten()
    state = torch.tensor([state], device=device, dtype=torch.float32)

    # Up to 100 moves per episode.
    for t in range(100):
        # Epsilon-greedy action over the 256 cells; action indexes a cell
        # as row = action // 16, col = action % 16.
        action = select_action(state, policy_net, steps_done, EPS_START, EPS_END, EPS_DECAY, 256)
        reveal(board, revealed, action // 16, action % 16)
        # Reward scheme: +1 only on a full win, -1 otherwise.
        # NOTE(review): revealing a mine is not treated as terminal or
        # penalized differently from a safe move — confirm intent.
        reward = 1 if check_win(board, revealed) else -1
        reward = torch.tensor([reward], device=device)
        numeric_board = board_to_numeric(board)
        next_state = np.array(numeric_board).flatten()
        next_state = torch.tensor([next_state], device=device, dtype=torch.float32)
        # done is a 1-element bool tensor (truthy test below works on it).
        done = reward == 1

        # NOTE(review): next_state is never pushed as None, yet
        # optimize_model treats None as the terminal-state marker — so
        # terminal transitions still bootstrap from next_state. Verify.
        memory.push(state, action, reward, next_state, done)

        state = next_state

        optimize_model()
        if done:
            break

    # Periodically sync the target network with the policy network.
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())

# Persist the trained policy weights.
# NOTE(review): assumes the "trained/" directory already exists;
# torch.save does not create it.
torch.save(policy_net.state_dict(), "trained/model.pth")