import torch
import torch.nn.functional as F
import reversi_layer_cpp
from scheduler import LinearWarmupScheduler
from net import ReversiNet
from evaluate import evaluate
import os
from tqdm import tqdm
import datetime

# Total number of pretraining iterations and positions per self-play batch.
ITERS = 2000000
BATCH_SIZE = 1024
device = 'cuda'

# Timestamped directory for periodic checkpoints, e.g. "backup/20240101120000".
bk_dir = datetime.datetime.now().strftime("backup/%Y%m%d%H%M%S")
os.makedirs(bk_dir, exist_ok=True)


def train(model):
    """Pretrain ``model`` on self-play batches with TD-style bootstrapped targets.

    Each iteration draws a batch from ``reversi_layer_cpp.pretrain`` and the
    model predicts 65 values per position: 64 move logits plus a win logit.
    Two losses are formed:

    * ``loss_q`` — cross-entropy of the move logits against the played move
      ``s``, computed on the *current* batch. Illegal moves are masked with a
      large negative logit; positions with ``s == 64`` (presumably a pass —
      TODO confirm encoding against reversi_layer_cpp) carry no move label
      and are excluded.
    * ``loss_p`` — BCE of the *previous* batch's win logit against a
      bootstrapped target: the current position's predicted win probability,
      flipped (sides alternate between consecutive positions), except where
      the game ended (``win_prev != 3`` — assumed terminal marker, TODO
      confirm), where the true outcome ``win_prev / 2`` (0 / 0.5 / 1) is used.

    The optimizer therefore always steps on the graph built one iteration
    earlier; the first iteration only primes ``p_prev``/``loss_q``/``win_prev``.
    Checkpoints are written to ``bk_dir`` every 100k steps and the model is
    evaluated every 5k steps.
    """
    from tensorboardX import SummaryWriter
    writer = SummaryWriter(flush_secs=30)

    optim = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9, weight_decay=1e-4)
    # Milestones as integer step counts (``ITERS/10*7`` would yield the
    # float 1400000.0; the values are numerically identical).
    sched = LinearWarmupScheduler(optim, 1000, [ITERS * 7 // 10, ITERS * 9 // 10])

    # State carried across iterations; all three are set together at the end
    # of each loop body and consumed together at the start of the next.
    p_prev = None    # previous batch's win logits (still attached to its graph)
    loss_q = None    # previous batch's move loss
    win_prev = None  # previous batch's game results
    pbar = tqdm(range(ITERS))
    for step in pbar:  # ``step`` instead of ``iter`` to avoid shadowing the builtin
        data_b, data_w, s, valid, win = reversi_layer_cpp.pretrain(BATCH_SIZE)
        pred = model(data_b, data_w).to('cpu')
        if win_prev is not None:
            with torch.no_grad():
                # Bootstrap target: opponent's predicted win probability, flipped.
                gt = 1 - pred[:, -1].sigmoid()
                # Where the game finished, substitute the real result.
                gt[win_prev != 3] = win_prev[win_prev != 3].float() / 2
            loss_p = F.binary_cross_entropy_with_logits(p_prev, gt)
            # Backward flows through the *previous* iteration's graph.
            loss = loss_p + loss_q

            optim.zero_grad()
            loss.backward()
            optim.step()
            sched.step()

            if step % 100 == 0:
                writer.add_scalar("loss_p", loss_p, step)
                writer.add_scalar("loss_q", loss_q, step)
            pbar.set_description_str(f"q: {loss_q:.4f}; p: {loss_p:.4f}")

        # Stash this batch's outputs; they are trained on next iteration.
        p_prev = pred[:, -1]
        loss_q = F.cross_entropy(pred[s != 64, :64] - 1e12 * (~valid[s != 64]), s[s != 64])
        win_prev = win

        if step % 5000 == 0:
            writer.add_scalar("win_std", evaluate(model, 100), step)
        if step and step % 100000 == 0:
            torch.save(model.state_dict(), bk_dir + f"/checkpoint{step//100000:03d}.pth")

if __name__ == '__main__':
    # Build the network in training mode on the configured device,
    # run the full pretraining loop, then persist the final weights.
    net = ReversiNet(train=True).to(device)
    train(net)
    torch.save(net.state_dict(), "w.pt")
