import torch
import torch.nn.functional as F
import reversi_layer_cpp
from scheduler import LinearWarmupScheduler
from net import ReversiNet
from evaluate import evaluate
import os
from tqdm import tqdm
import datetime

import wandb

# Default hyperparameters; wandb may override any of them via a sweep config.
hyperparameter_defaults = {
    'iters': 4000000,
    'batch_size': 1024,
    'q_factor': 0.1,
    'e_factor': 0.1,
    'weight_decay': 1e-4,
    'lr': 0.01,
    'layers': 8,
    'channels': 256,
}
wandb.init(project='reversi', config=hyperparameter_defaults)
config = wandb.config

device = 'cuda'

# Timestamped directory for periodic weight snapshots.
bk_dir = datetime.datetime.now().strftime("backup/%Y%m%d%H%M%S")
os.makedirs(bk_dir, exist_ok=True)


def train(model):
    """Run the self-play temporal-difference training loop on ``model``.

    Each step plays one move on every board in the batch (via the
    ``reversi_layer_cpp`` extension), then trains on the *previous* step's
    predictions against a TD target built from the current step's value
    estimate (or the actual result for finished games).  Logs to wandb,
    evaluates every 5000 steps, and checkpoints every 100000 steps.
    """
    optim = torch.optim.SGD(model.parameters(), config.lr, momentum=0.9, weight_decay=config.weight_decay)
    # Linear warmup, then step decay at 60% and 90% of training.
    sched = LinearWarmupScheduler(optim, 10000, [config.iters*6//10, config.iters*9//10])

    # One 64-bit bitboard per batch entry for black and white stones,
    # initialized to the standard Reversi starting position.
    data_b = torch.zeros((config.batch_size,), dtype=torch.long)
    data_w = torch.zeros((config.batch_size,), dtype=torch.long)
    data_b[:] = 0x0000000810000000
    data_w[:] = 0x0000001008000000

    # Diversify starting depths: board j receives roughly (j % 64) + 1 moves
    # played with a constant all-ones policy (presumably uniform-random moves
    # inside reversi_layer_cpp.forward — confirm against the extension).
    # NOTE(review): the range stops at batch_size - 2, so the last board gets
    # one move fewer than its group peers; looks harmless but verify.
    for i in range(config.batch_size - 1):
        to = (i//64+1)*64
        reversi_layer_cpp.forward(data_b[i:to], data_w[i:to], torch.ones((to - i, 65)))

    p_prev = None      # previous step's value-head logits (graph kept for backward)
    q_prev = None      # previous step's per-board move cross-entropy
    win_prev = None    # previous step's game results (>= 0 means finished)
    entropy = None     # previous step's masked policy entropy
    w = 0
    pbar = tqdm(range(-1, config.iters))
    # step == -1 only primes p_prev/q_prev/win_prev/entropy; training starts at 0.
    # (Renamed from `iter`, which shadowed the builtin.)
    for step in pbar:
        pred = model(reversi_layer_cpp.convert(data_b, data_w).half().to(device)).to('cpu').float()
        s, win, valid_mask = reversi_layer_cpp.forward(data_b, data_w, pred)

        if win_prev is not None:
            with torch.no_grad():
                # TD target: 1 - sigmoid of the *current* value estimate
                # (opponent's perspective), overwritten with the actual
                # outcome win/2 in {0, 0.5, 1} for finished games.
                gt = 1 - pred[:, -1].sigmoid()
                gt[win_prev >= 0] = win_prev[win_prev >= 0].float() / 2
                residue = gt - p_prev.sigmoid()
            loss_p = F.binary_cross_entropy_with_logits(p_prev, gt)
            # Policy term: advantage-weighted move cross-entropy.
            loss_q = (residue * q_prev).mean()
            # Entropy term applied where the value was over-estimated.
            loss_e = (F.relu(-residue).pow(2) * -entropy).mean()
            loss = loss_p + loss_q * config.q_factor + loss_e * config.e_factor

            optim.zero_grad()
            loss.backward()
            optim.step()
            sched.step()

            # .item() detaches before logging; previously the live tensor was
            # stored, keeping the whole autograd graph reachable until
            # wandb.log serialized it.
            info = {"loss_p": loss_p.item()}
            pbar.set_description_str(f"q: {loss_q:.4f}; p: {loss_p:.4f}, w:{w:.3f}")
            if step % 5000 == 0:
                w = evaluate(model, 100)
                info.update({"win_std": w})
            if step % 100000 == 0:
                torch.save(model.state_dict(), bk_dir + f"/{step//100000:04d}.pt")
            wandb.log(info)

        # Stash this step's quantities; the next step's loss trains on them.
        p_prev = pred[:, -1]
        logits = pred - (~valid_mask * 1e12)  # push illegal moves to -inf-ish
        entropy = -torch.sum(logits.softmax(-1) * logits.log_softmax(-1) * valid_mask, -1)
        q_prev = F.cross_entropy(logits, s, reduction='none')
        q_prev[s == 64] = 0  # index 64 presumably means "pass"; no policy loss then
        win_prev = win


if __name__ == '__main__':
    # Single stage of `layers` residual blocks at `channels` width,
    # trained in half precision on the configured device.
    net = ReversiNet(cfg=[(config.layers, config.channels)], train=True)
    net = net.to(device).half()
    train(net)
    torch.save(net.state_dict(), bk_dir + "/w.pt")
