import random
import torch
from torch.nn import functional as F
import game_layer_cpp
from net import EMAModel, ReversiNet
from tqdm import tqdm
from torch.distributions import Categorical
from match import eval

# --- Training hyper-parameters ---
ITERS = 10000000  # total number of optimisation steps
BATCH_SIZE = 256  # games advanced in parallel within one board slot
N = 2048  # number of independent board slots, one picked at random per step
device = 'cuda'
BOARD_SIZE = 15  # 15x15 board. NOTE(review): win-checking on a 15x15 board suggests gomoku despite the "ReversiNet" name -- confirm


# Row indices 0..BATCH_SIZE-1, reused for per-game gather indexing (e.g. p[batch_range, action]).
batch_range = torch.arange(BATCH_SIZE, device=device)

def train(model):
    """Train ``model`` by temperature-sampled self-play against an EMA copy of itself.

    ``N`` independent batches of games are held in ``board``; each step picks one
    slot at random, plays one half-move with the live model, then lets the frozen
    EMA model look one ply ahead (no-grad) to build a bootstrapped value target.

    Args:
        model: the network being optimised; updated in place.

    Side effects: writes TensorBoard scalars to the CWD, periodically saves
    ``checkpoint*.pth`` / ``ema*.pth``, and runs ``eval`` on the EMA model.
    """
    from tensorboardX import SummaryWriter
    writer = SummaryWriter('.', flush_secs=1)
    optim = torch.optim.AdamW(model.parameters(), lr=1e-4)
    # Step the LR down at 80% and 90% of the schedule.
    sched = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=[ITERS*8//10, ITERS*9//10])
    ema_model = EMAModel(model, 0.9995).eval()

    # Boards start randomly ~10% occupied rather than empty.
    # NOTE(review): assumes channel dim 2 holds (side-to-move, opponent) stone
    # planes -- confirm against game_layer_cpp.
    board = torch.rand((N, BATCH_SIZE, 2, BOARD_SIZE, BOARD_SIZE), device=device) < 0.1

    pbar = tqdm(range(ITERS + 1))
    for step in pbar:  # renamed from `iter`, which shadowed the builtin
        i = random.randint(0, N-1)  # pick one board slot at random
        pred: torch.Tensor = model(board[i].float())
        # Channel 0: per-cell move logits; remaining channels: board-status head.
        p, status_pred = pred[:, 0].flatten(1), pred[:, 1:]
        status_gt = game_layer_cpp.get_status(board[i])
        # Occupied cells (either plane) are illegal moves.
        invalid_mask = board[i].any(1).flatten(1)
        p = p.masked_fill(invalid_mask, float('-inf'))
        # Low temperature (0.05): near-greedy sampling with a little exploration.
        dist = Categorical(logits=p / 0.05)
        action = dist.sample()

        # Play the move. NOTE(review): check_win_cuda appears to both place
        # `action` on board[i] in place and return per-game win flags -- confirm.
        win = game_layer_cpp.check_win_cuda(board[i], action)
        p = p[batch_range, action]  # logit of the move actually taken

        # Swap stone planes so the board is always seen from the side to move.
        board[i] = torch.flip(board[i], (1,))
        full = board[i].any(1).flatten(1).all(1)  # draw: no empty cell left
        board[i, win | full] = False  # finished games restart from an empty board

        with torch.no_grad():
            # One-ply lookahead with the frozen EMA model to build the target.
            _p = ema_model(board[i].float())[:, 0].flatten(1)
            _invalid_mask = board[i].any(1).flatten(1)
            _p = _p.masked_fill(_invalid_mask, float('-inf'))
            _dist = Categorical(logits=_p / 0.05)
            _action = _dist.sample()
            _board = board[i].clone()  # clone: the lookahead move is not played for real
            _win = game_layer_cpp.check_win_cuda(_board, _action)

            board[i, _win] = False  # opponent to move can win: game over, reset

            # Bootstrapped value: opponent's best logit, negated (their gain is
            # our loss) and squashed to (-1, 1)...
            gt = -_p.max(-1).values.tanh()
            # ...overridden by terminal outcomes; later writes take precedence,
            # so win > _win > full.
            gt[full] = 0
            gt[_win] = -1
            gt[win] = 1
        status_loss = F.binary_cross_entropy_with_logits(status_pred, status_gt.float())
        # Regress tanh(chosen-move logit) onto the bootstrapped value target.
        win_prob_loss = F.mse_loss(p.tanh(), gt)
        loss = win_prob_loss + status_loss

        optim.zero_grad()
        loss.backward()
        optim.step()
        sched.step()
        ema_model.update(model)

        if step % 100 == 0:
            if win.any():
                # 7*BOARD_SIZE+7 is the centre cell of the 15x15 board.
                writer.add_scalar("black_win_prior", _p[win, 7*BOARD_SIZE+7].sigmoid().mean(), step)
                writer.add_scalar("1_win_prob", p[win].sigmoid().mean(), step)
            if _win.any():
                writer.add_scalar("2_lose_prob", (-p[_win]).sigmoid().mean(), step)

            avg_stone = board[i].sum().item() / BATCH_SIZE
            writer.add_scalar("loss", loss, step)
            writer.add_scalar("loss/status", status_loss, step)
            writer.add_scalar("loss/win_prob", win_prob_loss, step)
            writer.add_scalar("avg/stone", avg_stone, step)
            writer.add_scalar("avg/win", win.sum(), step)
            writer.add_scalar("avg/_win", _win.sum(), step)
            pbar.set_description_str(f"l: {loss:.4f}; s: {avg_stone:.1f}")
        if step % 5_000 == 0:
            win_rate, win_avg_stone = eval(ema_model)
            if win_rate is not None:
                writer.add_scalar("eval/win_rate", win_rate, step)
                writer.add_scalar("eval/win_avg_stone", win_avg_stone, step)
        if step % 100_000 == 0:
            # Zero-padded index keeps checkpoints lexicographically sorted.
            torch.save(model.state_dict(), f"checkpoint{step//100_000:04d}.pth")
            torch.save(ema_model.model.state_dict(), f"ema{step//100_000:04d}.pth")

if __name__ == '__main__':
    # Build the network in training mode on the target device, then train it.
    net = ReversiNet(train=True).to(device)
    # net = torch.nn.DataParallel(net)
    train(net)
    # Persist the final (non-EMA) weights once training finishes.
    torch.save(net.state_dict(), "w.pth")
