from models.Seq2seq import Seq2seq
from data import train_data_loader, tgt_vocab
import torch
from torch.nn import functional as F
from configs.model_config import params
from torch import optim
import tqdm
from logger import logger
import math

# Pin the active CUDA device when a GPU is available; device id comes
# from the model configuration.
if torch.cuda.is_available():
    torch.cuda.set_device(params['device_id'])

# Build the seq2seq model and move it onto the configured device
# (nn.Module.to returns the module itself, so chaining is equivalent).
seq2seq = Seq2seq().to(params['device'])

# Adam over all model parameters; learning rate from the 'sp' config section.
optimizer = optim.Adam(seq2seq.parameters(), lr=params['sp']['lr'])

# Main training loop: one pass over train_data_loader per epoch,
# logging the epoch-average NLL loss at the end of each epoch.
for epoch_idx in range(1, params['sp']['num_epoch'] + 1):
    # tqdm wrapper gives a per-batch progress bar (90-char wide).
    dataloader = tqdm.tqdm(train_data_loader, ncols=90)
    loss_records = []  # per-batch losses, averaged for the epoch summary
    for src, src_len, tgt, tgt_len in dataloader:
        optimizer.zero_grad()
        # dist: presumably (batch, tgt_len-1, vocab) probabilities from the
        # decoder — TODO confirm; .log() below yields -inf for exact zeros,
        # so the model is assumed to emit strictly positive probabilities.
        dist = seq2seq(src, src_len, tgt)
        # Teacher forcing: predictions align with targets shifted by one.
        tgt_batch = tgt[:, 1:].reshape(-1)
        dist = dist.view(-1, dist.shape[-1])
        loss = F.nll_loss(dist.log(), tgt_batch,
                          ignore_index=seq2seq.decoder.vocab.pad_id,
                          reduction='mean')
        loss.backward()
        try:
            # error_if_nonfinite=True makes clip_grad_norm_ raise a
            # RuntimeError on NaN/Inf gradients; in that case skip the
            # optimizer step for this batch instead of applying bad grads.
            torch.nn.utils.clip_grad_norm_(seq2seq.parameters(),
                                           norm_type=2, max_norm=2,
                                           error_if_nonfinite=True)
            optimizer.step()
        except RuntimeError:
            # Was a bare `except:` that also swallowed KeyboardInterrupt and
            # hid the error detail; log the traceback and continue training.
            logger.exception("Non-finite gradient norm; skipping optimizer step")
        else:
            dataloader.set_postfix(loss="%.4f" % loss.item())
        loss_records.append(loss.item())
    # Guard against an empty loader to avoid ZeroDivisionError.
    if loss_records:
        logger.info("Epoch {0} : avg loss : {1}".format(
            epoch_idx, sum(loss_records) / len(loss_records)))
    else:
        logger.info("Epoch {0} : no batches processed".format(epoch_idx))

# idx 11463
