from models.consum import Consum
from utils.data.loader import data_loader, test_loader
import torch
import torch.nn.functional as F
from config import Config
from tqdm import tqdm
from torch import optim

# --- Model / optimizer setup -------------------------------------------------
model = Consum()
model.to(Config.DEVICE)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

num_epoch = 10

# --- Training ----------------------------------------------------------------
model.train()
for epoch in range(num_epoch):
    # Wrap the loader in a *separate* name. The original rebound
    # `data_loader = tqdm(data_loader, ...)` each epoch, so from epoch 2
    # onward tqdm wrapped an already-consumed tqdm object instead of the
    # underlying DataLoader, breaking iteration/progress after epoch 1.
    progress = tqdm(data_loader, desc='Training epoch ' + str(epoch + 1))
    for batch in progress:
        optimizer.zero_grad()
        # Lengths are kept on CPU (pack_padded_sequence-style APIs expect
        # CPU length tensors). The batches themselves are assumed to be
        # moved to Config.DEVICE by the loader/model — TODO confirm.
        m_src_batch = batch['methods']
        m_src_lengths = torch.Tensor(batch["src_methods_len"]).to('cpu')
        c_src_batch = batch['contexts']
        c_src_lengths = torch.Tensor(batch['src_contexts_len']).to('cpu')
        tgt_batch = batch['summaries']
        # NOTE(review): tgt_lengths is computed but never used below; kept
        # for parity with the other length tensors — confirm if needed.
        tgt_lengths = torch.Tensor(batch["src_summaries_len"]).to('cpu')

        # dist: [batch_size, tgt_len, vocab_size] — probabilities, not
        # log-probabilities (see the .log() call below).
        dist, m_attention, c_attention = model(m_src_batch=m_src_batch,
                                               m_src_lengths=m_src_lengths,
                                               c_src_batch=c_src_batch,
                                               c_src_lengths=c_src_lengths,
                                               tgt_batch=tgt_batch)
        # Drop the leading BOS token from the targets and flatten so each
        # row of `dist` lines up with one target token id.
        tgt_batch = tgt_batch[:, 1:]
        tgt_batch = tgt_batch.reshape(-1)
        dist = dist.view(-1, model.decoder.vocab_size)  # -> [batch_size * tgt_len, vocab_size]
        # nll_loss expects log-probabilities. Clamp before the log: an
        # exact-zero probability would otherwise yield -inf and turn the
        # loss into inf/NaN for any batch where a target hits that slot.
        loss = F.nll_loss(dist.clamp_min(1e-12).log(), tgt_batch,
                          ignore_index=model.decoder.vocab[Config.PAD_TOKEN],
                          reduction='mean')
        loss.backward()
        optimizer.step()
        progress.set_postfix(loss="%.4f" % loss.item())

# Evaluate

# model.eval()
# for batch in test_loader:
#     m_src_batch = batch['methods']
#     m_src_lengths = torch.Tensor(batch["src_methods_len"]).to('cpu')
#     c_src_batch = batch['contexts']
#     c_src_lengths = torch.Tensor(batch['src_contexts_len']).to('cpu')
#     tgt_batch = batch['summaries']
#     tgt_lengths = torch.Tensor(batch["src_summaries_len"]).to('cpu')
#     m_encoder_outputs, m_final_state = model.encoder(m_src_batch, m_src_lengths)
#     c_encoder_outputs, c_final_state = model.encoder(c_src_batch, c_src_lengths)
#     # FIXME: `final_state`, `memory`, and `src_lengths` are undefined — the
#     # encoder calls above produce m_final_state/c_final_state and
#     # m_encoder_outputs/c_encoder_outputs; wire the decoder accordingly.
#     outputs, m_attn, c_attn = model.decoder(tgt_batch, final_state, memory, src_lengths)
