import os
import torch
from tqdm import tqdm
from nltk.translate.bleu_score import corpus_bleu
from metrics import (
    corpus_given_items_percentage,
    corpus_extra_items_percentage,
    corpus_meteor,
)
from datautils import Vocabulary
from torch.utils.tensorboard import SummaryWriter


# eval loop
# eval loop
def evaluation(model, iterator, criterion, device="cpu"):
    """Evaluate ``model`` on ``iterator`` and return (avg loss, token accuracy).

    Accuracy is computed over non-padding target tokens only (pad index is
    assumed to be 0 — TODO confirm against the Vocabulary used by the caller).
    The first target position is skipped, consistent with seq2seq outputs
    whose position 0 is the start token.

    Args:
        model: seq2seq module called as ``model(src, trg, 1)``; the third
            argument is presumably a teacher-forcing ratio of 1 — verify
            against the model's ``forward`` signature.
        iterator: iterable of batches, each a dict with "input" and "output"
            tensors shaped (seq_len, batch).
        criterion: loss over (N, vocab) logits vs (N,) targets,
            e.g. ``nn.CrossEntropyLoss``.
        device: torch device string to run on.

    Returns:
        (mean batch loss, non-pad token accuracy); both 0.0 when the
        iterator or the non-pad token count is empty.
    """
    model.eval()
    model.to(device)

    epoch_loss = 0.0
    total = 0  # non-pad target tokens seen
    right = 0  # correctly predicted non-pad tokens

    with torch.no_grad():
        for batch in iterator:
            # Move once per batch; everything below stays on `device`.
            src = batch["input"].to(device)
            trg = batch["output"].to(device)

            output = model(src, trg, 1)
            output_dim = output.shape[-1]
            # Drop position 0 (start token) and flatten to (N, vocab) / (N,).
            output = output[1:].reshape(-1, output_dim)
            trg = trg[1:].reshape(-1)

            loss = criterion(output, trg)
            epoch_loss += loss.item()

            non_pad = trg != 0
            total += non_pad.sum().item()
            right += ((output.argmax(1) == trg) & non_pad).sum().item()

    model.train()
    # Guard both divisions: an empty iterator or an all-pad target set
    # would otherwise raise ZeroDivisionError.
    num_batches = len(iterator)
    avg_loss = epoch_loss / num_batches if num_batches else 0.0
    accuracy = right / total if total else 0.0
    return avg_loss, accuracy


# training loop
# training loop
def train(
    model,
    iterator,
    eval_iterator,
    test_iterator,
    optimizer,
    criterion,
    clip,
    vocab: Vocabulary,
    saved_folder="checkpoint",
    device="cpu",
    max_epoch=20,
    eval_every_n_batch=100,
    early_stopping_patience=10,
):
    """Train ``model`` with periodic dev evaluation, checkpointing and early stop.

    Every ``eval_every_n_batch`` batches the model is evaluated on
    ``eval_iterator``; the checkpoint with the best dev accuracy is saved to
    ``saved_folder/best.pt``. If dev accuracy fails to improve
    ``early_stopping_patience`` evaluations in a row, training stops entirely.
    After training, the best checkpoint is reloaded and scored on
    ``test_iterator``.

    Args:
        model: seq2seq module called as ``model(src, trg)`` during training.
        iterator / eval_iterator / test_iterator: train / dev / test loaders
            yielding dicts with "input" and "output" tensors.
        optimizer: torch optimizer over ``model.parameters()``.
        criterion: loss over flattened (N, vocab) logits vs (N,) targets.
        clip: max gradient norm for ``clip_grad_norm_``.
        vocab: vocabulary used by ``test`` to denumericalize sequences.
        saved_folder: directory for checkpoints and tensorboard logs.
        device: torch device string.
        max_epoch: maximum number of epochs.
        eval_every_n_batch: dev-evaluation frequency in batches.
        early_stopping_patience: evaluations without improvement before stopping.
    """
    if not os.path.exists(saved_folder):
        os.makedirs(saved_folder)
    writer = SummaryWriter(log_dir=os.path.join(saved_folder, "tensorboard"))
    model.train()
    model.to(device)

    max_acc = 0
    patience = 0
    stop_early = False
    # BUG FIX: checkpoint is saved AND reloaded from the same folder
    # (original reloaded from a hard-coded "checkpoints/seq2seq/best.pt").
    best_path = os.path.join(saved_folder, "best.pt")

    for epoch in range(max_epoch):
        # BUG FIX: reset per epoch so the running average printed below is
        # the mean over this epoch's batches (it previously accumulated
        # across epochs while being divided by the within-epoch index).
        epoch_loss = 0
        for i, batch in tqdm(enumerate(iterator), total=len(iterator)):
            optimizer.zero_grad()
            src = batch["input"]
            trg = batch["output"]
            output = model(src.to(device), trg.to(device))
            output_dim = output.shape[-1]
            output = output[1:].view(-1, output_dim)
            trg = trg[1:].view(-1)
            loss = criterion(output, trg.to(device))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            epoch_loss += loss.item()
            global_step = epoch * len(iterator) + i + 1
            writer.add_scalar("Loss/train", loss.item(), global_step)

            if i % eval_every_n_batch == 0 and i > 0:
                print(f"Current training last 100 batch: {epoch_loss / i}")
                eval_loss, acc = evaluation(model, eval_iterator, criterion, device)
                print(f"Current eval loss: {eval_loss}. acc {acc}.")
                writer.add_scalar("Loss/dev", eval_loss, global_step)
                writer.add_scalar("Acc/dev", acc, global_step)
                if acc > max_acc:
                    max_acc = acc
                    patience = 0
                    torch.save(model, best_path)
                else:
                    patience += 1
                    if patience == early_stopping_patience:
                        print("Early stopping")
                        stop_early = True
                        break

        # BUG FIX: the inner `break` only left the batch loop; propagate it
        # so early stopping actually ends training instead of starting the
        # next epoch.
        if stop_early:
            break

        print(f"Training epoch {epoch} done.")
        eval_loss, acc = evaluation(model, eval_iterator, criterion, device)
        print(f"Current eval loss: {eval_loss}. acc {acc}.")
        bleu, meteor, gip, eip = test(model, eval_iterator, vocab, device=device)
        writer.add_scalar("bleu", bleu, epoch)
        writer.add_scalar("meteor", meteor, epoch)
        writer.add_scalar("Given Items Percentage", gip, epoch)
        writer.add_scalar("Extra Items Percentage", eip, epoch)

    writer.close()
    # Reload the best checkpoint if one was ever saved (accuracy may never
    # have exceeded 0 on a broken run); otherwise score the final model.
    if os.path.exists(best_path):
        model = torch.load(best_path)
    test(model, test_iterator, vocab, device=device)

def test(model, iterator, vocab: Vocabulary, device="cpu"):
    """Greedy-decode every batch and report corpus-level generation metrics.

    Builds token-level references/predictions via ``vocab.denumericalize``
    (each reference wrapped in a single-element list, as ``corpus_bleu``
    expects), then computes BLEU, METEOR, and the given/extra item
    percentages. Leaves the model back in train mode.

    Returns:
        Tuple ``(bleu, meteor, gip, eip)``.
    """
    model.eval()
    model.to(device)

    references, predictions = [], []
    with torch.no_grad():
        for batch in iterator:
            generated = model.generate(batch["input"].to(device))
            gold = batch["output"]
            for pred_ids, gold_ids in zip(generated, gold):
                references.append([vocab.denumericalize(gold_ids.tolist())])
                predictions.append(vocab.denumericalize(pred_ids.tolist()))

    bleu = corpus_bleu(references, predictions)
    meteor = corpus_meteor(references, predictions)
    gip = corpus_given_items_percentage(references, predictions)
    eip = corpus_extra_items_percentage(references, predictions)

    print(
        f"BLEU: {bleu}, METEOR: {meteor}, Given Items Percentage: {gip}, Extra Items Percentage: {eip}"
    )
    model.train()
    return bleu, meteor, gip, eip
