import torch
from ignite.utils import setup_logger
from torch import nn, optim
from torch.optim import SGD
from tqdm import tqdm
import gensim

import net_review
import data
from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events
from ignite.contrib.handlers import FastaiLRFinder, ProgressBar
from ignite.metrics import Accuracy, Loss


def binary_accuracy(preds, y):
    """Batch accuracy for binary classification from raw logits.

    Each score in `preds` is squashed through a sigmoid and rounded to
    0/1 before being compared against the targets `y`. Returns the
    fraction of correct predictions per batch (8/10 right -> 0.8, NOT 8).
    """
    hard_labels = torch.round(torch.sigmoid(preds))
    hits = hard_labels.eq(y).float()  # 1.0 where prediction matches target
    return hits.sum() / len(hits)


def train(model, iterator, optimizer, criterion):
    """Run one training epoch over `iterator`.

    Args:
        model: module mapping `batch.text` to raw logits of shape (batch, 1).
        iterator: sized iterable of batches exposing `.text` and `.label`.
        optimizer: torch optimizer over `model`'s parameters.
        criterion: loss on (logits, float targets), e.g. BCEWithLogitsLoss.

    Returns:
        Tuple of (mean loss, mean accuracy) averaged over the batches.
    """
    epoch_loss = 0.0
    epoch_acc = 0.0

    # Resolve the device from the model itself instead of relying on a
    # module-level `device` global — the original only defined that global
    # under __main__, so this function raised NameError when imported.
    device = next(model.parameters()).device

    model.train()

    for batch in iterator:
        optimizer.zero_grad()

        # Move both inputs and targets; the original moved only the
        # predictions, leaving labels behind if the loader yields CPU tensors.
        text = batch.text.to(device)
        labels = batch.label.to(device).float()

        predictions = model(text).squeeze(1)

        loss = criterion(predictions, labels)
        acc = binary_accuracy(predictions, labels)

        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        epoch_acc += acc.item()

    return epoch_loss / len(iterator), epoch_acc / len(iterator)


def evaluate(model, iterator, criterion):
    """Evaluate `model` over `iterator` without updating weights.

    Args:
        model: module mapping `batch.text` to raw logits of shape (batch, 1).
        iterator: sized iterable of batches exposing `.text` and `.label`.
        criterion: loss on (logits, float targets), e.g. BCEWithLogitsLoss.

    Returns:
        Tuple of (mean loss, mean accuracy) averaged over the batches.
    """
    epoch_loss = 0.0
    epoch_acc = 0.0

    # Derive the device from the model rather than a `device` global that
    # only exists when the file runs as a script (NameError on import).
    device = next(model.parameters()).device

    model.eval()

    with torch.no_grad():
        for batch in iterator:
            text = batch.text.to(device)
            labels = batch.label.to(device).float()

            predictions = model(text).squeeze(1)

            loss = criterion(predictions, labels)
            acc = binary_accuracy(predictions, labels)

            epoch_loss += loss.item()
            epoch_acc += acc.item()

    return epoch_loss / len(iterator), epoch_acc / len(iterator)


if __name__ == '__main__':
    log_interval = 10
    epochs = 10
    # Flip to True to retrain from scratch instead of only evaluating the
    # saved checkpoint (this replaces the previously commented-out loop).
    RUN_TRAINING = False

    train_loader, val_loader, test_loader, TEXT, Label = data.get_data_loaders('review', 1024)

    # NOTE(review): hard-coded vocabulary size — presumably len(TEXT.vocab);
    # verify it matches the row count of the loaded word2vec matrix.
    INPUT_DIM = 426677
    EMBEDDING_DIM = 50
    OUTPUT_DIM = 1
    PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
    UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]

    # Pre-trained 50-d word2vec embeddings stored as a gbk-encoded binary file.
    model_wv = gensim.models.KeyedVectors.load_word2vec_format('./data/review/wiki_word2vec_50.bin', binary=True,
                                                               encoding='gbk')
    weights = torch.FloatTensor(model_wv.vectors)

    model = net_review.FastText(INPUT_DIM, EMBEDDING_DIM, OUTPUT_DIM, PAD_IDX)

    # Initialize the embedding table with the pre-trained vectors, then zero
    # the <unk>/<pad> rows so they carry no learned-from-elsewhere signal.
    model.embedding.weight.data.copy_(weights)
    model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    optimizer = optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()

    model = model.to(device)
    criterion = criterion.to(device)

    if RUN_TRAINING:
        N_EPOCHS = 50
        best_valid_loss = float('inf')

        for epoch in range(N_EPOCHS):
            train_loss, train_acc = train(model, train_loader, optimizer, criterion)
            valid_loss, valid_acc = evaluate(model, val_loader, criterion)

            # Checkpoint only when validation loss improves.
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(model.state_dict(), 'tut3-model.pt')

            print(f'Epoch: {epoch + 1:02} ')
            print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}%')
            print(f'\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc * 100:.2f}%')

    # map_location lets a checkpoint saved on GPU load on a CPU-only machine;
    # the original torch.load crashed there with a CUDA deserialization error.
    model.load_state_dict(torch.load('tut3-model.pt', map_location=device))

    test_loss, test_acc = evaluate(model, test_loader, criterion)

    print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}%')