from Siamese_Networks import SiameseNetwork, TripletLoss
from dataprocesser import qna_data_pipeline
import pandas as pd
from collections import Counter, defaultdict
from torchtext.data.utils import get_tokenizer
import torch
import torch.nn as nn
import time
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
import os


def train_epoch(dataloader, model, optimizer, scheduler, loss_fn):
    """Run one training epoch and return the mean per-sample loss.

    Args:
        dataloader: yields ``(x1, x2)`` tensor pairs (anchor/positive).
        model: siamese model returning a pair of embedding batches.
        optimizer: stepped once per batch.
        scheduler: LR scheduler stepped once per epoch.
        loss_fn: loss over the two embedding batches (e.g. TripletLoss).

    Returns:
        float: total loss divided by the dataset size. Each batch's loss
        is weighted by its actual batch size — the original multiplied by
        ``dataloader.batch_size``, which over-weights a smaller final batch.
    """
    model.train()
    total_loss = 0.0
    size = len(dataloader.dataset)
    for x1, x2 in dataloader:
        # NOTE: `device` is a module-level global set in the __main__ block.
        x1, x2 = x1.to(device), x2.to(device)
        optimizer.zero_grad()
        pred1, pred2 = model(x1, x2)
        loss = loss_fn(pred1, pred2)
        loss.backward()
        optimizer.step()
        # Weight by the true batch size so a short last batch does not
        # skew the epoch average.
        total_loss += loss.item() * x1.size(0)
    scheduler.step()
    return total_loss / size


def compute_acc(dataloader, model, threshold=0.7):
    """Compute classification accuracy over a labeled pair dataloader.

    A pair is predicted "similar" when the dot product of its two
    embeddings exceeds ``threshold``; that prediction is compared against
    the 0/1 label ``y``.

    Args:
        dataloader: yields ``(x1, x2, y)`` with y a batch of 0/1 labels.
        model: siamese model returning a pair of embedding batches.
        threshold: similarity cut-off for predicting "similar".

    Returns:
        float: fraction of correctly classified pairs.
    """
    model.eval()
    correct = 0
    size = len(dataloader.dataset)
    with torch.no_grad():
        for x1, x2, y in dataloader:
            # NOTE: `device` is a module-level global set in __main__.
            x1, x2, y = x1.to(device), x2.to(device), y.to(device)
            pred1, pred2 = model(x1, x2)
            # Row-wise dot product. The original built the full b x b
            # similarity matrix (matmul) and kept only its diagonal —
            # O(b^2) work for an O(b) result.
            similarity = (pred1 * pred2).sum(dim=1)
            # .item() so the function returns a plain Python float rather
            # than a 0-dim tensor.
            correct += ((similarity > threshold) == y).sum().item()
    return correct / size


def train(epochs, verbose_round, log_dir, load_model, threshold=0.7):
    """Train the module-level model, periodically checkpointing and evaluating.

    Args:
        epochs: total number of epochs to train to.
        verbose_round: evaluate/checkpoint/log every this many epochs.
        log_dir: checkpoint file path (despite the name, a file not a dir).
        load_model: truthy to resume from the checkpoint at ``log_dir``.
        threshold: similarity threshold forwarded to compute_acc.

    Side effects:
        Reads/writes the checkpoint file, mutates the module-level
        ``model``/``optimizer``/``scheduler``, prints progress.
    """
    cur_epoch = 0
    if load_model:
        if not os.path.exists(log_dir):
            # Bug fix: the original printed this warning but then called
            # torch.load anyway, crashing on the missing file. Now we
            # genuinely fall through to training from scratch.
            print("No such file, retraining now")
        else:
            checkpoint = torch.load(log_dir)
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("cur learning rate:", optimizer.param_groups[0]['lr'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            cur_epoch = checkpoint['epoch']
            test_acc = compute_acc(test_loader, model, threshold)
            print(f"current accuracy: {test_acc}")

    start_time = time.time()
    for t in range(cur_epoch, epochs):
        train_loss = train_epoch(train_loader, model, optimizer, scheduler, loss_fn)
        if t % verbose_round == 0:
            end_time = time.time()
            test_acc = compute_acc(test_loader, model, threshold)
            # Store t + 1 (the next epoch to run): the original stored t,
            # so resuming repeated the already-completed epoch.
            state = {'model': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'epoch': t + 1,
                     'scheduler': scheduler.state_dict()}
            torch.save(state, log_dir)

            print(f"Epoch: {t + 1}, Train loss: {train_loss:.6f}, test acc: {test_acc:.4f}"
                  f" , Epoch Time: {end_time - start_time:.3f}s, "
                  f"cur learning rate: {optimizer.param_groups[0]['lr']:.7f}")
            start_time = time.time()


if __name__ == "__main__":
    # Script entry point: build the data pipeline, model, and optimizer
    # as module-level globals consumed by train()/train_epoch()/compute_acc().
    load_model = False  # boolean, not 0/1, for clarity
    # Fall back to CPU when no GPU is present; the original hard-coded
    # 'cuda' and crashed on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_dataset, test_dataset, vocab = qna_data_pipeline(file='data/questions.csv', train_ratio=0.9)
    batch_size = 128
    vocab_size = len(vocab.vocab2id)
    d_model = 128  # embedding dimension of the siamese encoder
    # Shuffle the training split each epoch — the original used
    # shuffle=False, which can bias SGD on an ordered dataset.
    train_loader = DataLoader(train_dataset[:80000], batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    model = SiameseNetwork(vocab_size, d_model).to(device)
    loss_fn = TripletLoss(device, 0.25)  # 0.25 is the triplet margin
    optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
    # Exponential decay: effective lr = 0.003 * 0.97 ** epoch.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer=optimizer, lr_lambda=lambda epoch: 0.97 ** epoch)
    # 31 epochs, evaluate/checkpoint every 5, accuracy threshold 0.8.
    train(31, 5, f"QNA_dim{d_model}.pth", load_model, 0.8)

