import sys
from dataprocesser import qna_data_pipeline
from Siamese_Networks import SiameseNetwork, TripletLoss
from dataprocesser import qna_data_pipeline
import pandas as pd
from collections import Counter, defaultdict
from torchtext.data.utils import get_tokenizer
import torch
import torch.nn as nn
import time
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
import QuestionDupli_training
import re


def test_qna(model, threshold=0.8):
    """Interactively test question pairs for duplicates via stdin.

    Each iteration reads two questions, strips characters outside
    ``[\\w ,.?]``, encodes them with the module-level ``vocab``, embeds
    them with ``model`` on the module-level ``device``, and prints the
    similarity score and whether it exceeds ``threshold``.
    Entering an empty first question exits the loop.

    Args:
        model: trained SiameseNetwork returning one embedding per input.
        threshold: similarity cutoff for declaring the pair duplicates.
    """
    model.eval()  # hoisted: switch to inference mode once, not per iteration
    while True:
        print('enter question1:', end=" ")
        q1 = re.sub(r"[^\w ,.?]+", "", sys.stdin.readline())
        if len(q1) == 0:
            # Fix: bail out *before* prompting for question2, so an empty
            # first question quits immediately instead of forcing a second
            # throwaway input.
            break
        print('enter question2:', end=" ")
        q2 = re.sub(r"[^\w ,.?]+", "", sys.stdin.readline())
        x1 = torch.tensor(vocab.encode_dataset([q1]))
        x2 = torch.tensor(vocab.encode_dataset([q2]))
        with torch.no_grad():
            x1, x2 = x1.to(device), x2.to(device)
            pred1, pred2 = model(x1, x2)
            # Dot product of the two embeddings; presumably they are
            # L2-normalised so this is cosine similarity — TODO confirm
            # in SiameseNetwork.
            similarity = torch.matmul(pred1, pred2.T).item()
            print(similarity)
            result = similarity > threshold
            print(result)


if __name__ == "__main__":
    # Fix: fall back to CPU so the script also runs on machines without CUDA
    # (the original hard-coded 'cuda' and crashed on CPU-only hosts).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Only the vocabulary is needed for inference; the train/valid splits
    # returned by the pipeline are discarded.
    _, _, vocab = qna_data_pipeline(file='data/questions.csv', train_ratio=0.9)
    vocab_size = len(vocab.vocab2id)
    d_model = 64  # must match the embedding dimension the checkpoint was trained with
    model = SiameseNetwork(vocab_size, d_model).to(device)
    # Fix: map_location lets a GPU-trained checkpoint load on a CPU-only host.
    # (Removed the unused TripletLoss instance — no training happens here.)
    checkpoint = torch.load(f"QNA_dim{d_model}.pth", map_location=device)
    model.load_state_dict(checkpoint['model'])
    print("enter sentence")
    test_qna(model)
