import torch
from bert_seq2seq import Tokenizer
from bert_seq2seq import load_model
from bert_seq2seq import Predictor
import faiss

# Paths to the pre-built retrieval corpus: question texts, answer texts, and
# the pre-computed question embeddings (all serialized with torch.save by an
# earlier indexing step, despite the .json extension — loaded via torch.load).
query_save_path = "../../large-model-data/05_bnudata/bnu_xs/bnu_xs_q_text.json"
answer_save_path = "../../large-model-data/05_bnudata/bnu_xs/bnu_xs_a_text.json"
embeddings_save_path = "../../large-model-data/05_bnudata/bnu_xs/bnu_xs_q_embd.json"

maxlen = 256  # max token length fed to the tokenizer / embedding model
d = 768       # embedding dimensionality (BERT-base hidden size)
nlist = 5     # number of IVF clusters for the faiss index built below

model_name = "bert" # choose the model architecture name
task_name = "embedding"  # task head: produce sentence embeddings
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

vocab_path = "../../large-model-data/state_dict/roberta/vocab.txt" # location of the roberta model vocabulary
model_path = "../../large-model-data/state_dict/roberta/pytorch_model.bin" # location of the roberta model weights

# Module-level model construction: loading the checkpoint happens at import
# time, so importing this file has heavy side effects (disk + possibly GPU).
tokenizer = Tokenizer(vocab_path, do_lower_case=True, max_len=maxlen)
bert_model = load_model(tokenizer.vocab, model_name=model_name, task_name=task_name)
bert_model.load_pretrain_params(model_path)

predictor = Predictor(bert_model, tokenizer)

class Search:
    """Approximate nearest-neighbour search over embeddings via a faiss
    IVF-Flat index trained on (and populated with) the given vectors."""

    def __init__(self, training_vectors, d, nlist=10, nprobe=1):
        """Build, train, and populate the index.

        Args:
            training_vectors: (n, d) float32 array of embeddings; used both
                to train the coarse quantizer and as the searchable database.
            d: embedding dimensionality.
            nlist: number of IVF clusters (coarse quantizer cells).
            nprobe: cells visited per query (recall/speed trade-off).
        """
        # NOTE(review): the coarse quantizer is inner-product (IndexFlatIP)
        # while the IVF index itself uses METRIC_L2 — the two metrics
        # disagree, which can hurt recall. Confirm whether IndexFlatL2
        # (or METRIC_INNER_PRODUCT throughout) was intended.
        quantizer = faiss.IndexFlatIP(d)  # the other index，used as the coarse quantizer base
        self.index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
        assert not self.index.is_trained
        self.index.train(training_vectors)
        assert self.index.is_trained
        self.index.nprobe = nprobe  # default nprobe is 1, try a few more
        self.index.add(training_vectors)  # add may be a bit slower as well
        self.d = d

    def search(self, question_embedding, k, query, answer):
        """Return the top-k matching question/answer pairs for one embedding.

        Args:
            question_embedding: torch tensor holding a single query embedding
                (any shape with d elements; reshaped to (1, d)).
            k: number of neighbours to retrieve.
            query: list of question texts aligned with the indexed vectors.
            answer: list of answer texts aligned with `query`.

        Returns:
            list[dict]: one {question_text: answer_text} dict per valid hit.
        """
        # .detach().cpu() makes the conversion safe for CUDA tensors and
        # tensors that require grad; a bare .numpy() would raise for those.
        question_embedding = (
            question_embedding.detach().cpu().numpy().reshape(-1, self.d)
        )
        D, I = self.index.search(question_embedding, k)  # actual search
        result = []
        for s, i in zip(D[0], I[0]):
            # faiss pads with -1 when fewer than k neighbours are found;
            # guard BEFORE printing so we never index query[-1]/answer[-1]
            # (Python negative indexing would silently show the last entry).
            if i != -1:
                print(s, i, query[i], answer[i])
                result.append({query[i]: answer[i]})

        print(result)
        # Bug fix: the original built `result` but never returned it,
        # so callers could not use the retrieved pairs.
        return result

if __name__ == '__main__':
    # Load the corpus texts and their pre-computed embeddings (torch.save'd).
    q_text = torch.load(query_save_path)
    a_text = torch.load(answer_save_path)
    q_embd = torch.load(embeddings_save_path)

    method = Search(training_vectors=q_embd, d=d, nlist=nlist, nprobe=2)

    # Demo queries — the original repeated the same two lines three times;
    # a loop keeps the behavior identical and removes the duplication.
    for question in ("姓名", "学工号", "手机号"):
        question_embedding = predictor.predict_embedding(question, maxlen=maxlen)
        method.search(question_embedding, 5, q_text, a_text)