import os
import time
import torch

import numpy as np
from transformers import BertTokenizer
from gcn_model.make_vocab import make_vocab

# Resolve all relative paths (model checkpoint, vocab.txt) against this file's
# directory so the script works no matter where it is launched from.
rootDir = os.path.dirname(__file__)
os.chdir(rootDir)
# True only when run as a script; gates the verbose debug printing below.
isCodingAndDebug = __name__ == "__main__"

# vocab and data instances

# Build the vocabulary and load the raw formula instances once at import time.
data_vocab_info = make_vocab()
instances = data_vocab_info["original_instances"]
len_vocab = len(data_vocab_info["final_words"])
labels2id = data_vocab_info["labels2id"]
max_words = data_vocab_info["max_words"]
# +2 — presumably room for special begin/end tokens; TODO confirm in make_vocab.
max_symbols = max_words + 2
# max_lines = max_layers = data_vocab_info["max_layers"]
max_lines = 1
batch_size = 16
dim = 64

# Formula string (instance key "数学公式：") -> its full instance dict.
# Duplicate formulas keep only the last instance seen.
dictSentence2ItsInfo = {instance["数学公式："]: instance for instance in instances}
InfoList = dictSentence2ItsInfo.values()
# Row index in the embedding matrix -> formula string (dict insertion order).
dictID2Sentence = dict(zip(range(len(dictSentence2ItsInfo)), dictSentence2ItsInfo.keys()))

best_path = 'history_models/best_model.pkl'
# NOTE(review): torch.load unpickles a full model object — only safe for
# checkpoints produced by this project itself.
model = torch.load(best_path)
# model.eval() switches layers such as dropout and BatchNorm to inference mode,
# but it does NOT disable gradient tracking, so gradients are turned off
# manually below.
model.eval()
for name, param in model.named_parameters():
    param.requires_grad_(False)
tokenizer = BertTokenizer(vocab_file="vocab.txt", do_lower_case=False)
# Treat every vocab entry as unsplittable so formula tokens survive tokenization.
tokenizer.unique_no_split_tokens = list(tokenizer.ids_to_tokens.values())


def get_keyWordsSentence_matrix(model=model, tokenizer=tokenizer, InfoList=InfoList):
    """Encode every instance in InfoList into a sentence vector.

    Returns a numpy array of shape (number of sentences, hidden dimension).
    Defaults are bound to the module-level model/tokenizer/corpus at import time.
    """
    from formula_gcn_modeling import info2digit_world

    batches = info2digit_world(InfoList, tokenizer, max_symbols, max_lines)
    # Each batch entry is (matrix, input_ids, mask, true_label, formula);
    # only the first three columns feed the model.
    columns = list(zip(*batches))
    model_inputs = tuple(torch.tensor(np.stack(col)) for col in columns[:3])
    hidden = model.get_hidden_states(model_inputs)
    return hidden.numpy()


def sentence2vector(sentence, word2id, matrix):
    """Embed a single formula string into its sentence-vector matrix.

    NOTE(review): despite the names, callers pass (model, tokenizer) as
    (word2id, matrix); the names are kept for backward compatibility.

    Raises KeyError when *sentence* is not in the known formula corpus
    (the interactive loop in __main__ relies on that to report "NOT IN").
    """
    info = dictSentence2ItsInfo[sentence]  # KeyError => unknown formula
    # Dead debug branch (`if isCodingAndDebug and False:`) removed — it could
    # never execute.
    return get_keyWordsSentence_matrix(word2id, matrix, InfoList=[info])


def getTopKids(matrix_q, matrix_v, TopK=5):
    """Return, per query row, the indices of the TopK most cosine-similar rows of matrix_v.

    Output shape: (num_queries, TopK). Debug printing of the similarity scores
    is gated by isCodingAndDebug.
    """
    import util
    cosMatrix = util.MatrixCos(matrix_q, matrix_v, isTrimZeros=False)
    # Sort once; the index order serves for both ids and (debug-only) scores.
    # The original sorted the matrix a second time with np.sort just to print it.
    sortedIDS = np.argsort(cosMatrix)[:, ::-1][:, :TopK]
    if isCodingAndDebug:
        sortedCos = np.take_along_axis(cosMatrix, sortedIDS, axis=-1)
        print("余弦相似度：", sortedCos)
        print("对应的公式id", sortedIDS)
    return sortedIDS


def singleSentenceSearch(sentence, word2id, matrix, keyWordsStrMatrix, TopK=5):
    """Find the TopK formulas most similar to *sentence* and return them.

    Returns an array of formula strings with shape (1, TopK); prints them when
    running as a script.
    """
    query_matrix = sentence2vector(sentence, word2id, matrix)
    top_ids = getTopKids(query_matrix, keyWordsStrMatrix, TopK=TopK)
    # Map row ids back to formula strings element-wise.
    id_to_formula = np.vectorize(dictID2Sentence.__getitem__)
    candidates = id_to_formula(top_ids)
    if isCodingAndDebug:
        print("input sentence: ", sentence)
        print("candidates TopK=", TopK)
        for col in range(candidates.shape[-1]):
            print(candidates[0][col])
    return candidates


if __name__ == "__main__":
    # 获取句向量
    keyWordsStrMatrix = get_keyWordsSentence_matrix()
    topK = 5
    sentences = list(dictSentence2ItsInfo.keys())[:2]
    for sentence in sentences:
        time_s = time.time()
        singleSentenceSearch(sentence, model, tokenizer, keyWordsStrMatrix, TopK=topK)
        time_e = time.time()
        print("耗时：{}毫秒".format((time_e - time_s) * 1000))

    active = True
    prompt = '\n请输入TopK数字，表示选择多少个最相似的公式,输入quit将会退出。:'
    while prompt:
        try:
            message = input(prompt)
            if message == 'quit':
                active = False
                prompt = False
            else:
                topK = int(message)
                prompt = False
        except:
            pass

    prompt = "\n请输入待查询的公式，输入'quit'将会退出。:"
    while active:
        message = input(prompt)
        if message == 'quit':
            active = False
        else:
            sentence = message
            try:
                time_s = time.time()
                singleSentenceSearch(sentence, model, tokenizer, keyWordsStrMatrix, TopK=topK)
                time_e = time.time()
                print("耗时：{}毫秒".format((time_e - time_s) * 1000))
            except:
                print('NOT IN:输入公式不在已知切分的公式集合中。')