import os,time,torch
import numpy as np
from collections import defaultdict

from transformers import BertTokenizer
rootDir = os.path.dirname(__file__)
os.chdir(rootDir)
# True only when this file is executed as a script; gates the debug prints below.
isCodingAndDebug = __name__ == "__main__"

from cnn_model.make_vocab import make_vocab
# Data preprocessing: build the vocabulary and collect the parsed formula instances.
data_vocab_info = make_vocab()
instances = data_vocab_info["instances"]
len_vocab = len(data_vocab_info["final_words"])
labels2id = data_vocab_info["labels2id"]
max_words = data_vocab_info["max_words"]
max_symbols = max_words + 2  # +2 presumably for special boundary tokens -- TODO confirm
max_lines = max_layers = data_vocab_info["max_layers"]
batch_size = 16
dim = 64

# Map each formula string (instance key "公式：") to its parsed keyword layers.
dictSentence2KeyWordsStr = {instance["公式："]: instance["layers"] for instance in instances}
keyWordsStrList = dictSentence2KeyWordsStr.values()
# Integer id -> formula string, used to decode search results back to formulas.
dictID2Sentence = dict(enumerate(dictSentence2KeyWordsStr.keys()))

best_path = 'history_models/best_model.pkl'
# map_location='cpu' lets a GPU-trained checkpoint load on a CPU-only machine
# (plain torch.load would raise when CUDA is unavailable).
# NOTE(review): torch.load unpickles arbitrary objects -- only load trusted checkpoints.
model = torch.load(best_path, map_location='cpu')
# model.eval() switches off dropout/BatchNorm training behaviour, but does NOT
# disable autograd tracking, so gradients are switched off manually below.
model.eval()
for name, param in model.named_parameters():
    param.requires_grad_(False)
tokenizer = BertTokenizer(vocab_file="vocab.txt", do_lower_case=False)
tokenizer.unique_no_split_tokens = list(tokenizer.ids_to_tokens.values())

def get_keyWordsSentence_matrix(model=model, tokenizer=tokenizer, keyWordsStrList=keyWordsStrList):
    """Encode parsed-formula keyword structures into sentence vectors.

    Args:
        model: trained model exposing ``get_hidden_states(batch) -> Tensor``.
        tokenizer: BertTokenizer used to map symbols to token ids.
        keyWordsStrList: iterable of keyword-layer structures, one per formula.

    Returns:
        numpy.ndarray of shape (num_sentences, hidden_dim) -- presumably;
        the hidden dimension comes from the model. TODO confirm.
    """
    from formula_modeling import multilines_keywords_list2matrix
    batchs = multilines_keywords_list2matrix(keyWordsStrList, tokenizer, max_symbols, max_lines)
    batch_datas = torch.from_numpy(np.stack(batchs)).long()
    # Renamed from `batch_size` so it no longer shadows the module-level constant.
    chunk_size = 512
    # Inference only: no_grad avoids building an autograd graph through the
    # activations (per-parameter requires_grad_(False) alone does not).
    with torch.no_grad():
        if len(batch_datas) > chunk_size:
            # Split into chunks so a large corpus does not exhaust memory.
            hidden_list = [model.get_hidden_states(chunk)
                           for chunk in batch_datas.split(chunk_size, dim=0)]
            hidden_matrix = torch.cat(hidden_list, dim=0)
        else:
            hidden_matrix = model.get_hidden_states(batch_datas)
    return hidden_matrix.numpy()

def sentence2vector(sentence, word2id, matrix):
    """Encode one known formula string into its sentence-vector matrix.

    NOTE(review): despite the names, every caller in this file passes the
    *model* as ``word2id`` and the *tokenizer* as ``matrix`` (positionally);
    both are forwarded unchanged. Confirm before renaming the parameters.

    Raises:
        KeyError: if ``sentence`` is not one of the preprocessed formulas.
    """
    keyWordsStr = dictSentence2KeyWordsStr[sentence]
    # Wrap in a list: the encoder expects a batch of keyword structures.
    # (The original `if isCodingAndDebug and False:` debug print was
    # unreachable by construction and has been removed.)
    return get_keyWordsSentence_matrix(word2id, matrix, keyWordsStrList=[keyWordsStr])

def getTopKids(matrix_q, matrix_v, TopK=5):
    """Return, per query row, the ids of the TopK most cosine-similar rows of matrix_v.

    Args:
        matrix_q: query vectors, shape (num_queries, dim).
        matrix_v: candidate vectors, shape (num_candidates, dim).
        TopK: number of best matches to keep per query.

    Returns:
        numpy.ndarray of shape (num_queries, TopK) with candidate row indices,
        best match first.
    """
    import util
    cosMatrix = util.MatrixCos(matrix_q, matrix_v, isTrimZeros=False)
    # One argsort instead of the original separate np.sort + np.argsort passes.
    sortedIDS = np.argsort(cosMatrix)[:, ::-1][:, :TopK]
    if isCodingAndDebug:
        # Gather the similarity values for the selected ids (same values the
        # original obtained via a second full sort).
        sortedCos = np.take_along_axis(cosMatrix, sortedIDS, axis=1)
        print("余弦相似度：", sortedCos)
        print("对应的公式id", sortedIDS)
    return sortedIDS

def singleSentenceSearch(sentence, word2id, matrix, keyWordsStrMatrix, TopK=5):
    """Search the corpus for the TopK formulas most similar to ``sentence``.

    Returns a (1, TopK) array of candidate formula strings, best match first.
    Debug prints are emitted only when running this file as a script.
    """
    query_matrix = sentence2vector(sentence, word2id, matrix)
    top_ids = getTopKids(query_matrix, keyWordsStrMatrix, TopK=TopK)
    # Decode candidate ids back to their formula strings, element-wise.
    candidates = np.vectorize(dictID2Sentence.__getitem__)(top_ids)
    if isCodingAndDebug:
        print("input sentence: ", sentence)
        print("candidates TopK=", TopK)
        for col in range(candidates.shape[-1]):
            print(candidates[0][col])
    return candidates

if __name__ == "__main__":
    # 获取句向量
    keyWordsStrMatrix = get_keyWordsSentence_matrix()
    topK = 5
    #x=\frac{\negative*b\pm\sqrt{b^{2}-4*a*c}}{2*a}
    sentences = list(dictSentence2KeyWordsStr.keys())[:2]
    for sentence in sentences:
        time_s = time.time()
        singleSentenceSearch(sentence,model,tokenizer,keyWordsStrMatrix,TopK=topK)
        time_e = time.time()
        print("耗时：{}毫秒".format((time_e-time_s)*1000))

    active = True
    prompt = '\n请输入TopK数字，表示选择多少个最相似的公式,输入quit将会退出。:'
    while prompt:
        try:
            message = input(prompt)
            if message == 'quit':
                active = False
                prompt = False
            else:
                topK = int(message)
                prompt = False
        except:pass

    prompt = "\n请输入待查询的公式，输入'quit'将会退出。:"
    while active:
        message = input(prompt)
        if message == 'quit':
            active = False
        else:
            sentence = message
            try:
                time_s = time.time()
                singleSentenceSearch(sentence, model, tokenizer, keyWordsStrMatrix, TopK=topK)
                time_e = time.time()
                print("耗时：{}毫秒".format((time_e - time_s) * 1000))
            except:print('输入公式不在，已知切分的公式集合中。')
