import copy
import time

from get_sim_embeddings_m3e import  GetEmbedding
import numpy as np
import json
import faiss
import random


# Build the query-embedding model and the FAISS similarity index.
# NOTE(review): these are module-level side effects — the model, the .npy
# embedding matrix, and the index are all loaded/built at import time.
model_path = './roberta-wwm-finetune'
embedding_path = "./zhengzhuang_search_embedding_v20231116.npy"
# Model used to embed incoming query text (project-local wrapper).
test = GetEmbedding(model_path)
# Pre-computed corpus embeddings; one row per entry of the symptom corpus
# loaded below (assumed — TODO confirm the .npy and JSON were built together).
embeddings_ = np.load(embedding_path)
di_qu_all_embedding = np.array(embeddings_)
# Flat inner-product index over the corpus; faiss requires float32 vectors.
quantizer = faiss.IndexFlatIP(di_qu_all_embedding.shape[1])
quantizer.add(di_qu_all_embedding.astype(np.float32))

def readjson(file_path):
  """Load and return the JSON document stored at *file_path*.

  The file is read as UTF-8 text and parsed with ``json.load``.

  Parameters
  ----------
  file_path : str
      Path to the JSON file.

  Returns
  -------
  object
      Whatever ``json.load`` produces (dict, list, ...).

  Raises
  ------
  FileNotFoundError
      If *file_path* does not exist.
  json.JSONDecodeError
      If the file is not valid JSON.
  """
  with open(file_path, "r", encoding="utf-8") as f:
    return json.load(f)

# Symptom corpus. Row i of the embedding matrix is assumed to correspond to
# datas[i] (the v20231116 suffixes match — TODO confirm they were generated
# from the same snapshot).
datas = readjson('./dataset/symptoms_all_v20231116.json')

def get_augument_content(input_text, topk=5):
    """Embed *input_text*, search the FAISS index, and return the top matches.

    Parameters
    ----------
    input_text : str
        Query text to embed with the module-level model.
    topk : int, optional
        Number of nearest neighbours to retrieve (default 5).

    Returns
    -------
    dict
        Maps str rank ("0", "1", ...) to ``[matched_entry, str(score)]``.
        Entries of length <= 1 are skipped, so some ranks may be absent.
    """
    start_e = time.time()
    query_embeding = test.get_sim_embedding(input_text)
    end_e = time.time()
    print("问题embeddding时间：", str(end_e - start_e))

    print("-----------------------------------")
    print(query_embeding.shape)

    # faiss only accepts float32 queries; cast defensively in case the model
    # returns float64. No-op (no copy) when already float32.
    query = np.asarray(query_embeding, dtype=np.float32)
    start_r = time.time()
    distance, idx = quantizer.search(query, topk)
    end_r = time.time()
    print("问题检索时间：", str(end_r - start_r))
    print("distance: ", distance)
    print("idx: ", idx)

    # Collect answers keyed by rank; presumably the embedding is a (1, dim)
    # batch of one, hence idx[0] / distance[0] — TODO confirm shape upstream.
    q_annd_a = {}
    for index, item in enumerate(idx[0]):
        ask_ = datas[item]
        # Skip degenerate entries (single character / single element).
        if len(ask_) > 1:
            q_annd_a.update({
                str(index): [ask_, str(distance[0][index])]
            })

    return q_annd_a

if __name__ == '__main__':
    # Smoke-test a few fixed queries.
    test_datas = ['感冒', '发烧', '头疼']
    for data in test_datas:
        print(f'{data}： {get_augument_content(data)}')
    # Random spot-checks over the corpus.
    for _ in range(100):
        # randrange(len(datas)) yields 0..len(datas)-1; the original
        # randint(0, len(datas)) included len(datas) itself and could
        # raise IndexError on datas[i].
        i = random.randrange(len(datas))
        data = datas[i]
        cur = get_augument_content(data)
        print(data, cur)
