from pke_zh.tfidf import TfIdf
from keyBert import KeyBert
import json
from functools import partial
import sys
sys.path.append("/home/lxy/DPR")
from faissManager import FAISS
from sent2vec import Sent2VecEmbeddings

import torch

# Probe query (Chinese): "I'd also like to apply for instrument/equipment repair."
q = '我还想申请一下仪器设备维修'

# Embedding model and keyword extractor used throughout this script.
model = Sent2VecEmbeddings()
KeyBert_m = KeyBert()

# Load a pre-built FAISS index from disk; `model` supplies the query embeddings.
db = FAISS.load_local("/data/lxy/RAT/10m9d_wxb_bge1.5_hnswivf_20500", model)
# Reorder the documents:
# less relevant documents end up in the middle of the list and more
# relevant ones at the beginning / end.
# NOTE(review): leftover from a long-context-reordering example — no
# reordering is actually performed below.

# Extract keywords from the query and locate the top keyword's character span.
key_word = KeyBert_m.extract(q)
print(key_word)
if not key_word:
    raise ValueError(f"no keyword extracted from query: {q!r}")
top_kw = key_word[0][0]
start_pos = q.find(top_kw)
if start_pos == -1:
    # Guard: q.find() returning -1 would otherwise silently produce a
    # wrong (negative-index) slice of the token embeddings below.
    raise ValueError(f"keyword {top_kw!r} not found in query {q!r}")
end_pos = start_pos + len(top_kw)

# Per-token and sentence-level embeddings of the query.
q_token_embedding, q_s_embedding = model.embed_query_for_multi_turn(q)
result = db.similarity_search_with_score_by_vector(q_s_embedding.to('cpu'))
docs, score = zip(*result)
print(docs, score)

print(docs)
print(q_token_embedding)
# NOTE(review): the `-1` offset maps character positions to token positions;
# this assumes one token per character shifted left by one. A leading [CLS]
# token would require `+1` instead, and start_pos == 0 makes the slice empty
# (mean over zero rows -> NaN) — TODO confirm against the tokenizer.
print(q_token_embedding[0, start_pos-1:end_pos-1])
# Mean-pool the keyword's token embeddings into a single vector.
mean_value = torch.mean(q_token_embedding[0, start_pos-1:end_pos-1], dim=0)
print(mean_value)
# 4-shot by default
# Confirm that the 4 relevant documents are at the beginning and end.
# query = "怎么在网上申请图书馆报告厅使用？"
# docs = db.similarity_search(query,k=50)
# print(docs)
# Second query ("South Lake"); compare three sentence-vector variants:
#   1. the plain query-2 embedding,
#   2. query-2 averaged with the keyword token-mean from query 1,
#   3. query-2 averaged with the full sentence embedding of query 1.
query2 = "南湖"
q2_token_embedding, q2_s_embedding = model.embed_query_for_multi_turn(query2)

base_vec = q2_s_embedding.to('cpu')
candidate_vecs = [
    base_vec,
    (base_vec + mean_value.to('cpu')) / 2,
    (base_vec + q_s_embedding.to('cpu')) / 2,
]

# Search with each candidate vector and print results, separated by a rule.
for idx, vec in enumerate(candidate_vecs):
    hits, hit_scores = zip(*db.similarity_search_with_score_by_vector(vec))
    print(hits, hit_scores)
    if idx < len(candidate_vecs) - 1:
        print('----------------')
