from langchain.document_loaders import TextLoader,JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from faissManager import FAISS
from sent2vec import Sent2VecEmbeddings
from langchain.document_transformers import (
    LongContextReorder,
)


import resource

def get_memory_usage():
    """Return this process's peak resident set size (``ru_maxrss``).

    NOTE(review): units are platform-dependent — kilobytes on Linux,
    bytes on macOS. Confirm before comparing across machines.
    """
    rusage = resource.getrusage(resource.RUSAGE_SELF)
    return rusage.ru_maxrss




# Load the persisted FAISS vector store, embedding queries with the
# fine-tuned Sent2Vec checkpoint.
db = FAISS.load_local(
    "/home/lxy/DPR/qg_bge_2wfinetuned_340000",
    Sent2VecEmbeddings(model_name='/home/lxy/DPR/models/checkpoint-53910-epoch-10'),
)

# Number of retrieved passages used to build the prompt (single source of
# truth for both the search and the prompt loop; the earlier default was 4).
TOP_K = 5

query = "2022年东北大学秦皇岛分校民族学专业在辽宁省的录取人数是多少？"

docs = db.similarity_search(query, k=TOP_K)

# Optional "lost in the middle" mitigation — reorder so the most relevant
# documents sit at the beginning and end of the list:
# reordering = LongContextReorder()
# docs = reordering.transform_documents(docs)

# Build the prompt from the retrieved passages. Iterating over `docs`
# (instead of the previous hard-coded `range(5)`) keeps the count in sync
# with TOP_K and avoids an IndexError if the store returns fewer documents.
prompt = "请根据以下信息回答问题：\n"
for idx, doc in enumerate(docs, start=1):
    prompt += f'{idx}.' + doc.page_content + '\n'
print(prompt)

