
from langchain.document_loaders import TextLoader,JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from faissManager import FAISS
from sent2vec import Sent2VecEmbeddings
from langchain.document_transformers import (
    LongContextReorder,
)

# Location of the pre-built FAISS index on disk.
index_name = "/home/lxy/DPR/faiss_index"

# Load the persisted vector store; queries are embedded with the
# project's sentence-level embedding model.
db = FAISS.load_local(index_name, Sent2VecEmbeddings())


# import pickle
# from os import path

# s=set()
# with open('/home/lxy/DPR/summary_index/index.pkl',"rb") as f:
#         docstore, index_to_docstore_id = pickle.load(f)
#         print('1')
            



# Total number of vectors stored in the index.
num = db.index.ntotal
# Reconstruct every stored vector from the FAISS index into a (num, dim) array.
vectors = db.index.reconstruct_n(0, num)


import numpy as np

# Find the unique rows, the index of each row's first occurrence in
# `vectors`, and how many times each unique row appears.
# NOTE: with both return_index and return_counts set, np.unique returns
# (unique, first_indices, counts) — in that order.
unique_elements, unique_indices, duplicate_counts = np.unique(
    vectors, axis=0, return_index=True, return_counts=True
)
print(num, unique_elements.shape, duplicate_counts)

# Rows that occur more than once.
dup_mask = duplicate_counts > 1
# BUG FIX: the original computed np.where(np.bincount(unique_indices) > 1)[0],
# which is always empty — first-occurrence indices are unique by construction,
# so every bincount entry is 0 or 1. Select the first-occurrence index of each
# duplicated row via the count mask instead.
duplicate_indices = unique_indices[dup_mask]
duplicates = unique_elements[dup_mask]

print("重复项：", duplicates)
print("重复项的索引位置：", duplicate_indices)
# FIX: report the counts of the duplicated rows only (matches the label),
# not the counts of every unique row.
print("对应的重复次数：", duplicate_counts[dup_mask])




# with open('补充数据.txt','r',encoding='utf-8') as f:
#     data=f.readlines()
#     print(data)

# db.add_texts(data)
# db.save_local(index_name)



# query = "东北大学现在的校长是谁？"
# docs = db.similarity_search(query)
# reordering = LongContextReorder()
# reordering_docs = reordering.transform_documents(docs)
# prompt="请根据以下信息回答问题：\n"
# for i in range(4):
#     prompt+=f'{i+1}.'+docs[i].page_content+'\n'
# print(prompt)