
from langchain.document_loaders import TextLoader,JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from faissManager import FAISS
from sent2vec import Sent2VecEmbeddings
from langchain.document_transformers import (
    LongContextReorder,
)
import os
import torch
# Directory of the persisted FAISS index; it also holds maxNorm.pt.
index_name = "/home/lxy/DPR/9m20d_neu_1w5_bge1.5_hnswivf_maxNorm"

# maxNorm was saved next to the index and is used to scale query
# embeddings the same way the indexed vectors were scaled.
maxNorm = torch.load(os.path.join(index_name, 'maxNorm.pt'))
model = Sent2VecEmbeddings()
db = FAISS.load_local(index_name, model)

print('删除前的数据量：', db.index.ntotal)

# Queries whose entries should be removed from the index.
query_to_delete = ['你认识周航吗？']

# Build data as [(doc, embedding-of-query), ...] — the shape db.delete()
# expects (see original comment).
# BUG FIX: the original loop iterated the strings themselves and assigned
# to d[1] (TypeError: str is immutable) while embedding only the second
# *character* of the query; it also never defined `data`, so the lines
# below raised NameError.
data = []
for query in query_to_delete:
    # NOTE(review): the trailing [0] presumably drops a batch dimension
    # returned by embed_documents — confirm against Sent2VecEmbeddings.
    embedding = (model.embed_documents([query])[0] / maxNorm)[0].to('cpu')
    print(embedding.shape)
    data.append((query, embedding))

print('data', data)
texts, embeddings = zip(*data)
print(texts, embeddings)
db.delete(data)
# BUG FIX: message said "添加后" (after adding) for a delete operation.
print('删除后的数据量：', db.index.ntotal)
# db.save_local(index_name)

