from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader

# Load environment variables (e.g. OPENAI_API_KEY) then read the PDF,
# splitting it into one Document per page.
load_dotenv()
pages = PyPDFLoader("./llama2.pdf").load_and_split()

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split every page into overlapping ~200-char chunks suitable for embedding;
# add_start_index records each chunk's offset within its page.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=200,
    chunk_overlap=100,
    length_function=len,
    add_start_index=True,
)
paragraphs = text_splitter.create_documents(
    [page.page_content for page in pages]
)

#print(paragraphs)
## 3. Embed the chunks and index them in a Chroma vector store.
from langchain_openai import OpenAIEmbeddings
# BUG FIX: Chroma is a vector store, not a document loader — importing it
# from langchain_community.document_loaders raises ImportError.
from langchain_community.vectorstores import Chroma

db = Chroma.from_documents(paragraphs, OpenAIEmbeddings())

# Build a retriever that only returns chunks above a similarity threshold.
# BUG FIX: "saerch_type" was a typo (the keyword is search_type), and the
# threshold must be supplied via search_kwargs={"score_threshold": ...} —
# as_retriever does not accept a bare similarity_score_threshold argument,
# so the original never applied the 0.5 cutoff.
retriever = db.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"score_threshold": 0.5},
)
docs = retriever.invoke("llama2有多少参数？")
print(docs)