from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
# Load the Llama 2 paper; load_and_split() returns one Document per page.
loader = PyPDFLoader("./llama2.pdf")
pages = loader.load_and_split()

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split every page into overlapping ~200-character chunks, recording each
# chunk's start index within its page.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=200,
    chunk_overlap=100,
    length_function=len,
    add_start_index=True,
)
paragraphs = [
    chunk
    for page in pages
    for chunk in splitter.create_documents([page.page_content])
]

## 3. Embed the chunks and index them in a vector store.
from langchain_openai import OpenAIEmbeddings
# BUG FIX: Chroma is exported from langchain_community.vectorstores, not
# langchain_community.document_loaders — the original import raised ImportError.
from langchain_community.vectorstores import Chroma

# Build an in-memory Chroma index over the split chunks using OpenAI embeddings.
db = Chroma.from_documents(paragraphs, OpenAIEmbeddings())

query = "llama有多少参数？"
# BUG FIX: Documents returned by similarity_search() have no
# `similarity_score` attribute, so the original loop raised AttributeError.
# similarity_search_with_score() returns (Document, score) pairs; lower
# distance scores mean greater similarity.
for doc, score in db.similarity_search_with_score(query):
    print(f"{score}\n{doc.page_content}\n-------\n")
