from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyMuPDFLoader

# Load the PDF file (one Document per page).
loader = PyMuPDFLoader(file_path="llama2.pdf")
pages = loader.load_and_split()

# Split documents into overlapping chunks for retrieval.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,        # max characters per chunk
    chunk_overlap=100,     # overlap between adjacent chunks preserves context across boundaries
    length_function=len,   # measure chunk size in characters
    add_start_index=True,  # record each chunk's start offset in its metadata
)

# Build chunk Documents from the first 4 pages only.
# NOTE(review): create_documents() on raw page_content drops the per-page
# metadata (source file, page number) — use split_documents(pages[:4])
# instead if that metadata should be kept on the chunks.
texts = text_splitter.create_documents([page.page_content for page in pages[:4]])

# Embedding model. text-embedding-ada-002 is OpenAI's legacy embedding
# model; text-embedding-3-small is the newer, cheaper replacement —
# switch to it unless the index must stay compatible with ada-002 vectors.
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")

# Index the chunks in an in-memory FAISS vector store.
db = FAISS.from_documents(texts, embeddings)

# Configure the retriever to return the top-3 most similar chunks.
retriever = db.as_retriever(search_kwargs={"k": 3})

docs = retriever.invoke("llama2有多少参数？")

# Print each retrieved chunk with its metadata, separated by a rule.
for doc in docs:
    print(doc.page_content)
    print(doc.metadata)
    print("-" * 100)