from dotenv import load_dotenv, find_dotenv
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file
load_dotenv(find_dotenv())

# Load the PDF; PyMuPDFLoader yields one Document per page
loader = PyMuPDFLoader('../data/llama2.pdf')
pages = loader.load_and_split()

text_split = RecursiveCharacterTextSplitter(
    chunk_size=300,       # max characters per chunk
    chunk_overlap=100,    # 100-char overlap to preserve context across chunk boundaries
    length_function=len,  # measure size in characters
    add_start_index=True  # record each chunk's start offset in its metadata
)

# Use split_documents (not create_documents on raw page_content) so each
# chunk keeps its page's source metadata alongside the requested start_index.
# Only the first 4 pages are indexed in this demo.
texts = text_split.split_documents(pages[:4])

# Build the vector store.
# NOTE(review): "BAAI/bge-m3" is not an OpenAI model — this presumably targets
# an OpenAI-compatible endpoint configured via the .env (e.g. OPENAI_BASE_URL);
# confirm the environment actually provides one.
embedding = OpenAIEmbeddings(model="BAAI/bge-m3")
db = FAISS.from_documents(texts, embedding)

# Retrieve the 3 most similar chunks for a query
retriever = db.as_retriever(search_kwargs={"k": 3})

docs = retriever.invoke("llama2有多少参数")

for doc in docs:
    print(doc.page_content)
    print("----")