from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA  # 检索式问答
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_core.output_parsers import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import ChatPromptTemplate

# Load the source PDF; load_and_split() yields one Document per page.
pdf_pages = PyMuPDFLoader("llama2.pdf").load_and_split()

# Re-chunk page text into overlapping ~300-character pieces so retrieval
# returns focused passages rather than whole pages.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=100,
    length_function=len,   # measure chunk size in characters
    add_start_index=True,  # record each chunk's start offset in metadata
)

# Only the first four pages are indexed (demo-sized corpus).
texts = splitter.create_documents(
    [page.page_content for page in pdf_pages[:4]]
)

# Embed the chunks and persist them in a local Chroma collection.
embedding_fn = OpenAIEmbeddings(model="text-embedding-3-small")
db = Chroma.from_documents(
    documents=texts,
    embedding=embedding_fn,
    persist_directory="llama2_db",  # on-disk location of the index
)

# Similarity-search retriever returning the top-2 matching chunks.
retriever = db.as_retriever(search_kwargs={"k": 2})

# Chat model used for answering; temperature=0 for deterministic output.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

# Prompt template: instruct the model to answer strictly from the
# retrieved context.
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)


def _format_docs(docs):
    """Join retrieved Documents into one plain-text context string.

    Without this step the prompt's {context} slot receives the repr of a
    list of Document objects (metadata included), which wastes tokens and
    can confuse the model.
    """
    return "\n\n".join(doc.page_content for doc in docs)


# LCEL pipeline: retrieve -> format context -> fill prompt -> LLM -> str.
# RunnablePassthrough() forwards the user's question unchanged; a plain
# function piped after the retriever is auto-wrapped as a RunnableLambda.
chain = (
    {"context": retriever | _format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Stream the answer token-by-token to stdout as it is generated.
for token in chain.stream("Llama 2有多少参数"):
    print(token, end="", flush=True)
