# Step 1: Load the PDF.
# PDFPlumberLoader parses the document into per-page Documents, and
# RecursiveCharacterTextSplitter cuts the text into fixed-size chunks.
from langchain_community.document_loaders import PDFPlumberLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

pdf_path = "C:\\Users\\Administrator\\Desktop\\cycling.pdf"
pages = PDFPlumberLoader(pdf_path).load()

# Split the loaded pages into chunks.
#   chunk_size:      number of characters per chunk
#   chunk_overlap:   characters shared between consecutive chunks (none here)
#   length_function: how a chunk's length is measured
splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=False,
)
all_splits = splitter.split_documents(pages)

# Step 2: Initialise the vector store.
# The chunks are embedded with Ollama's nomic-embed-text model and
# persisted in a local Chroma database under ./db.
from langchain_chroma import Chroma
from langchain_ollama import OllamaEmbeddings

embedding_fn = OllamaEmbeddings(model="nomic-embed-text:latest")
vectorstore = Chroma.from_documents(
    documents=all_splits,
    embedding=embedding_fn,
    persist_directory="./db",
)

# Step 3: Build the chain components — the chat model and the prompt
# template that the RAG pipeline below is assembled from.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama import ChatOllama

# Local chat model served by Ollama.
model = ChatOllama(model="deepseek-r1:1.5b")

# Prompt that receives the formatted documents under the "docs" key.
prompt = ChatPromptTemplate.from_template(
    "Summarize the main themes in these retrieved docs: {docs}"
)

def format_docs(docs):
    """Join the page_content of each document into one string, separated by blank lines."""
    parts = []
    for document in docs:
        parts.append(document.page_content)
    return "\n\n".join(parts)

# Assemble the summarisation chain: format the retrieved documents,
# fill the prompt, query the model, and parse the reply to a string.
chain = {"docs": format_docs} | prompt | model | StrOutputParser()

question = "Summarize the main themes in these retrieved docs"
retrieved = vectorstore.similarity_search(question)
print(chain.invoke(retrieved))
print('SUCCESS 0')

# Step 4: Retrieval-augmented QA.
# Combine retrieval with question answering so the model answers user
# questions from the document's contents.
from langchain_core.runnables import RunnablePassthrough

RAG_TEMPLATE = """
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.

<context>
{context}
</context>

Answer the following question:

{question}"""

rag_prompt = ChatPromptTemplate.from_template(RAG_TEMPLATE)

# Expose the vector store as a retriever for the chain below.
retriever = vectorstore.as_retriever()

# Build the QA chain: fetch and format context, pass the question
# through unchanged, fill the prompt, query the model, parse to text.
qa_inputs = {"context": retriever | format_docs, "question": RunnablePassthrough()}
qa_chain = qa_inputs | rag_prompt | model | StrOutputParser()

question = "总结一下一共有几条路线?"

# Run the QA chain.
print(qa_chain.invoke(question))
print('SUCCESS 1')