# Build a basic RAG pipeline (a simple retrieval-augmented question-answering
# system) on top of LangChain and LangGraph.

# [Gotcha] Set these environment variables early, and make sure this runs
# before any other code is imported/executed.
# Otherwise you get: USER_AGENT environment variable not set, consider setting it to identify your requests.
import os

from app.config import config

os.environ["USER_AGENT"] = "MyRAG/1.0 (coder_katherine)"
os.environ["LANGCHAIN_API_KEY"] = config.get("langchain", "api_key")
os.environ["TOKENIZERS_PARALLELISM"] = "true"  # Hugging Face tokenizers multiprocessing

import asyncio
from typing_extensions import List, TypedDict
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_classic import hub
from langchain_core.prompt_values import PromptValue
from langchain_core.documents import Document
from langgraph.graph import START, StateGraph
from app.documentloader.web import load
from app.embedding.sentence_transformer import getMpnetModel
from app.vectorstore.in_memory import MyInMemoryVectorStore
from app.chatmodel.qwen_qwq import chat_model

# Use LangGraph to tie together the retrieval and generation steps into a single application.

# Module-level in-memory vector store, shared by indexing (in main) and
# lookup (in retrieve). Embeddings come from the project's MPNet model.
my_vector_store = MyInMemoryVectorStore(embedding_model=getMpnetModel())


class State(TypedDict):
    """LangGraph state passed between the retrieve and generate nodes."""

    question: str  # the user's question
    context: List[Document]  # documents retrieved from the vector store
    answer: str  # the model-generated answer


async def retrieve(state: State):
    """Look up documents relevant to the question in the shared vector store.

    Args:
        state: Graph state carrying the user's "question".

    Returns:
        A partial state update: {"context": <matching documents>}.
    """
    matches = await my_vector_store.similarity_search(state["question"])
    return {"context": matches}


# Cache the hub prompt at module level so the network round trip to the
# LangChain prompt hub happens only once, not on every generate() call.
_rag_prompt = None


def _get_rag_prompt():
    """Fetch (once) and cache the "rlm/rag-prompt" RAG prompt from the LangChain hub."""
    global _rag_prompt
    if _rag_prompt is None:
        _rag_prompt = hub.pull("rlm/rag-prompt")
    return _rag_prompt


def generate(state: State):
    """Answer the question using the retrieved context and the chat model.

    Args:
        state: Graph state with "question" and the retrieved "context" docs.

    Returns:
        A partial state update: {"answer": <model response text>}.
    """
    prompt = _get_rag_prompt()
    # Concatenate all retrieved chunks into one context string for the prompt.
    docs_content = "\n\n".join(doc.page_content for doc in state["context"])
    messages: PromptValue = prompt.invoke({"question": state["question"], "context": docs_content})
    response = chat_model.invoke(messages)
    return {"answer": response.content}


async def main():
    """Run the end-to-end RAG demo: load, split, index, build the graph, query."""
    print("Step1: Loading documents.")
    docs = load(("https://python.langchain.com/docs/introduction/",), ("container", "row"))
    print(f"Total characters: {len(docs[0].page_content)}")
    print(f"The first ten characters: {docs[0].page_content[:10]}")

    print("Step2: Splitting documents.")
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,  # chunk size (characters)
        chunk_overlap=200,  # chunk overlap (characters)
        add_start_index=True,  # track index in original document
    )
    all_splits = text_splitter.split_documents(docs)
    print(f"Split blog into {len(all_splits)} sub-documents.")
    print(f"The first sub-document's first 100 characters are: {all_splits[0].page_content[:100]}")

    print("Step3: Index chunks and Storing documents.")
    document_ids = await my_vector_store.add_documents(documents=all_splits)
    print(f"Indexed {len(document_ids)} documents.")
    print(f"The first three document ids are {document_ids[:3]}")

    print("Step4: Defining graph.")
    graph_builder = StateGraph(State).add_sequence([retrieve, generate])
    graph_builder.add_edge(START, "retrieve")
    graph = graph_builder.compile()
    # os.path.join is robust whether or not the configured folder ends with a
    # path separator (plain "+" concatenation silently produced a broken path
    # when the trailing separator was missing).
    graph_png_path = os.path.join(config.get("folder", "img_cache_folder"), "rag_graph.png")
    with open(graph_png_path, "wb") as f:
        f.write(graph.get_graph().draw_mermaid_png())

    # Usage
    print("Step5: Running graph.")
    try:
        response = await graph.ainvoke({"question": "What is vectorStore"})
        print(f'Answer: {response["answer"]}')
    except Exception as e:
        # Best-effort demo: report the failure instead of crashing the script.
        print(f"Error: {e}")


if __name__ == "__main__":
    asyncio.run(main())
