"""
CRAG (Corrective Retrieval-Augmented Generation) 反思式检索系统 - 修复版
CRAG是一种改进的RAG方法，通过以下步骤提高检索质量：
1. 检索：从向量数据库检索相关文档
2. 评分：评估检索文档的相关性
3. 决策：根据评分结果决定是直接生成答案还是进行查询重写
4. 校正：如果文档不相关，重写查询并进行网络搜索
5. 生成：基于过滤后的相关文档生成最终答案

这种方法能够自动检测和纠正不准确的检索结果，提高RAG系统的可靠性。
"""

# ================================
# 第一部分：数据准备和向量数据库构建
# ================================

import os
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from typing import List, TypedDict
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
from langchain import hub
from langchain_community.chat_models import ChatZhipuAI
# 使用Tavily作为搜索工具，需要注册 Tavily 并获取 API 密钥
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain.schema import Document
from langgraph.graph import END, StateGraph, START
from pprint import pprint

# 加载环境变量
load_dotenv()

# 1. Build a vector index over three blog posts.
urls = [
    "https://lilianweng.github.io/posts/2023-06-23-agent/",
    "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
    "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]

# Load each page (one document list per URL), then flatten into a single list.
docs = [WebBaseLoader(url).load() for url in urls]
docs_list = [page for per_url in docs for page in per_url]

# Chunk the pages with token-based sizing and no overlap.
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=250, chunk_overlap=0
)
doc_splits = text_splitter.split_documents(docs_list)

# Embed the chunks with a local BGE model (CPU, normalized embeddings)
# and persist them into a Chroma collection.
embeddings_model = HuggingFaceEmbeddings(
    model_name="D:/ideaSpace/MyPython/models/bge-small-zh-v1.5",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)
vectorstore = Chroma.from_documents(
    documents=doc_splits,
    collection_name="rag-chroma",
    embedding=embeddings_model,
)
retriever = vectorstore.as_retriever()

# ================================
# 第二部分：检索评分器 - 修复版
# ================================

class GradeDocuments(BaseModel):
    """Binary relevance grade for one retrieved document.

    Used as the target schema of the JsonOutputParser in the grading chain.
    """
    # 'yes' when the document is relevant to the question, 'no' otherwise.
    # The Chinese description is a runtime string (part of the schema) and is
    # deliberately left unchanged.
    binary_score: str = Field(description="文档与问题相关为'yes'，不相关为'no'")

# LLM instance used for document grading (low temperature for consistency).
llm = ChatZhipuAI(
    api_key=os.getenv("ZHIPUAI_API_KEY"),
    model="glm-4",
    temperature=0.1
)

# Grading prompt.
# FIX: literal braces inside a ChatPromptTemplate (default f-string template
# format) must be escaped as {{ }}.  The original single-braced
# {"binary_score": "yes"} was parsed as a prompt input variable, so invoking
# the chain failed with a missing-variable error.
system = """你是一个严格的文档相关性评分员。必须按以下规则评分：
1. 文档直接回答问题或提供关键信息 → 'yes'
2. 文档与问题无关或信息不充分 → 'no'
只输出JSON格式：{{"binary_score": "yes"}} 或 {{"binary_score": "no"}}"""

grade_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "文档内容:\n{document}\n\n问题: {question}"),
    ]
)

# JSON output parser pinned to the GradeDocuments schema for stable output.
parser = JsonOutputParser(pydantic_object=GradeDocuments)
retrieval_grader = grade_prompt | llm | parser

# ================================
# 第三部分：RAG生成链
# ================================

# Pull the standard RAG prompt ("rlm/rag-prompt", variables: context +
# question) from the LangChain hub.  NOTE(review): this needs network access
# at import time.
prompt = hub.pull("rlm/rag-prompt")
# Deterministic (temperature=0) model for final answer generation.
llm_generator = ChatZhipuAI(
    api_key=os.getenv("ZHIPUAI_API_KEY"),
    model="glm-4",
    temperature=0
)

def format_docs(docs):
    """Concatenate the page contents of *docs*, separated by blank lines."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)

rag_chain = prompt | llm_generator | StrOutputParser()

# ================================
# 第四部分：查询重写器
# ================================

# Rewriter model; moderate temperature so rewrites are not verbatim copies.
llm_rewriter = ChatZhipuAI(
    api_key=os.getenv("ZHIPUAI_API_KEY"),
    model="glm-4",
    temperature=0.5
)

# NOTE(review): this rebinds the module-level `system` used by the grading
# prompt above.  Harmless because grade_prompt is already built, but a
# distinct name would be clearer.
system = """你是一个问题重写专家，将输入的问题转换为更适合搜索的版本。
输出必须保持原问题的核心语义，但更精确、更适合检索。"""

re_write_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "初始问题: {question} \n请改进这个问题:"),
    ]
)

# Rewrite chain: prompt -> LLM -> plain string (the improved question).
question_rewriter = re_write_prompt | llm_rewriter | StrOutputParser()

# ================================
# 第五部分：网络搜索工具 - 修复版
# ================================

# Fail fast when the Tavily key is missing (set TAVILY_API_KEY in .env).
if not os.getenv("TAVILY_API_KEY"):
    raise ValueError("请设置TAVILY_API_KEY环境变量")

# FIX: the result-count parameter of TavilySearchResults is `max_results`;
# the original `k=3` was not a recognized field and was silently ignored,
# so the tool fell back to its default result count.
web_search_tool = TavilySearchResults(
    max_results=3,
    tavily_api_key=os.getenv("TAVILY_API_KEY")
)

# ================================
# 第六部分：图状态定义
# ================================

class GraphState(TypedDict):
    """Shared state carried between the LangGraph nodes."""
    # Current (possibly rewritten) user question.
    question: str
    # Final LLM answer; filled in by the `generate` node.
    generation: str
    # "Yes"/"No" flag set by `grade_documents`: whether web search is needed.
    web_search: str
    # Retrieved documents, filtered by grading and augmented by web search.
    documents: List[Document]

# ================================
# 第七部分：CRAG工作流节点函数 - 修复版
# ================================

def retrieve(state):
    """Retrieval node: fetch candidate documents from the vector store."""
    print("---执行检索---")
    query = state["question"]
    # .invoke() is the Runnable entry point (replaces get_relevant_documents).
    docs = retriever.invoke(query)
    return {"documents": docs, "question": query}

def generate(state):
    """Generation node: run the RAG chain over the surviving documents."""
    print("---生成答案---")
    query = state["question"]
    docs = state["documents"]
    answer = rag_chain.invoke(
        {"context": format_docs(docs), "question": query}
    )
    return {"documents": docs, "question": query, "generation": answer}

def grade_documents(state):
    """Grading node: keep only documents the LLM judges relevant.

    Runs the retrieval grader over each document.  Sets web_search="Yes"
    when no document survives, routing the graph into the query-rewrite /
    web-search correction path.
    """
    print("---评估文档相关性---")
    question = state["question"]
    documents = state["documents"]

    filtered_docs = []
    for d in documents:
        try:
            score = retrieval_grader.invoke(
                {"question": question, "document": d.page_content}
            )
            if not score or "binary_score" not in score:
                print("---无效评分结果，默认视为不相关---")
                continue

            # FIX: normalize case/whitespace so grades like "Yes" or " yes "
            # from the LLM are still recognized (the original exact match
            # misclassified them as irrelevant).
            if str(score["binary_score"]).strip().lower() == "yes":
                print("---文档相关---")
                filtered_docs.append(d)
            else:
                print("---文档不相关---")
        except Exception as e:
            # Best-effort grading: one failed grade must not abort the run.
            print(f"---评分出错: {e}---")
            continue

    # An empty relevant-document list is itself the "need correction" signal;
    # no separate boolean flag required.
    web_search = "No" if filtered_docs else "Yes"
    if web_search == "Yes":
        print("---没有相关文档，需要网络搜索---")

    return {"documents": filtered_docs, "question": question, "web_search": web_search}

def transform_query(state):
    """Rewrite node: rephrase the question into a search-friendlier form."""
    print("---优化查询---")
    original = state["question"]
    rewritten = question_rewriter.invoke({"question": original})
    print(f"优化后的查询: {rewritten}")
    return {"documents": state["documents"], "question": rewritten}

def web_search(state):
    """Web-search node (correction path): append Tavily hits as Documents.

    Search failures are logged and swallowed so generation can still run
    with whatever documents already exist.
    """
    print("---执行网络搜索---")
    question = state["question"]
    # FIX: copy instead of extending the list held in the graph state in
    # place — in-place mutation of shared state is a subtle aliasing hazard.
    documents = list(state["documents"])

    try:
        results = web_search_tool.invoke({"query": question})
        # FIX: Tavily can return an error string instead of a list of dicts;
        # only convert well-formed dict hits, instead of crashing on
        # result["content"].
        for result in results:
            if isinstance(result, dict) and "content" in result:
                documents.append(
                    Document(page_content=result["content"], metadata=result)
                )
    except Exception as e:
        print(f"---网络搜索出错: {e}---")

    return {"documents": documents, "question": question}

def decide_to_generate(state):
    """Conditional edge: route to the correction path or straight to generation."""
    print("---决定下一步---")
    needs_correction = state["web_search"] == "Yes"
    if needs_correction:
        print("---决定: 转换查询并搜索---")
        return "transform_query"
    else:
        print("---决定: 直接生成---")
        return "generate"

# ================================
# 第八部分：构建工作流图
# ================================

# Build the CRAG state machine over GraphState.
workflow = StateGraph(GraphState)

# Nodes.  The search node is registered as "web_search_node" so its name
# does not clash with the "web_search" key in GraphState.
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("generate", generate)
workflow.add_node("transform_query", transform_query)
workflow.add_node("web_search_node", web_search)

# Edges: retrieve -> grade, then branch on the grading verdict.
workflow.add_edge(START, "retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents",
    decide_to_generate,
    {
        "transform_query": "transform_query",
        "generate": "generate",
    },
)
# Correction path: rewrite query -> web search -> generate.
workflow.add_edge("transform_query", "web_search_node")
workflow.add_edge("web_search_node", "generate")
workflow.add_edge("generate", END)

app = workflow.compile()

# ================================
# 第九部分：运行系统
# ================================

if __name__ == "__main__":
    # FIX: the first test question was a dead assignment that was immediately
    # overwritten (while its comment wrongly claimed the second one was
    # commented out).  Keep the English question as a commented alternative:
    # inputs = {"question": "What are the types of agent memory?"}
    inputs = {"question": "为何山西省旅游资源丰富?"}

    print("\n=== CRAG 工作流开始 ===")
    # Track the last node's state explicitly; the original relied on the
    # loop variable `value` leaking out of the for-loop, which raises
    # NameError if the stream yields nothing.
    final_state = None
    for output in app.stream(inputs):
        for key, value in output.items():
            pprint(f"节点 '{key}':")
            final_state = value
        pprint("\n---\n")

    print("=== 最终结果 ===")
    if final_state and "generation" in final_state:
        pprint(final_state["generation"])
    else:
        print("未生成答案")

"""
CRAG工作流总结：

1. 检索(retrieve): 从向量数据库检索候选文档
2. 评分(grade_documents): 使用LLM评估文档相关性
3. 决策(decide_to_generate): 根据评分结果选择路径
4a. 直接路径: 如果有相关文档 → 生成答案
4b. 校正路径: 如果无相关文档 → 转换查询 → 网络搜索 → 生成答案

这种设计确保了系统能够自动检测和纠正检索错误，
显著提高了RAG系统的准确性和可靠性。
"""