# pip install langchain_chroma  -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install chinese_recursive_text_splitter  -i https://pypi.tuna.tsinghua.edu.cn/simple/

import os
from typing import List, TypedDict, Dict, Any, Literal
from dotenv import find_dotenv, load_dotenv
from pydantic import BaseModel, Field
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableSequence
from langchain_core.output_parsers import StrOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain.schema import Document
from langchain import hub
from langgraph.graph import END, StateGraph
# from chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# from langchain_tavily import TavilySearch

class GraphState(TypedDict):
    """State shared by every node in the graph.

    Attributes:
        question: the user's input question.
        generation: the answer text produced by the LLM.
        web_search: whether a web search is (still) required.
        documents: documents retrieved locally or gathered from the web.
        fallback: whether the answer was produced directly by the LLM.
    """
    question: str
    generation: str
    web_search: bool
    documents: List[str]
    fallback: bool

# Node-name constants used when wiring the graph below.
RETRIEVE = "retrieve"
GRADE_DOCUMENTS = "grade_documents"
GENERATE = "generate"
WEBSEARCH = "websearch"
LLM_FALLBACK = "llm_fallback"

from langchain_ollama import OllamaEmbeddings
# Embedding model served by a local Ollama instance.
embed_model = OllamaEmbeddings(model="bge-m3:latest")

from langchain_ollama import ChatOllama
# Chat model; reasoning=False so qwen3 answers directly without a "think" phase.
llm_model = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

# Structured-output schema for the router's decision.
class RouteQuery(BaseModel):
    """Route the user's question to the most relevant data source."""
    # The Field description is part of the runtime schema sent to the
    # (Chinese-prompted) model, so it stays in Chinese.
    datasource: Literal["vectorstore", "websearch", "llm_fallback"] = Field(
        ...,
        description="根据用户问题，将其路由到向量数据库、网络搜索或直接由LLM生成",
    )

# Wrap the LLM so it emits a parsed RouteQuery instead of free text.
structured_llm_router = llm_model.with_structured_output(RouteQuery)

# Router system prompt (kept in Chinese: it is sent to the model at runtime).
system = """你是一个专家级的路由器，负责判断用户问题应该选择哪个路径。
1. 如果问题是闲聊或寒暄（如“你好”、“讲个笑话”、“你是谁？”），选择llm_fallback；
2. 如果问题与AI技术文档相关，选择vectorstore；
3. 如果需要最新实时信息，选择websearch；
只输出vectorstore、websearch或llm_fallback即可。
"""

# Prompt template: system instructions + the user's question.
route_prompt = ChatPromptTemplate.from_messages(    
    [        
        ("system", system),        
        ("human", "{question}"),    
    ]
)

# Composed routing chain: prompt -> structured router model.
question_router = route_prompt | structured_llm_router

def route_question(state: GraphState) -> str:
    """Entry-point router: decide which node should handle the question.

    Args:
        state: current graph state; only ``question`` is read.

    Returns:
        The name of the next node: WEBSEARCH, RETRIEVE or LLM_FALLBACK.
    """
    print("enter: ---路由问题---")
    source: RouteQuery = question_router.invoke({"question": state["question"]})
    # Map the model's datasource decision onto graph node names.
    # Fix: "websearch" previously fell through to RETRIEVE, leaving the
    # WEBSEARCH entry edge registered on the graph unreachable.
    if source.datasource == "websearch":
        response = WEBSEARCH
    elif source.datasource == "vectorstore":
        response = RETRIEVE
    else:
        response = LLM_FALLBACK
    print(f"exit router >>>: {response}")
    print("\n")
    return response

def create_vectorstore():
    """Load the persisted Chroma store if present, otherwise build it from web pages.

    Returns:
        A retriever over the "rag-chroma" collection.
    """
    chroma_path = "./chroma_langchain_db"

    if os.path.exists(chroma_path):
        # A persisted database already exists: reopen it instead of re-indexing.
        print("正在加载本地向量存储...")
        store = Chroma(
            collection_name="rag-chroma",
            persist_directory=chroma_path,
            embedding_function=embed_model,
        )
        return store.as_retriever()

    print("正在创建新的向量存储...")
    urls = [
        "https://k.sina.com.cn/article_6389563607_17cd900d70010164cg.html",
    ]
    # Fetch every page and flatten the per-URL document lists into one list.
    docs_list = [doc for url in urls for doc in WebBaseLoader(url).load()]

    # Token-based splitting: chunks of ~500 tokens with 100 tokens of overlap.
    splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=500, chunk_overlap=100
    )
    chunks = splitter.split_documents(docs_list)

    # Index the chunks and persist to disk for future runs.
    store = Chroma.from_documents(
        documents=chunks,
        collection_name="rag-chroma",
        embedding=embed_model,
        persist_directory=chroma_path,
    )
    print("向量存储创建完成！")
    return store.as_retriever()

# Initialize the retriever (loads or builds the vector store at import time).
retriever = create_vectorstore()

def retrieve(state: Dict[str, Any]) -> Dict[str, Any]:
    """Fetch the documents most semantically relevant to the question."""
    print("enter: ---RETRIEVE---")
    query = state["question"]
    # Delegate to the module-level retriever built over the Chroma store.
    docs = retriever.invoke(query)
    print(f"exit retrieve >>>: {len(docs)} documents retrieved")
    return {"documents": docs, "question": query}

def web_search(state: GraphState) -> Dict[str, Any]:
    """Add (stubbed) web-search results to the state's document list."""
    print("enter: ---WEB SEARCH---")
    question = state["question"]

    # Reuse any documents gathered so far; default to an empty list.
    docs = state.get("documents", [])

    # Real search (Tavily) is disabled; a fixed document stands in for results.
    stub = Document(page_content="今天会下雪")
    if not docs:
        docs = [stub]
    else:
        # Mutate in place, matching the original append semantics.
        docs.append(stub)
    print(f"exit web search >>>: {len(docs)} documents retrieved")
    return {"documents": docs, "question": question}

# Structured-output schema for the document-relevance grader.
class GradeDocuments(BaseModel):
    """Binary score marking whether a retrieved document is relevant to the question."""
    # Field description is part of the runtime schema; kept in Chinese.
    binary_score: str = Field(        
        description="判断文档是否与问题相关，仅输出 'yes' 或 'no'"
    )

# Wrap the LLM so it emits a parsed GradeDocuments.
structured_llm_grader = llm_model.with_structured_output(GradeDocuments)

# Grader system prompt (kept in Chinese: it is sent to the model at runtime).
system_prompt = """你是一个负责评估检索结果相关性的评分器。
如果文档内容包含了与问题相关的关键词，或者语义上与问题匹配，请评为 'yes'。
如果完全无关，则返回 'no'。请严格输出 'yes' 或 'no'，用于标记该文档是否与用户问题相关。"""

# Prompt template: the document and the question to judge it against.
grade_prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),    
    ("human", "检索到的文档:\n\n {document} \n\n 用户问题: {question}"),
])

# Composed relevance-grading chain.
retrieval_grader = grade_prompt | structured_llm_grader

# Relevance-grading node.
def grade_documents(state: Dict[str, Any]) -> Dict[str, Any]:
    """Filter the retrieved documents down to those relevant to the question.

    If any document is graded irrelevant, the ``web_search`` flag is set so
    the graph can supplement the context with a web search.

    Args:
        state: current graph state; reads ``question`` and ``documents``.

    Returns:
        State dict with the filtered documents and the ``web_search`` flag.
    """
    print("enter:---检查文档与问题的相关性---")
    question = state["question"]
    documents = state["documents"]
    filtered_docs = []
    web_search = False
    for doc in documents:
        # Ask the structured grader whether this document matches the question.
        # (Renamed from `input`, which shadowed the builtin.)
        grader_input = {
            "question": question,
            "document": doc.page_content,
        }
        score = retrieval_grader.invoke(grader_input)
        print(f"input: {grader_input}, score: {score}")
        if score.binary_score.lower() == "yes":
            print("---评分结果：文档相关---")
            filtered_docs.append(doc)
        else:
            print("---评分结果：文档不相关---")
            # Fix: this previously re-assigned False (a no-op), which made the
            # WEBSEARCH branch of decide_to_generate unreachable.
            web_search = True

    return {"documents": filtered_docs, "question": question, "web_search": web_search}

# RAG generation prompt (pulling the hub prompt "rlm/rag-prompt" is disabled).
# prompt = hub.pull("rlm/rag-prompt")
gen_sys_prompt = """
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
"""
gen_prompt = ChatPromptTemplate.from_messages([
    ("system", gen_sys_prompt),    
    ("human", "Context:\n\n {context} \n\n Question: {question}"),
])

# Generation chain: prompt template -> LLM -> plain-string output parser.
generation_chain = gen_prompt | llm_model | StrOutputParser()

def generate(state: GraphState) -> Dict[str, Any]:
    """Produce the final answer from the question and its supporting documents."""
    print("enter: ---GENERATE---")
    question = state["question"]
    documents = state["documents"]
    # Run the RAG chain with the documents as context.
    answer = generation_chain.invoke({"context": documents, "question": question})
    print(f"exit generate >>>: {answer}")
    return {"documents": documents, "question": question, "generation": answer}

def llm_fallback(state: GraphState) -> Dict[str, Any]:
    """Answer directly with the LLM when neither retrieval nor search is needed."""
    print("---LLM FALLBACK---")
    question = state["question"]
    answer = llm_model.invoke(question).content
    # No documents are involved; mark the result as a direct fallback answer.
    return {
        "question": question,
        "generation": answer,
        "documents": [],
        "web_search": False,
        "fallback": True,
    }

# Structured-output schema for the hallucination grader.
class GradeHallucinations(BaseModel):    
    """Binary score: is the generated answer grounded in the retrieved facts?"""    
    # NOTE(review): the prompt below asks for 'yes'/'no' while this field is a
    # bool; structured output is relied on to coerce — confirm the model complies.
    binary_score: bool = Field(        
        description="答案是否基于事实，仅输出 True 或 False"    
    )

# Wrap the LLM so it emits a parsed GradeHallucinations.
# (Rebinds structured_llm_grader; the retrieval_grader chain composed earlier
# already captured the previous object and is unaffected.)
structured_llm_grader = llm_model.with_structured_output(GradeHallucinations)

# Grader system prompt (kept in Chinese: it is sent to the model at runtime).
system = """你是一个评分器，用于判断 LLM 生成的回答是否基于一组检索到的事实。
请严格给出二元评分 'yes' 或 'no'。 'Yes' 表示答案基于/支持所提供的事实，'No' 表示答案未得到事实支持。"""

# Prompt template: the fact set and the generation to be judged.
hallucination_prompt = ChatPromptTemplate.from_messages(
    [        
        ("system", system),        
        ("human", "事实集合: \n\n {documents} \n\n LLM 生成内容: {generation}"),    
    ]
)

# Composed hallucination-grading chain.
hallucination_grader: RunnableSequence = hallucination_prompt | structured_llm_grader

# Structured-output schema for the answer-usefulness grader.
class GradeAnswer(BaseModel):    
    """Binary score: does the generated answer actually resolve the question?"""
    binary_score: bool = Field(        
        description="答案是否有效回应问题，仅输出 True 或 False"    
    )

# Wrap the LLM so it emits a parsed GradeAnswer (rebinds structured_llm_grader;
# previously composed chains captured the earlier objects and are unaffected).
structured_llm_grader = llm_model.with_structured_output(GradeAnswer)

# Grader system prompt (kept in Chinese: it is sent to the model at runtime).
system = """你是一个评分器，用于判断生成的答案是否解决了用户的问题。
请严格给出二元评分 'yes' 或 'no'。'Yes' 表示答案有效回应并解决了问题，'No' 表示未解决。"""

# Prompt template: the question and the generation to be judged.
answer_prompt = ChatPromptTemplate.from_messages(    
    [        
        ("system", system),        
        ("human", "用户问题: \n\n {question} \n\n LLM 生成内容: {generation}"),    
    ]
)
# Composed answer-grading chain.
answer_grader: RunnableSequence = answer_prompt | structured_llm_grader

# Branch taken after document grading.
def decide_to_generate(state):
    """Return WEBSEARCH when the grading pass flagged a web search, else GENERATE."""
    print("---评估文档---")
    if state["web_search"]:
        return WEBSEARCH
    return GENERATE

# Quality gate on the generated answer: groundedness first, then usefulness.
def grade_generation_grounded_in_documents_and_question(state):
    """Grade the generation and return "useful", "not useful" or "not supported"."""
    print("entrer: ---检查幻觉与答案质量---")
    question = state["question"]
    documents = state["documents"]
    generation = state["generation"]

    # Step 1: is the answer grounded in the retrieved documents?
    grounded = hallucination_grader.invoke(
        {"documents": documents, "generation": generation}
    )
    print(f"exit 幻觉评分: {grounded}")
    if not grounded.binary_score:
        return "not supported"

    # Step 2: does the grounded answer actually resolve the question?
    answered = answer_grader.invoke({"question": question, "generation": generation})
    return "useful" if answered.binary_score else "not useful"
    
# Assemble the workflow graph over the shared GraphState.
workflow = StateGraph(GraphState)

# Register one node per processing step.
workflow.add_node(RETRIEVE, retrieve)
workflow.add_node(GRADE_DOCUMENTS, grade_documents)
workflow.add_node(GENERATE, generate)
workflow.add_node(WEBSEARCH, web_search)
workflow.add_node(LLM_FALLBACK, llm_fallback)

# The router chooses the entry node for each question.
workflow.set_conditional_entry_point(    
    route_question,    
    {WEBSEARCH: WEBSEARCH, RETRIEVE: RETRIEVE, LLM_FALLBACK: LLM_FALLBACK},
)
workflow.add_edge(RETRIEVE, GRADE_DOCUMENTS)
# After grading: supplement with a web search or generate directly.
workflow.add_conditional_edges(    
    GRADE_DOCUMENTS,    
    decide_to_generate,    
    {WEBSEARCH: WEBSEARCH, GENERATE: GENERATE},
)
# Self-check the generation: retry, finish, or fall back to a web search.
workflow.add_conditional_edges(    
    GENERATE,    
    grade_generation_grounded_in_documents_and_question,    
    {"not supported": GENERATE, "useful": END, "not useful": WEBSEARCH},
)
workflow.add_edge(WEBSEARCH, GENERATE)
workflow.add_edge(LLM_FALLBACK, END)
app = workflow.compile()

# Export a Mermaid visualization of the graph (disabled).
# app.get_graph().draw_mermaid_png(output_file_path="graph.png")


# run case
def format_response(result):
    """Extract the answer text from a workflow node's output."""
    if not isinstance(result, dict):
        return str(result)
    # Prefer "generation"; fall back to "answer", then to the empty string.
    return result.get("generation") or result.get("answer", "")

# def main():    
#     """自适应 RAG 系统命令行界面"""    
#     print("=== Adaptive RAG System ===")    
#     print("输入 'quit' 退出程序。\n")    
#     while True:        
#         question = input("Question: ").strip()        
#         if question.lower() in ['quit', 'exit', 'q', '']:            
#             break        
#         print("Processing...")        
#         try:            
#             # 调用工作流生成答案            
#             for output in app.stream({"question": question}):                
#                 result = next(iter(output.values()))            
#                 print(f"\nAnswer: {format_response(result)}\n")
#                 # print(f"\n===Answer===: {result}\n")     
#         except Exception as e:
#             print(f"Error: {e}")

def main(question: str = "甲午中日战争是哪年发生的？"):
    """Run the adaptive RAG workflow once and print each step's answer.

    Args:
        question: question to answer; defaults to the original sample question,
            so existing ``main()`` callers are unaffected.
    """
    print("=== Adaptive RAG System ===")
    print("Processing...")
    try:
        # Stream the workflow; each item maps a node name to its output state.
        for output in app.stream({"question": question}):
            result = next(iter(output.values()))
            print(f"\nAnswer(Generated): {format_response(result)}\n")
    except Exception as e:
        # CLI boundary: report the failure instead of crashing.
        print(f"Error: {e}")

if __name__ == "__main__":    
    main()