import os
from typing import List, Optional
from typing_extensions import TypedDict

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain import hub
from langchain_community.chat_models import ChatZhipuAI
from pydantic import BaseModel, Field
from langgraph.graph import END, StateGraph, START
from dotenv import load_dotenv

# Load environment variables (e.g. ZHIPUAI_API_KEY) from a local .env file.
load_dotenv()

# 1. Build the document corpus and the vector store.
SOURCE_URLS = [
    "https://lilianweng.github.io/posts/2023-06-23-agent/",
    "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
    "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]

# Fetch every page and flatten the per-URL document lists into one list.
loaded_documents = []
for source_url in SOURCE_URLS:
    loaded_documents.extend(WebBaseLoader(source_url).load())

# Split into ~250-token chunks with no overlap for embedding.
splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    chunk_size=250, chunk_overlap=0
)
chunks = splitter.split_documents(loaded_documents)

# Local BGE embedding model, CPU inference, normalized vectors.
embeddings = HuggingFaceEmbeddings(
    model_name="D:/ideaSpace/MyPython/models/bge-small-zh-v1.5",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)

# Index the chunks in Chroma and expose a retriever used by the graph nodes.
retriever = Chroma.from_documents(
    documents=chunks,
    collection_name="rag-chroma",
    embedding=embeddings,
).as_retriever()

# 2. Grading models (structured-output schemas for the LLM graders).
class GradeDocuments(BaseModel):
    """Relevance grade for a retrieved document.

    NOTE(review): this docstring is part of the schema sent to the LLM via
    with_structured_output, so keep it descriptive of the grading task.
    """
    # Expected values: '是' (relevant) or '否' (not relevant).
    binary_score: str = Field(description="文档是否相关，'是'或'否'")

class GradeHallucinations(BaseModel):
    """Groundedness grade: is the generated answer supported by the documents?

    NOTE(review): this docstring is part of the schema sent to the LLM via
    with_structured_output.
    """
    # Expected values: '是' (grounded in the facts) or '否' (hallucinated).
    binary_score: str = Field(description="答案是否基于事实，'是'或'否'")

class GradeAnswer(BaseModel):
    """Usefulness grade: does the generated answer resolve the question?

    NOTE(review): this docstring is part of the schema sent to the LLM via
    with_structured_output.
    """
    # Expected values: '是' (answers the question) or '否' (does not).
    binary_score: str = Field(description="答案是否解决问题，'是'或'否'")

# 3. Initialize the LLM (ZhipuAI GLM-4).
llm = ChatZhipuAI(
    api_key=os.getenv("ZHIPUAI_API_KEY"),  # loaded from .env by load_dotenv()
    model="glm-4",
    temperature=0  # deterministic output for grading and generation
)

# 4. Grader factory.
def create_grader(prompt_template, grader_model):
    """Build a grading chain: chat prompt -> LLM with structured output.

    Args:
        prompt_template: mapping with "system" and "human" prompt strings.
        grader_model: pydantic model describing the structured grade.

    Returns:
        A runnable that emits an instance of ``grader_model``.
    """
    messages = [
        ("system", prompt_template["system"]),
        ("human", prompt_template["human"]),
    ]
    chat_prompt = ChatPromptTemplate.from_messages(messages)
    structured_llm = llm.with_structured_output(grader_model)
    return chat_prompt | structured_llm

# Grader prompt configurations: each entry pairs a system/human prompt with
# the pydantic model used for structured output. Prompt text is in Chinese
# because the graders run against a Chinese LLM (GLM-4).
grader_configs = {
    "retrieval": {
        "system": """你是一个评估检索文档与用户问题相关性的评分器。\n 
            规则：1. 文档直接回答问题或提供关键信息 → '是'
                 2. 文档与问题无关或信息不充分 → '否'""",
        "human": "检索到的文档: \n\n {document} \n\n 用户问题: {question}",
        "grader_model": GradeDocuments
    },
    "hallucination": {
        "system": """你是一个评估LLM生成内容是否基于事实的评分器。\n 
            规则：1. 答案基于/由事实支持 → '是'
                 2. 答案包含未基于文档的信息 → '否'""",
        "human": "事实集合: \n\n {documents} \n\n LLM生成内容: {generation}",
        "grader_model": GradeHallucinations
    },
    "answer": {
        "system": """你是一个评估答案是否解决问题的评分器。\n 
            规则：1. 答案解决了问题 → '是'
                 2. 答案未解决问题 → '否'""",
        "human": "用户问题: \n\n {question} \n\n LLM生成内容: {generation}",
        "grader_model": GradeAnswer
    }
}

# Instantiate one grading chain per config; keys: "retrieval",
# "hallucination", "answer".
graders = {name: create_grader(config, config["grader_model"])
           for name, config in grader_configs.items()}

# 5. Question rewriter and RAG generation chain.
rewrite_system = """你是一个问题重写器，将输入问题转换为更适合检索的版本。"""
rewrite_prompt = ChatPromptTemplate.from_messages([
    ("system", rewrite_system),
    ("human", "初始问题: \n\n {question} \n 改进问题:"),
])
# Rewrites a user question into a retrieval-friendlier phrasing.
question_rewriter = rewrite_prompt | llm | StrOutputParser()

# Standard RAG prompt pulled from the LangChain hub.
prompt = hub.pull("rlm/rag-prompt")
rag_chain = prompt | llm | StrOutputParser()

# 6. Graph state and nodes.
class GraphState(TypedDict):
    """
    State passed between graph nodes.

    Attributes:
        question: the current (possibly rewritten) user question
        generation: the LLM-generated answer
        documents: retrieved documents
            (NOTE(review): actually Document objects from the retriever,
            not plain str — annotation kept as-is for compatibility)
        retry_count: number of query rewrites so far; may be absent until the
            first rewrite, so readers use state.get("retry_count", 0)
    """
    question: str
    generation: str
    documents: List[str]
    # PEP 589 forbids default values on TypedDict fields — the original
    # `retry_count: int = 0` is invalid (raises TypeError on recent
    # typing/typing_extensions) and the default was never applied anyway.
    retry_count: int

def retrieve(state: "GraphState") -> dict:
    """Retrieve documents for the current question.

    Args:
        state: current graph state (reads "question").

    Returns:
        Partial state update carrying the retrieved "documents".
    """
    print("\n---检索文档---")
    query = state["question"]
    return {"documents": retriever.invoke(query), "question": query}

def grade_documents(state: "GraphState") -> dict:
    """Keep only the documents the retrieval grader marks as relevant.

    Args:
        state: current graph state (reads "question" and "documents").

    Returns:
        Partial state update whose "documents" holds relevant docs only.
    """
    print("\n---评估文档相关性---")
    question = state["question"]

    relevant = []
    for document in state["documents"]:
        try:
            verdict = graders["retrieval"].invoke({
                "question": question,
                "document": document.page_content
            })
            is_relevant = verdict.binary_score == "是"
        except Exception as e:
            # Best-effort grading: a failed grade simply drops the document.
            print(f"评分出错: {e}")
            continue
        if is_relevant:
            relevant.append(document)
            print(f"√ 相关文档: {document.page_content[:50]}...")
        else:
            print(f"× 不相关文档: {document.page_content[:50]}...")

    return {"documents": relevant, "question": question}

def generate(state: "GraphState") -> dict:
    """Produce an answer from the retrieved documents via the RAG chain.

    Args:
        state: current graph state (reads "question" and "documents").

    Returns:
        Partial state update with the new "generation".
    """
    print("\n---生成答案---")
    question = state["question"]
    context_docs = state["documents"]

    answer = rag_chain.invoke({
        "context": context_docs,
        "question": question
    })
    print(f"生成内容: {answer[:100]}...")
    return {"generation": answer, "documents": context_docs, "question": question}

def transform_query(state: "GraphState") -> dict:
    """Rewrite the question into a retrieval-friendlier form.

    Args:
        state: current graph state (reads "question" and "retry_count").

    Returns:
        Partial state update: rewritten question, bumped retry counter, and
        cleared documents so the next retrieval starts fresh.
    """
    print("\n---重写问题---")
    original_question = state["question"]
    attempts = state.get("retry_count", 0) + 1

    rewritten = question_rewriter.invoke({"question": original_question})
    print(f"原问题: {original_question}\n改进后: {rewritten}")
    return {
        "question": rewritten,
        "retry_count": attempts,
        "documents": []
    }

def decide_to_generate(state: "GraphState") -> str:
    """Route after document grading: generate, rewrite, or stop.

    Args:
        state: current graph state (reads "documents" and "retry_count").

    Returns:
        "end" after 3 or more rewrites, "transform_query" when no relevant
        documents survived grading, otherwise "generate".
    """
    print("\n---决定下一步---")
    surviving_docs = state["documents"]
    attempts = state.get("retry_count", 0)

    if attempts >= 3:
        print("× 达到最大重试次数(3)，终止流程")
        return "end"
    if surviving_docs:
        print("√ 有相关文档，将生成答案")
        return "generate"
    print("! 无相关文档，将重写问题")
    return "transform_query"

def grade_answer(state: "GraphState") -> str:
    """Grade the generated answer: groundedness first, then usefulness.

    Args:
        state: current graph state (reads "question", "documents",
            "generation", "retry_count").

    Returns:
        "useful" when the answer is grounded and resolves the question,
        "end" when quality is still poor after 2+ rewrites, otherwise
        "transform_query" to trigger another rewrite. Any grading error
        also routes to "transform_query".
    """
    print("\n---评估答案质量---")
    question = state["question"]
    documents = state["documents"]
    generation = state["generation"]
    retry_count = state.get("retry_count", 0)

    # Stage 1: is the answer grounded in the retrieved documents?
    try:
        grounded = graders["hallucination"].invoke({
            "documents": documents,
            "generation": generation
        })
        if not hasattr(grounded, 'binary_score'):
            raise ValueError("返回结果缺少binary_score字段")

        print("评估是否基于事实成功，结果：", grounded.binary_score)

        if grounded.binary_score == "否":
            print("× 生成内容包含幻觉")
            return "transform_query"
    except Exception as e:
        print(f"---是否基于事实评分出错: {e}---，将重试")
        return "transform_query"

    # Stage 2: does the answer actually resolve the question?
    try:
        usefulness = graders["answer"].invoke({
            "question": question,
            "generation": generation
        })
        if not hasattr(usefulness, 'binary_score'):
            raise ValueError("返回结果缺少binary_score字段")

        print("评估答案质量成功，结果：", usefulness.binary_score)

        if usefulness.binary_score == "是":
            print("√ 答案质量合格")
            return "useful"
        if retry_count >= 2:
            print("× 达到最大重试次数，终止流程")
            return "end"
        print("! 答案质量不合格，将重试")
        return "transform_query"
    except Exception as e:
        print(f"---答案质量评分出错: {e}---，将重试")
        return "transform_query"

def end_node(state: "GraphState") -> dict:
    """Terminal node: supply an apology when no answer was generated."""
    print("\n---流程结束---")
    if "generation" in state:
        return state
    return {"generation": "抱歉，我无法找到合适的答案。"}

# 7. Build the workflow graph.
workflow = StateGraph(GraphState)

# Register nodes.
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("generate", generate)
workflow.add_node("transform_query", transform_query)
workflow.add_node("end", end_node)

# Wire the flow: retrieve -> grade, then route on grading outcome.
workflow.add_edge(START, "retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents",
    decide_to_generate,
    {
        "transform_query": "transform_query",
        "generate": "generate",
        "end": "end"
    },
)
# A rewritten query loops back into retrieval.
workflow.add_edge("transform_query", "retrieve")
# After generation, grade the answer: useful -> finish, hallucination or
# poor quality -> rewrite, retry budget exhausted -> end node.
workflow.add_conditional_edges(
    "generate",
    grade_answer,
    {
        "transform_query": "transform_query",
        "useful": END,
        "end": "end"
    },
)
workflow.add_edge("end", END)

# Compile the workflow into a runnable app.
app = workflow.compile()
# try:
#     # 先获取 PNG 二进制数据
#     png_data = app.get_graph(xray=True).draw_mermaid_png()
#
#     # 将二进制数据保存到当前目录下的 graph.png
#     with open("./data/self_rag_graph.png", "wb") as f:
#         f.write(png_data)
#
#     print("已保存为：graph.png")
# except Exception as e:
#     print(f"保存图片时出错: {e}")

# 8. Test run
if __name__ == "__main__":
    test_questions = [
        "解释不同类型的智能体记忆是如何工作的？",
        "什么是思维链提示？",
        "如何对LLM进行对抗攻击？"
    ]

    for question in test_questions:
        print(f"\n{'='*50}")
        print(f"处理问题: {question}")
        print(f"{'='*50}")

        inputs = {"question": question}
        final_answer = None
        # BUG FIX: app.stream() yields {node_name: node_output} per executed
        # node, so the original `key == "generation"` compared against node
        # names ("generate", "end", ...) and never matched — the final answer
        # was never printed. Read "generation" out of the node output instead,
        # keeping the last one produced.
        for output in app.stream(inputs):
            for node_name, node_state in output.items():
                if isinstance(node_state, dict) and node_state.get("generation"):
                    final_answer = node_state["generation"]
        if final_answer is not None:
            print(f"\n最终答案: {final_answer}")