from typing import Literal

from dotenv import load_dotenv
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_ollama import ChatOllama
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.graph import MessagesState
from pydantic import BaseModel, Field
from langchain_core.messages import convert_to_messages
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode
from langgraph.prebuilt import tools_condition
from IPython.display import Image, display


def generate_query_or_respond(state: MessagesState):
    """Call the model on the conversation so far.

    The model is given the retriever tool; it either emits a tool call
    (routing the graph to retrieval) or answers the user directly.

    Returns:
        Partial state update appending the model's reply.
    """
    tool_capable_model = model.bind_tools([retriever_tool])
    reply = tool_capable_model.invoke(state["messages"])
    return {"messages": [reply]}


class GradeDocuments(BaseModel):
    """Structured-output schema for the relevance grader.

    Forcing the grader model into this schema yields a single binary flag
    instead of free text, so the routing decision is trivial to parse.
    """

    # Kept as a plain "yes"/"no" string; `grade_documents` branches on it.
    binary_score: str = Field(
        description="Relevance score: 'yes' if relevant, or 'no' if not relevant"
    )


def grade_documents(
        state: MessagesState,
) -> Literal["generate_answer", "rewrite_question"]:
    """Decide whether the retrieved context is relevant to the user question.

    Reads the first message (the original question) and the last message
    (the retrieved tool output), asks the grader model for a structured
    binary score, and returns the name of the next graph node.

    Returns:
        "generate_answer" if the context is graded relevant,
        "rewrite_question" otherwise.
    """
    question = state["messages"][0].content
    context = state["messages"][-1].content

    prompt = GRADE_PROMPT.format(question=question, context=context)
    response = model.with_structured_output(GradeDocuments).invoke(
        [{"role": "user", "content": prompt}]
    )
    # Normalize the score: models often reply "Yes", " yes\n", etc., and a
    # strict == "yes" comparison would silently route to the rewrite branch.
    if response.binary_score.strip().lower() == "yes":
        return "generate_answer"
    return "rewrite_question"


def rewrite_question(state: MessagesState):
    """Reformulate the original user question with the model.

    Uses the first message in the state as the question and replaces the
    conversation tail with the rewritten question as a new user message.
    """
    original_question = state["messages"][0].content
    rewritten = model.invoke(
        [{"role": "user", "content": REWRITE_PROMPT.format(question=original_question)}]
    )
    return {"messages": [{"role": "user", "content": rewritten.content}]}


def generate_answer(state: MessagesState):
    """Produce the final answer from the question and retrieved context.

    The question is taken from the first message, the context from the
    last message; both are formatted into GENERATE_PROMPT.
    """
    question, context = state["messages"][0].content, state["messages"][-1].content
    answer = model.invoke(
        [{"role": "user", "content": GENERATE_PROMPT.format(question=question, context=context)}]
    )
    return {"messages": [answer]}


if __name__ == '__main__':
    load_dotenv(override=True)
    model = ChatOllama(model="qwen3:30b", base_url="http://192.168.97.217:11434")

    def _demo_state(tool_content=None):
        """Build the recurring demo conversation as a MessagesState dict.

        Always contains the user question and an assistant tool call; when
        `tool_content` is given, a matching tool message is appended as the
        retrieved context. Extracted because the same conversation was
        copy-pasted four times below.
        """
        raw_messages = [
            {
                "role": "user",
                "content": "Lilian Weng 如何分类奖励黑客行为?",
            },
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": "1",
                        "name": "retrieve_blog_posts",
                        "args": {"query": "奖励黑客的类型"},
                    }
                ],
            },
        ]
        if tool_content is not None:
            raw_messages.append(
                {"role": "tool", "content": tool_content, "tool_call_id": "1"}
            )
        return {"messages": convert_to_messages(raw_messages)}

    GRADE_PROMPT = (
        "你是一个评估检索文档与用户问题相关性的评分员。\n"
        "以下是检索到的文档内容：\n\n{context}\n\n"
        "以下是用户提出的问题：{question}\n"
        "如果文档中包含与问题相关的关键词或语义，请评为相关。\n"
        "请仅回答 'yes' 或 'no' 来判断文档是否与问题相关。"
    )

    # Irrelevant context -> expect the "rewrite_question" branch.
    demo_state = _demo_state("喵")
    print(f"判断文档与问题相关性:{grade_documents(demo_state)}")

    # Relevant context -> expect the "generate_answer" branch.
    demo_state = _demo_state(
        "奖励黑客可分为两种类型：环境或目标定义错误，以及奖励篡改。"
    )
    print(f"判断文档与问题相关性:{grade_documents(demo_state)}")

    REWRITE_PROMPT = (
        "请理解以下输入的语义意图。\n"
        "这是原始问题：\n"
        "-------\n"
        "{question}\n"
        "-------\n"
        "请用更清晰、更有针对性的问题重新表达："
    )

    response = rewrite_question(_demo_state("meow"))
    print(response["messages"][-1]["content"])

    GENERATE_PROMPT = (
        "你是一个用于问答任务的智能助手。\n"
        "请使用以下检索到的上下文信息来回答问题。\n"
        "如果你不知道答案，请直接说你不知道。\n"
        "最多使用三句话，简明扼要地回答。\n"
        "问题：{question} \n"
        "上下文：{context}"
    )

    # Load and split the blog posts, then index them for retrieval.
    urls = [
        "https://lilianweng.github.io/posts/2024-11-28-reward-hacking/",
        "https://lilianweng.github.io/posts/2024-07-07-hallucination/",
        "https://lilianweng.github.io/posts/2024-04-12-diffusion-video/",
    ]
    docs = [WebBaseLoader(url).load() for url in urls]
    docs_list = [doc for loaded in docs for doc in loaded]

    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        chunk_size=100, chunk_overlap=50
    )
    doc_splits = text_splitter.split_documents(docs_list)
    # Build the in-memory vector store over the split documents.
    embedding = DashScopeEmbeddings(model="text-embedding-v1")
    vectorstore = InMemoryVectorStore.from_documents(
        documents=doc_splits, embedding=embedding
    )
    retriever = vectorstore.as_retriever()
    # Wrap the retriever as a tool the model can call.
    retriever_tool = create_retriever_tool(
        retriever,
        "retrieve_blog_posts",
        "搜索并返回关于 Lilian Weng 博客文章的信息。",
    )

    # No tool message here: generate_answer falls back to the (empty)
    # assistant message as context.
    response = generate_answer(_demo_state())
    # pretty_print() writes to stdout and returns None; wrapping it in
    # print() previously emitted a stray "None" line.
    response["messages"][-1].pretty_print()

    # Assemble the LangGraph state machine.
    workflow = StateGraph(MessagesState)

    # Nodes we cycle between.
    workflow.add_node(generate_query_or_respond)
    workflow.add_node("retrieve", ToolNode([retriever_tool]))
    workflow.add_node(rewrite_question)
    workflow.add_node(generate_answer)

    workflow.add_edge(START, "generate_query_or_respond")

    # Decide whether to retrieve: the model either called the retriever
    # tool (go to "retrieve") or answered directly (finish).
    workflow.add_conditional_edges(
        "generate_query_or_respond",
        tools_condition,
        {
            "tools": "retrieve",
            END: END,
        },
    )

    # After retrieval, grade_documents routes to answer or rewrite.
    workflow.add_conditional_edges(
        "retrieve",
        grade_documents,
    )
    workflow.add_edge("generate_answer", END)
    workflow.add_edge("rewrite_question", "generate_query_or_respond")

    # Compile and render the graph for inspection.
    graph = workflow.compile()
    graph_image = graph.get_graph().draw_mermaid_png()
    with open("graph.png", "wb") as f:
        f.write(graph_image)

    # Stream a full end-to-end run, printing each node's latest message.
    for chunk in graph.stream(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": "Lilian Weng 如何分类奖励黑客行为?",
                    }
                ]
            }
    ):
        for node, update in chunk.items():
            print("Update from node", node)
            update["messages"][-1].pretty_print()
            print("\n\n")
