from typing import Annotated, Dict, Any, TypedDict

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain_ollama import ChatOllama
from langgraph.constants import START, END
from langgraph.graph import add_messages, StateGraph

from owl_ai.domain.agent_config_entity import AgentConfigEntity
from owl_ai.service.rag.rag_service import RAGEmbeddingService


def update_node_params(state: Dict[str, Any], node_output: Any) -> Dict[str, Any]:
    """Reducer for the ``node_params`` state field.

    LangGraph calls a field reducer with the *current value of that field*
    (here: the ``node_params`` dict itself) and the update returned by a node.
    The original implementation treated the first argument as the whole agent
    state (``if "node_params" not in state``), which made every invocation
    return only the newest node's entry and drop the entries accumulated by
    earlier nodes. It also mutated its input in place.

    Args:
        state: current ``node_params`` value (may be ``None`` or empty on the
            first invocation).
        node_output: per-node update to merge in, keyed by node name.

    Returns:
        A new dict containing all previously recorded node entries plus the
        entries from ``node_output``.
    """
    # Copy instead of mutating: reducers should not alter the incoming value.
    merged: Dict[str, Any] = dict(state or {})
    if node_output:
        merged.update(node_output)
    return merged


class RAGAgentState(TypedDict):
    """
    Agent state shared by all workflow nodes.
    """

    # Conversation history; `add_messages` appends new messages instead of
    # replacing the list.
    messages: Annotated[list, add_messages]
    # Per-node inputs/outputs keyed by node name; each entry may hold
    # "input" and "output" values. Merged via the update_node_params reducer.
    node_params: Annotated[dict, update_node_params]


@tool
def rag_query(query_list: list[str]):
    """
    知识库查询，支持多个查询关键词
    Args:
        query_list: 查询关键词列表

    Returns: 查询结果
    """
    # NOTE: the docstring above doubles as the tool description presented to
    # the LLM by @tool, so it is kept verbatim.
    # Run one vector search per keyword and flatten all page contents.
    return [
        document.page_content
        for keyword in query_list
        for document in RAGEmbeddingService.document_embedding_search(
            "knowledge_base_test", keyword
        )
    ]


class RAGTools:
    """Static helpers for querying the RAG knowledge base."""

    @classmethod
    def rag_query(cls, query_list: list[str]):
        """Search the knowledge base with multiple query keywords.

        Args:
            query_list: list of query keywords.

        Returns: the matched document contents, flattened across all queries.
        """
        # One embedding search per keyword; collect every hit's page content.
        return [
            document.page_content
            for keyword in query_list
            for document in RAGEmbeddingService.document_embedding_search(
                "knowledge_base_test", keyword
            )
        ]


class RAGAgent:
    """Agent with knowledge-base query capability. Workflow:

    1. rag_node: asks the LLM to generate search queries (as tool calls).
    2. rag_tool_node: executes the tool calls against the knowledge base.
    3. summarize_node: asks the LLM to summarize the retrieved content.
    """

    # NOTE(review): endpoint and model are hard-coded — consider sourcing
    # them from AgentConfigEntity so deployments are configurable.
    chat_llm = ChatOllama(base_url="http://182.148.114.142:11434",
                          model="qwen2.5:14b-instruct",
                          verbose=True, stream=True,
                          # Bug fix: temperature must be numeric, not "0.1".
                          temperature=0.1,
                          num_predict=16384,
                          num_ctx=16384)

    @classmethod
    def rag_node(cls, state: RAGAgentState):
        """Knowledge-base query node: the LLM derives search queries from the
        user's question and requests the ``rag_query`` tool.

        Returns:
            A ``node_params`` update recording this node's input messages and
            the AI message (which may carry ``tool_calls``).
        """
        system_prompt = """你需要根据用户的问题，从多个维度生成查询语句，并调用工具查询相关知识"""
        llm_messages = [
            SystemMessage(content=system_prompt)
        ]
        llm_messages.extend(state['messages'])

        ai_message = cls.chat_llm.bind_tools([rag_query]).invoke(llm_messages)
        return {
            "node_params": {
                "rag_node": {
                    "input": state['messages'],
                    "output": ai_message
                }
            }
        }

    @classmethod
    def rag_tool_node(cls, state: RAGAgentState):
        """Tool-execution node: runs every tool call emitted by ``rag_node``
        and collects the retrieved documents.

        Returns:
            A ``node_params`` update with the tool calls as ``input`` and the
            flattened retrieval results as ``output``. Returns an empty update
            (never ``None``) when ``rag_node`` produced no output.
        """
        tools = [rag_query]
        tools_by_name = {t.name: t for t in tools}

        # Bug fix: `.get("node_params")` could be None; default to {}.
        rag_node_params = state.get("node_params", {}).get("rag_node")
        if not rag_node_params:
            # Bug fix: the original returned None here, which is not a valid
            # LangGraph node update; return an empty update instead.
            return {"node_params": {}}

        tool_calls = rag_node_params.get("output").tool_calls
        # Bug fix: the original `return`ed inside the loop, executing only the
        # FIRST tool call and silently discarding the rest. Execute them all
        # and flatten the results (rag_query returns a list of page contents,
        # so a single call yields the same output shape as before).
        tool_results = []
        for tool_call in tool_calls:
            tool_results.extend(
                tools_by_name[tool_call["name"]].invoke(tool_call["args"])
            )
        return {
            "node_params": {
                "rag_tool_node": {
                    "input": tool_calls,
                    "output": tool_results
                }
            }
        }

    @classmethod
    def summarize_node(cls, state: RAGAgentState):
        """Summary node: the LLM summarizes the retrieved knowledge for the
        user's question.

        Returns:
            A ``node_params`` update with the summary AI message as ``output``.
        """
        system_prompt = """你需要根据用户的问题，生成总结\n\n"""

        # Bug fix: guard against a missing node_params dict.
        rag_tool_params = state.get("node_params", {}).get("rag_tool_node")
        if rag_tool_params:
            rag_node_output = rag_tool_params.get("output")
            system_prompt += f"""根据用户指令，在知识库中查询相关的内容如下：{rag_node_output}"""

        llm_messages = list(state.get("messages", []))
        # NOTE(review): the system message is appended AFTER the history,
        # preserving the original ordering — confirm this is intentional.
        llm_messages.append(SystemMessage(content=system_prompt))

        ai_message = cls.chat_llm.invoke(llm_messages)
        return {
            "node_params": {
                "summarize_node": {
                    "output": ai_message
                }
            }
        }

    @classmethod
    def compile(cls, config: AgentConfigEntity):
        """Build and compile the linear workflow graph.

        Args:
            config: agent configuration (currently unused; reserved).

        Returns:
            The compiled LangGraph runnable.
        """
        graph = StateGraph(RAGAgentState)

        graph.add_node("rag_node", cls.rag_node)
        graph.add_node("rag_tool_node", cls.rag_tool_node)
        graph.add_node("summarize_node", cls.summarize_node)

        graph.add_edge(START, "rag_node")
        graph.add_edge("rag_node", "rag_tool_node")
        graph.add_edge("rag_tool_node", "summarize_node")
        graph.add_edge("summarize_node", END)

        return graph.compile()


if __name__ == '__main__':
    # Manual smoke test: build the workflow and run a single query end-to-end.
    compiled_workflow = RAGAgent.compile(AgentConfigEntity())
    initial_state = {
        "messages": [HumanMessage(content="云上贵州南明区系统前端升级步骤")],
        "node_params": {},
    }
    result = compiled_workflow.invoke(initial_state)
    print(result)
