"""
单智能体图编译
"""
from langchain_openai import ChatOpenAI
from langgraph.constants import START, END
from typing import Callable

from langchain_core.messages import SystemMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph

from owl_ai.domain.agent_config_entity import AgentConfigEntity
from owl_ai.service.agent_flow_service import State
from owl_ai.service.graph.graph_compile import GraphCompile, BasicToolNode, ChatNode


class ChatToolNode(Callable):
    """Graph node wrapping a tool-bound chat LLM.

    On each invocation it prepends a fixed system instruction to the
    conversation held in ``state["messages"]``, asks the LLM (which is
    expected to have the tool schemas bound) for its next move, and returns
    the AI message so the graph can route on any ``tool_calls`` it carries.
    """

    def __init__(self, chat_llm: ChatOpenAI = None, tools: list = None):
        # LLM instance, presumably already bound with the tool schemas
        # (binding happens in the caller — confirm against graph compile).
        self.chat_llm = chat_llm
        # Kept for reference only; __call__ never reads it.
        self.tools = tools
        self.system_prompt = """As a dedicated tool node, your task is to closely follow user instructions and precisely invoke the appropriate tools to collect the necessary data.  Once you are confident that the collected data adequately addresses the user's issue, seamlessly transition to the next node for content integration."""

    def __call__(self, state: State, config: RunnableConfig):
        # System instruction first, then the conversation so far.
        prompt = [SystemMessage(content=self.system_prompt), *state.get("messages")]
        response = self.chat_llm.invoke(prompt, stream=False)

        # Returned dict is merged into graph state by LangGraph.
        return {"messages": [response]}


class ToolRoute:
    """Conditional-edge router for tool-calling loops.

    Inspects the most recent message in the state: when it carries pending
    ``tool_calls`` the flow is sent to the tool-execution node, otherwise it
    proceeds to the configured next node.
    """

    def __init__(self, tools: list, tool_node_name: str = None, next_node_name: str = None):
        # Tool list kept for reference; routing only looks at the last message.
        self.tools = tools
        self.tool_node_name = tool_node_name
        self.next_node_name = next_node_name

    def __call__(self, state: State, config: RunnableConfig):
        # The state may be a bare message list or a dict with a "messages" key.
        if isinstance(state, list):
            last_message = state[-1]
        else:
            history = state.get("messages", [])
            if not history:
                raise ValueError(f"No messages found in input state to tool_edge: {state}")
            last_message = history[-1]

        # Route to the tool node only when the LLM actually requested tools.
        if hasattr(last_message, "tool_calls") and len(last_message.tool_calls) > 0:
            return self.tool_node_name
        return self.next_node_name


class AgentGraphCompile(GraphCompile):
    """Compiles a single-agent configuration into an executable LangGraph graph."""

    @classmethod
    def compile(cls, agent_config: AgentConfigEntity):
        """
        Compile the agent graph.

        Expected ``agent_config`` shape:
        {
            "id": "unique id",
            "name": "name",
            "type": "agent",
            "config": {
                "name": "agent name",
                "llm": {
                    "type": "model type: ollama, openai, ...",
                    "url": "service url",
                    "modelName": "model name",
                    "params": "extra parameters, dict format"
                },
                "systemPrompt": "system prompt",
                "tools": "list of tool ids"
            }
        }

        Returns the compiled (runnable) graph.
        """
        graph = StateGraph(State)

        config = agent_config.config
        tools = config.get("tools")
        agent_name = config.get('name')
        # Two layouts: with tools we build an LLM->tool loop that exits into
        # the agent node; without tools the agent node runs alone.
        if tools:
            # Tool-deciding LLM node (LLM bound with the tool schemas).
            chat_tool_llm = cls.chat_llm_generate(config.get("llm"), tools)
            chat_tool_node = ChatToolNode(chat_llm=chat_tool_llm, tools=tools)
            chat_tool_llm_node_name = f"{agent_name}_llm_tool_node"

            graph.add_node(chat_tool_llm_node_name, chat_tool_node)

            # Node that actually executes the requested tool calls.
            tool_node_name = f"{agent_name}_tool_node"
            graph.add_node(tool_node_name, BasicToolNode(tools=tools))

            # Final agent node that integrates the collected data.
            agent_node_chat_llm = cls.chat_llm_generate(config.get("llm"))
            graph.add_node(agent_name, ChatNode(agent_node_chat_llm, system_prompt=config.get("systemPrompt")))

            graph.add_edge(START, chat_tool_llm_node_name)
            # BUG FIX: the original wired two *unconditional* edges out of the
            # tool-LLM node (to both the tool node and the agent node) plus an
            # unconditional edge back from the tool node, producing a cycle
            # with no exit condition, and never used the ToolRoute router
            # defined above. Route conditionally instead: go to the tool node
            # while the last AI message requests tool calls, otherwise proceed
            # to the agent node.
            graph.add_conditional_edges(
                chat_tool_llm_node_name,
                ToolRoute(tools, tool_node_name=tool_node_name, next_node_name=agent_name),
                [tool_node_name, agent_name],
            )
            # After the tools run, feed their results back to the tool LLM.
            graph.add_edge(tool_node_name, chat_tool_llm_node_name)
            graph.add_edge(agent_name, END)
        else:
            # No tools needed — wire the single chat node directly.
            system_prompt = config.get("systemPrompt")
            agent_node_chat_llm = cls.chat_llm_generate(config.get("llm"))
            graph.add_node(node=agent_name, action=ChatNode(system_prompt=system_prompt, chat_llm=agent_node_chat_llm))

            graph.add_edge(START, agent_name)
            graph.add_edge(agent_name, END)
        return graph.compile()
