"""
Agent是拥有自主推理能力的智能体，它可以自主决定调用工具来完成任务。Agent由三个节点组成：
1. 工具选择节点：Agent根据当前的任务和环境，选择合适的工具来完成任务。
2. 工具调用节点：Agent根据工具选择节点的结果，调用相应的工具来完成任务。
3. 工具结果处理节点：Agent根据工具调用节点的结果，处理工具的返回结果，生成新的任务或结果。
"""
from langchain_core.messages import SystemMessage, ToolMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langgraph.constants import START, END
from langgraph.graph import add_messages, StateGraph
from typing import TypedDict, Annotated, Callable

from owl_ai.domain.agent_config_entity import AgentConfigEntity
from owl_ai.service.graph.graph_compile import GraphCompile
from owl_ai.service.tools.tool_center import ToolCenter


@tool
def tool_finish():
    """
    End the conversation.

    This docstring is the tool description shown to the LLM by the ``@tool``
    decorator; the tool itself does nothing and returns an empty string.
    NOTE(review): binding of this tool is currently commented out in
    ``ChatLLMToolNode`` — confirm whether it should be re-enabled.
    """
    return ""


def chat_llm_generate(config: dict, chat_stream: bool = True):
    """
    Build a chat LLM instance from an agent config dict.

    Args:
        config: agent configuration; reads keys "type" ("ollama" / "openai"),
            "baseUrl" and "modelName".
        chat_stream: True for plain chat (streaming allowed); False when the
            model will be used for tool calling, where streamed chunks would
            split tool-call payloads.

    Returns:
        A chat model instance, or None when config["type"] is not recognized
        (callers are expected to handle that case).
    """
    llm_type = config.get("type")
    base_url = config.get("baseUrl")
    model_name = config.get("modelName")

    chat_llm = None
    if llm_type == "ollama":
        # Provider packages are imported lazily so only the configured one is needed.
        from langchain_ollama import ChatOllama
        chat_llm = ChatOllama(
            base_url=base_url,
            model=model_name,
            # Fully disable streaming in tool-calling mode (chat_stream=False),
            # otherwise leave it enabled.
            disable_streaming=False if chat_stream else "tool_calling",
        )
    elif llm_type == "openai":
        from langchain_openai import ChatOpenAI
        chat_llm = ChatOpenAI(
            base_url=base_url,
            # Fix: "model_name" is the deprecated alias; "model" is the current
            # parameter. Also removed the stray "stream=True" kwarg, which is not
            # a ChatOpenAI constructor field and leaked into model_kwargs.
            model=model_name,
            # Only suppress streaming for tool-calling invocations.
            disable_streaming="tool_calling",
        )
    return chat_llm


class AgentState(TypedDict):
    """Graph state shared by all nodes of the agent graph."""
    # Conversation history. The add_messages reducer merges each node's
    # returned messages into the existing list (append / update-by-id)
    # instead of overwriting it.
    messages: Annotated[list, add_messages]


class ChatLLMToolNode(Callable):
    """
    Chat LLM node with tools bound.

    Wraps a chat model (built from the agent config) with the configured tools
    bound, and invokes it with a system prompt prepended to the conversation.
    """

    def __init__(self, agent_config: AgentConfigEntity, tools: list = None):
        """
        Args:
            agent_config: agent configuration entity; its ``config`` dict is
                read for the model settings and the "systemPrompt" key.
            tools: tool objects to bind to the model; may be None/empty.
        """
        config = agent_config.config
        system_prompt = config.get("systemPrompt")
        # Fix: the configured systemPrompt was read and then discarded in favor
        # of a hard-coded prompt. Keep the tool-usage guidance (the model must
        # know to stop calling tools) but prepend the configured prompt.
        tool_guidance = "\n你作为一个工具助手，请理解工具用途然后决定是否需要调用工具。当你认为不再需要工具，请使用finish结束，后续流程会对你的数据进行总结"
        self.system_prompt = (system_prompt or "") + tool_guidance

        # Streaming is disabled when tools are bound: streamed chunks would
        # split tool-call payloads.
        chat_llm = chat_llm_generate(config, chat_stream=not tools)

        if tools:
            # NOTE(review): tool_finish is referenced by the guidance prompt but
            # is not bound here — confirm whether it should be appended.
            self.chat_llm = chat_llm.bind_tools(list(tools))
        else:
            self.chat_llm = chat_llm

    def __call__(self, state: AgentState, config: RunnableConfig):
        """Invoke the model on [system prompt] + conversation; return the reply."""
        llm_messages = [SystemMessage(self.system_prompt)]
        llm_messages.extend(state.get("messages"))

        ai_message = self.chat_llm.invoke(input=llm_messages)

        return {
            "messages": [ai_message]
        }


class ChatLLMNode(Callable):
    """
    Plain chat LLM node (no tools).

    Holds a streaming chat model built from the agent config plus the
    configured system prompt, and runs the model over the conversation.
    """

    def __init__(self, agent_config: AgentConfigEntity):
        """Build the chat model and remember the configured system prompt."""
        cfg = agent_config.config
        self.system_prompt = cfg.get("systemPrompt")
        self.chat_llm = chat_llm_generate(cfg, chat_stream=True)

    def __call__(self, state: AgentState, config: RunnableConfig):
        """Invoke the model on [system prompt] + conversation; return the reply."""
        prompt_messages = [SystemMessage(self.system_prompt), *state.get("messages")]
        response = self.chat_llm.invoke(input=prompt_messages)
        return {
            "messages": [response]
        }


class ToolExecuteNode(Callable):
    """
    Tool execution node.

    Runs every tool call requested by the last AI message and returns one
    ToolMessage per call.
    """

    def __init__(self, tools: list):
        """
        Args:
            tools: tool objects exposing ``.name`` and ``.invoke(args)``.
        """
        self.tools = tools
        # Name -> tool lookup; with duplicate names the last one wins.
        self.tools_by_name = {t.name: t for t in tools}

    def __call__(self, state: AgentState, config: RunnableConfig):
        last_message = state.get("messages")[-1]

        outputs = []
        for tool_call in last_message.tool_calls:
            name = tool_call.get("name")
            tool_impl = self.tools_by_name.get(name)
            if tool_impl:
                content = tool_impl.invoke(tool_call.get("args"))
            else:
                # Fix: unknown tools were previously skipped silently, leaving
                # the tool_call without a ToolMessage reply — chat APIs require
                # one response per tool call, so answer with an error instead.
                content = f"Error: tool '{name}' is not available."
            outputs.append(ToolMessage(
                content=content,
                name=name,
                tool_call_id=tool_call["id"], ))
        return {
            "messages": outputs
        }


def tool_condition_route(state: AgentState, config: RunnableConfig):
    """
    Conditional-edge router placed after the tool LLM node.

    Returns "tool_execute_node" when the last AI message requested tools,
    otherwise END. A request for the ``tool_finish`` tool is treated as an
    explicit stop signal and routes to END.
    """
    last_message = state.get("messages")[-1]

    if last_message.tool_calls:
        if last_message.tool_calls[0].get('name') == 'tool_finish':
            # NOTE(review): this mutates the state's message list inside a
            # routing function; LangGraph routers are not expected to mutate
            # state — confirm the removal actually persists through the
            # add_messages reducer.
            state.get("messages").remove(last_message)
            return END
        return "tool_execute_node"
    else:
        return END


class ChatAgentGraphCompile(GraphCompile):
    """
    Compiles the agent StateGraph: a tool-calling loop when tools are
    configured, otherwise a single plain chat node.
    """

    @classmethod
    def compile(cls, config: AgentConfigEntity):
        """
        Build and compile the LangGraph application for ``config``.

        Args:
            config: agent configuration; ``config.config["tools"]`` optionally
                lists tool names resolved through ToolCenter.

        Returns:
            The compiled graph.
        """
        work_app = StateGraph(AgentState)

        # Resolve configured tool names; names unknown to ToolCenter are skipped.
        tool_names = config.config.get("tools") or []
        tools = [t for t in (ToolCenter.get_tool(n) for n in tool_names) if t]

        if tools:
            # Loop: LLM decides -> execute tools -> feed results back to the LLM;
            # the conditional router exits the loop when no tool call is made.
            work_app.add_node("chat_llm_tool_node", ChatLLMToolNode(config, tools))
            work_app.add_node("tool_execute_node", ToolExecuteNode(tools))

            work_app.add_edge(START, "chat_llm_tool_node")
            work_app.add_conditional_edges("chat_llm_tool_node", path=tool_condition_route)
            work_app.add_edge("tool_execute_node", "chat_llm_tool_node")
        else:
            # Fix: chat_llm_node was previously registered unconditionally,
            # leaving an unreachable node in the graph whenever tools exist.
            work_app.add_node("chat_llm_node", ChatLLMNode(config))
            work_app.add_edge(START, "chat_llm_node")
            work_app.add_edge("chat_llm_node", END)

        return work_app.compile()
