from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
from langchain_core.runnables import RunnableConfig

from owl_ai.application import ImageUtils
from owl_ai.graph.flow.chat_llm_graph_node import ChatLLMGraphNode
from owl_ai.service.tools.tool_center import ToolCenter


class AgentGraphNode(ChatLLMGraphNode):
    """
    LLM node with tool-calling capability.

    When the node config lists tool names, the corresponding tools are
    resolved via ToolCenter and bound to the chat LLM. On invocation the
    node loops model-call -> tool-execution until the model produces a
    final answer that requests no tools.
    """

    def __init__(self, node_config: dict):
        super().__init__(node_config)
        # Default to "no tools" so attribute access is always safe even
        # when the config omits the "tools" key.
        self.tools = []
        self.tool_by_name = {}

        tool_names = node_config.get("tools")
        if tool_names:
            self.tools = [ToolCenter.get_tool(tool_name) for tool_name in tool_names]
            self.tool_by_name = {tool.name: tool for tool in self.tools}
            self.chat_llm = self.chat_llm.bind_tools(self.tools)

    def __call__(self, state, config: RunnableConfig):
        # Assemble the prompt variables declared by this node's input spec.
        input_params = self.input_params_assem(state, self.input)

        system_prompt = self.system_prompt.format(**input_params)
        user_prompt = self.user_prompt.format(**input_params)

        llm_messages = [
            SystemMessage(content=system_prompt)
        ]

        user_prompt_content = [{
            "type": "text",
            "text": user_prompt,
        }]
        # If vision is supported, attach images as base64-encoded data URLs.
        # Image files arrive via the fixed input parameter "files".
        if self.vision_support:
            files = input_params.get("files")
            if files:
                for file in files:
                    file_name = file.get("name")
                    if ImageUtils.is_image_file(file_name):
                        suffix = file_name.split(".")[-1]
                        image_data = file.get("content")
                        user_prompt_content.append({
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/{suffix};base64,{image_data}",
                            }
                        })
        llm_messages.append(HumanMessage(content=user_prompt_content))

        # Tool-calling loop: keep invoking the model until it answers
        # without requesting a tool.
        while True:
            ret = self.chat_llm.invoke(llm_messages)
            if not ret.tool_calls:
                return {
                    "node_params": {
                        self.node_name: {
                            "out": ret
                        }
                    }
                }
            # FIX: the assistant message that carries the tool_calls must be
            # appended to the history before the ToolMessage(s) — otherwise
            # providers reject the orphaned tool results.
            llm_messages.append(ret)
            # FIX: execute every requested tool call, not just the first;
            # models may emit parallel tool calls in a single turn, and each
            # tool_call_id must receive a matching ToolMessage.
            for tool_call in ret.tool_calls:
                tool = self.tool_by_name[tool_call["name"]]
                tool_output = tool.invoke(tool_call["args"])
                llm_messages.append(ToolMessage(content=tool_output,
                                                name=tool_call["name"],
                                                tool_call_id=tool_call["id"]))
