'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: function_call_agent.py
* @Time: 2025/11/17
* @All Rights Reserved By Brtc
'''
import json
import time
import uuid
from threading import Thread
from typing import Literal, Generator

from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage, RemoveMessage, ToolMessage, \
    messages_to_dict
from langgraph.constants import END
from langgraph.graph import StateGraph
from internal.entity.agent_entity import AgentState, AGENT_SYSTEM_PROMPT_TEMPLATE
from internal.entity.queue_entity import AgentQueueEvent, QueueEvent
from internal.exception.exception import FailException
from .base_agent import BaseAgent


class FunctionCallAgent(BaseAgent):
    """Function-calling tool agent.

    Builds a LangGraph state machine (long-term-memory recall -> llm -> tools
    loop) and streams progress events to callers through the agent queue
    manager while the graph executes on a background thread.
    """

    def run(self,
            query: str,
            history: list[AnyMessage] = None,
            long_term_memory: str = "") -> Generator[AgentQueueEvent, None, None]:
        """Run the agent application and yield its queue events as they arrive.

        :param query: the user's input text for this turn
        :param history: prior conversation messages; must come in [human, ai]
            pairs (validated in the recall node) — TODO confirm expected order
        :param long_term_memory: recalled long-term memory text, may be empty
        """
        # 1. Normalize arguments (None default avoids a shared mutable default).
        if history is None:
            history = []
        # 2. Build the agent graph.
        agent = self._build_graph()
        # 3. Invoke the graph on a background thread so this generator can
        #    stream events from the queue while the graph is still running.
        thread = Thread(
            target=agent.invoke,
            args=({
                "messages": [HumanMessage(content=query)],
                "history": history,
                "long_term_memory": long_term_memory,
            },),
        )
        thread.start()
        # 4. Relay events published by the graph nodes until stop_listen() fires.
        yield from self.agent_queue_manager.listen()

    def _long_term_memory_recall_node(self, state: AgentState) -> AgentState:
        """Recall long-term memory and assemble the full preset message list."""
        # 1. Only read long-term memory when the agent config enables it,
        #    publishing a recall event so the client can observe it.
        long_term_memory = ""
        if self.agent_config.enable_long_term_memory:
            long_term_memory = state["long_term_memory"]
            self.agent_queue_manager.publish(AgentQueueEvent(
                id=uuid.uuid4(),
                task_id=self.agent_queue_manager.task_id,
                event=QueueEvent.LONG_TERM_MEMORY_RECALL,
                observation=long_term_memory,
            ))

        # 2. System message: preset_prompt + long_term_memory rendered into the
        #    agent system prompt template.
        preset_messages = [
            SystemMessage(AGENT_SYSTEM_PROMPT_TEMPLATE.format(
                preset_prompt=self.agent_config.preset_prompt,
                long_term_memory=long_term_memory,
            ))
        ]

        # 3. Splice in short-term (conversation) history.
        short_term_memory = state["history"]
        if isinstance(short_term_memory, list) and len(short_term_memory) > 0:
            # 4. History must arrive in [human, ai] pairs, i.e. an even count.
            if len(short_term_memory) % 2 != 0:
                raise FailException("智能体历史消息格式错误！！")
            # 5. Bug fix: actually append the history messages — previously the
            #    history was validated but never added to the preset list.
            preset_messages.extend(short_term_memory)

        # 6. Append the current user message.
        human_message = state["messages"][-1]
        preset_messages.append(HumanMessage(content=human_message.content))

        # 7. Replace the raw user message with the fully assembled prompt:
        #    remove the original, then add the preset messages in its place.
        return {
            "messages": [RemoveMessage(id=human_message.id), *preset_messages],
        }

    def _llm_node(self, state: AgentState) -> AgentState:
        """LLM node: stream the model output, publishing message/thought events."""
        # 1. One event id per generation; use perf_counter() for the start time.
        #    Bug fix: start_at was time.time() while latency used
        #    time.perf_counter() - start_at, mixing wall-clock and monotonic
        #    clocks and producing meaningless latency values.
        event_id = uuid.uuid4()
        start_at = time.perf_counter()
        llm = self.agent_config.llm
        # 2. Bind tools only when the model exposes a callable bind_tools and
        #    the agent actually has tools configured.
        if hasattr(llm, "bind_tools") and callable(getattr(llm, "bind_tools")) and len(self.agent_config.tools) > 0:
            llm = llm.bind_tools(self.agent_config.tools)

        # 3. Stream the model output, accumulating chunks into one message.
        full_content = None
        is_first_chunk = True
        generation_type = ""
        for chunk in llm.stream(state["messages"]):
            if is_first_chunk:
                full_content = chunk
                is_first_chunk = False
            else:
                full_content = full_content + chunk
            # 4. Classify the generation once: tool-call arguments ("thought")
            #    vs. plain text content ("message").
            if not generation_type:
                if chunk.tool_calls:
                    generation_type = "thought"
                elif chunk.content:
                    generation_type = "message"
            # 5. For plain-text generations, push each incremental answer chunk.
            if generation_type == "message":
                self.agent_queue_manager.publish(AgentQueueEvent(
                    id=event_id,
                    task_id=self.agent_queue_manager.task_id,
                    thought=chunk.content,
                    messages=messages_to_dict(state["messages"]),
                    answer=chunk.content,
                    latency=(time.perf_counter() - start_at),
                    event=QueueEvent.AGENT_MESSAGE,
                ))

        # 6. For tool-call generations, publish a single agent-thought event.
        if generation_type == "thought":
            self.agent_queue_manager.publish(AgentQueueEvent(
                id=event_id,
                task_id=self.agent_queue_manager.task_id,
                event=QueueEvent.AGENT_THOUGHT,
                messages=messages_to_dict(state["messages"]),
                latency=(time.perf_counter() - start_at),
            ))
        elif generation_type == "message":
            # 7. The model produced the final answer directly, so stop the
            #    queue listener to let run()'s generator finish.
            self.agent_queue_manager.stop_listen()

        # Guard against an empty stream: never return [None] to the graph.
        return {"messages": [full_content] if full_content is not None else []}

    def _tools_node(self, state: AgentState) -> AgentState:
        """Tool-execution node: run each requested tool and publish events."""
        # 1. Index tools by name for O(1) lookup.
        tools_by_name = {tool.name: tool for tool in self.agent_config.tools}
        # 2. Extract the tool-call requests from the last (AI) message.
        tool_calls = state["messages"][-1].tool_calls
        # 3. Execute each tool call and assemble the resulting ToolMessages.
        messages = []
        for tool_call in tool_calls:
            # 4. Fresh event id and start time per tool call. Bug fix: use
            #    perf_counter() so the latency below measures elapsed time
            #    (start was previously taken from the unrelated time.time() clock).
            event_id = uuid.uuid4()
            start_at = time.perf_counter()
            # 5. Look up and invoke the tool with the model-provided arguments.
            tool = tools_by_name[tool_call["name"]]
            tool_result = tool.invoke(tool_call["args"])
            # 6. Wrap the result as a ToolMessage for the next LLM round.
            messages.append(ToolMessage(
                tool_call_id=tool_call["id"],
                content=json.dumps(tool_result),
                name=tool_call["name"],
            ))
            # 7. Dataset retrieval gets its own event type; every other tool is
            #    reported as a generic agent action.
            event = (
                QueueEvent.AGENT_ACTION
                if tool_call["name"] != "dataset_retrieval"
                else QueueEvent.DATASET_RETRIEVAL
            )
            self.agent_queue_manager.publish(AgentQueueEvent(
                id=event_id,
                task_id=self.agent_queue_manager.task_id,
                event=event,
                observation=json.dumps(tool_result),
                tool=tool_call["name"],
                tool_input=tool_call["args"],
                latency=(time.perf_counter() - start_at),
            ))

        return {"messages": messages}

    @classmethod
    def _tool_condition(cls, state: AgentState) -> Literal["tools", "__end__"]:
        """Route to the tools node when the last AI message requests tool calls,
        otherwise end the graph."""
        # Inspect only the most recent message in the state.
        ai_message = state["messages"][-1]
        if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
            return "tools"
        return END

    def _build_graph(self):
        """Build and compile the agent's state graph.

        Topology: long_term_memory_recall -> llm -> (tools -> llm)* -> END.
        """
        # 1. Create the graph over the shared AgentState schema.
        graph = StateGraph(AgentState)
        # 2. Register the nodes.
        graph.add_node("long_term_memory_recall", self._long_term_memory_recall_node)
        graph.add_node("llm", self._llm_node)
        graph.add_node("tools", self._tools_node)
        # 3. Wire the edges: entry -> recall -> llm, llm conditionally routes to
        #    tools or END, and tools always loops back to llm.
        graph.set_entry_point("long_term_memory_recall")
        graph.add_edge("long_term_memory_recall", "llm")
        graph.add_conditional_edges("llm", self._tool_condition)
        graph.add_edge("tools", "llm")
        return graph.compile()