'''
* This is the project for the Brtc LLMOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description Function-calling agent built on a LangGraph state machine
* @File: function_call_agent.py
* @Time: 2025/10/9
* @All Rights Reserved By Brtc
'''
import json
import logging
import re
import time
import uuid

from langchain_core.messages import HumanMessage, SystemMessage, RemoveMessage, ToolMessage, \
    messages_to_dict, AIMessage
from langgraph.constants import END
from langgraph.graph import StateGraph
from langgraph.graph.graph import CompiledGraph
from typing_extensions import Literal

from internal.core.agent.entities.agent_entity import AgentState, AGENT_SYSTEM_PROMPT_TEMPLATE, \
    DATASET_RETRIEVAL_TOOL_NAME, MAX_ITERATION_RESPONSE
from internal.exception.exception import FailException
from .base_agent import BaseAgent
from ..entities.queue_entity import QueueEvent, AgentThought


class FunctionCallAgent(BaseAgent):
    """Function-calling agent driven by a LangGraph state machine.

    The graph loops LLM generation and tool execution until a final text
    answer is produced:

        preset_operation -> (long_term_memory_recall | END)
        long_term_memory_recall -> llm
        llm -> (tools | END)
        tools -> llm
    """

    def _build_agent(self) -> CompiledGraph:
        """Build and compile the LangGraph graph for this agent."""
        # 1. Create the state graph over the shared AgentState schema.
        graph = StateGraph(AgentState)

        # 2. Register the nodes.
        graph.add_node("preset_operation", self._preset_operation_node)
        graph.add_node("long_term_memory_recall", self._long_term_memory_recall_node)
        graph.add_node("llm", self._llm_node)
        graph.add_node("tools", self._tools_node)

        # 3. Wire the edges.
        graph.set_entry_point("preset_operation")
        graph.add_conditional_edges("preset_operation", self._preset_operation_condition)
        graph.add_edge("long_term_memory_recall", "llm")
        graph.add_conditional_edges("llm", self._tools_condition)
        graph.add_edge("tools", "llm")

        # 4. Compile and return the runnable agent.
        return graph.compile()

    def _preset_operation_node(self, state: AgentState) -> AgentState:
        """Preset-operation node: input moderation before anything else runs.

        When input review is enabled and the user query contains any configured
        keyword, the preset response is published (followed by an end event)
        and returned as an AI message so the conditional edge short-circuits
        straight to END.
        """
        # 1. Fetch the review configuration and the latest user query text.
        review_config = self.agent_config.review_config
        # BUGFIX: match keywords against the message text, not the message
        # object itself (a leftover debug print was also removed here).
        query = state["messages"][-1].content

        # 2. Both the global switch and the input-review switch must be on.
        if review_config["enable"] and review_config["inputs_config"]["enable"]:
            contains_keyword = any(keyword in query for keyword in review_config["keywords"])
            # 3. On a keyword hit, publish the preset response then signal the end.
            if contains_keyword:
                preset_response = review_config["inputs_config"]["preset_response"]
                self.agent_queue_manager.publish(
                    state["task_id"],
                    AgentThought(
                        id=uuid.uuid4(),
                        task_id=state["task_id"],
                        event=QueueEvent.AGENT_MESSAGE,
                        observation=preset_response,
                        # BUGFIX: serialize messages consistently with every
                        # other publish in this class (was raw message objects).
                        message=messages_to_dict(state["messages"]),
                        answer=preset_response,
                        latency=0,
                    ),
                )
                self.agent_queue_manager.publish(
                    state["task_id"],
                    AgentThought(
                        id=uuid.uuid4(),
                        task_id=state["task_id"],
                        event=QueueEvent.AGENT_END,
                    ),
                )
                return {"messages": [AIMessage(preset_response)]}
        return {"messages": []}

    def _long_term_memory_recall_node(self, state: AgentState) -> AgentState:
        """Long-term memory recall node.

        Builds the preset message list (system prompt + optional long-term
        memory + short-term history + current question) and replaces the raw
        incoming human message with it.

        Raises:
            FailException: when the short-term history does not come in
                complete human/AI pairs.
        """
        # 1. Recall long-term memory only when the agent config enables it.
        long_term_memory = ""
        if self.agent_config.enable_long_term_memory:
            long_term_memory = state["long_term_memory"]
            self.agent_queue_manager.publish(
                state["task_id"],
                AgentThought(
                    id=uuid.uuid4(),
                    task_id=state["task_id"],
                    event=QueueEvent.LONG_TERM_MEMORY_RECALL,
                    observation=long_term_memory,
                ),
            )

        # 2. Build the preset message list, filling preset_prompt (and the
        #    recalled memory) into the system message template.
        preset_messages = [
            SystemMessage(AGENT_SYSTEM_PROMPT_TEMPLATE.format(
                preset_prompt=self.agent_config.preset_prompt,
                long_term_memory=long_term_memory,
            ))
        ]

        # 3. Append the short-term history; it must contain human/AI pairs.
        history = state["history"]
        if isinstance(history, list) and len(history) > 0:
            if len(history) % 2 != 0:
                raise FailException("智能体历史消息格式错误！")
            preset_messages.extend(history)

        # 4. Append the current user question.
        human_message = state["messages"][-1]
        preset_messages.append(HumanMessage(human_message.content))

        # 5. Remove the original human message and replace it with the preset list.
        return {
            "messages": [RemoveMessage(id=human_message.id), *preset_messages],
        }

    def _llm_node(self, state: AgentState) -> AgentState:
        """LLM node: stream a response (text or tool calls) and publish events.

        Publishes AGENT_MESSAGE chunks while streaming text, then a summary
        AGENT_THOUGHT (tool call) or AGENT_MESSAGE + AGENT_END event carrying
        token counts and pricing.
        """
        # 1. Stop when the agent exceeded its allowed number of iterations.
        if state["iteration_count"] > self.agent_config.max_iteration_count:
            self.agent_queue_manager.publish(
                state["task_id"],
                AgentThought(
                    id=uuid.uuid4(),
                    task_id=state["task_id"],
                    event=QueueEvent.AGENT_MESSAGE,
                    thought=MAX_ITERATION_RESPONSE,
                    message=messages_to_dict(state["messages"]),
                    answer=MAX_ITERATION_RESPONSE,
                    latency=0,
                ),
            )
            self.agent_queue_manager.publish(
                state["task_id"],
                AgentThought(
                    id=uuid.uuid4(),
                    task_id=state["task_id"],
                    event=QueueEvent.AGENT_END,
                ),
            )
            return {"messages": [AIMessage(MAX_ITERATION_RESPONSE)]}

        # 2. Prepare the LLM call (renamed from `id` to avoid shadowing the builtin).
        thought_id = uuid.uuid4()
        start_at = time.perf_counter()
        llm = self.llm
        # 3. Bind tools only when the model supports bind_tools and tools exist.
        if hasattr(llm, "bind_tools") and callable(getattr(llm, "bind_tools")) and len(self.agent_config.tools) > 0:
            llm = llm.bind_tools(self.agent_config.tools)

        # 4. Stream the LLM output, accumulating chunks into a single message.
        gathered = None
        is_first_chunk = True
        generation_type = ""
        try:
            for chunk in llm.stream(state["messages"]):
                if is_first_chunk:
                    gathered = chunk
                    is_first_chunk = False
                else:
                    gathered += chunk

                # 5. Classify the generation as tool arguments or plain text.
                if not generation_type:
                    if chunk.tool_calls:
                        generation_type = "thought"
                    elif chunk.content:
                        generation_type = "message"

                # 6. For plain text, publish each chunk as it arrives.
                if generation_type == "message":
                    # 7. Apply output moderation before publishing the chunk.
                    review_config = self.agent_config.review_config
                    content = chunk.content
                    if review_config["enable"] and review_config["outputs_config"]["enable"]:
                        for keyword in review_config["keywords"]:
                            content = re.sub(re.escape(keyword), "**", content, flags=re.IGNORECASE)

                    self.agent_queue_manager.publish(
                        state["task_id"],
                        AgentThought(
                            id=thought_id,
                            task_id=state["task_id"],
                            event=QueueEvent.AGENT_MESSAGE,
                            # BUGFIX: publish the moderated content; the filtered
                            # text was previously computed but never used.
                            thought=content,
                            message=messages_to_dict(state["messages"]),
                            answer=content,
                            latency=(time.perf_counter() - start_at),
                        ),
                    )
        except Exception as e:
            logging.exception(f"LLM节点发生错误, 错误信息:{str(e)}")
            self.agent_queue_manager.publish_error(state["task_id"], f"LLM节点发生错误, 错误信息:{str(e)}")

        # BUGFIX: the stream may fail before yielding any chunk, leaving
        # `gathered` as None; fall back to an empty AI message so token
        # counting below and the downstream tools condition stay safe.
        if gathered is None:
            gathered = AIMessage("")

        # 8. Count input + output tokens.
        input_token_count = self.llm.get_num_tokens_from_messages(state["messages"])
        output_token_count = self.llm.get_num_tokens_from_messages([gathered])

        # 9. Token pricing (hard-coded placeholder until get_pricing is wired in).
        # input_price, out_price, unit = self.llm.get_pricing()
        input_price = 0.01
        out_price = 0.02
        unit = 1.0

        # 10. Compute total token count and total cost.
        total_token_count = input_token_count + output_token_count
        total_price = (input_token_count * input_price + out_price * output_token_count) * unit

        # 11. Tool-call generation: publish a reasoning (AGENT_THOUGHT) event.
        if generation_type == "thought":
            self.agent_queue_manager.publish(state["task_id"], AgentThought(
                id=thought_id,
                task_id=state["task_id"],
                event=QueueEvent.AGENT_THOUGHT,
                thought=json.dumps(gathered.tool_calls),
                # Message-side stats.
                message=messages_to_dict(state["messages"]),
                message_token_count=input_token_count,
                message_price_unit=unit,
                message_unit_price=input_price,
                # Answer-side stats.
                answer="",
                answer_token_count=output_token_count,
                answer_unit_price=out_price,
                answer_price_unit=unit,
                # Aggregate stats.
                total_token_count=total_token_count,
                total_price=total_price,
                latency=(time.perf_counter() - start_at),
            ))
        elif generation_type == "message":
            # 12. A direct text answer is final: publish the summary event
            #     (content was already streamed chunk by chunk) then AGENT_END.
            self.agent_queue_manager.publish(state["task_id"], AgentThought(
                id=thought_id,
                task_id=state["task_id"],
                event=QueueEvent.AGENT_MESSAGE,
                thought="",
                # Message-side stats.
                message=messages_to_dict(state["messages"]),
                message_token_count=input_token_count,
                message_price_unit=unit,
                message_unit_price=input_price,
                # Answer-side stats.
                answer="",
                answer_token_count=output_token_count,
                answer_unit_price=out_price,
                answer_price_unit=unit,
                # Aggregate stats.
                total_token_count=total_token_count,
                total_price=total_price,
                latency=(time.perf_counter() - start_at),
            ))
            self.agent_queue_manager.publish(state["task_id"], AgentThought(
                id=uuid.uuid4(),
                task_id=state["task_id"],
                event=QueueEvent.AGENT_END,
            ))

        return {"messages": [gathered], "iteration_count": state["iteration_count"] + 1}

    def _tools_node(self, state: AgentState) -> AgentState:
        """Tool-execution node: run every tool call requested by the last AI message."""
        # 1. Index tools by name for direct lookup.
        tools_by_name = {tool.name: tool for tool in self.agent_config.tools}
        # 2. Extract the tool calls from the most recent (AI) message.
        tool_calls = state["messages"][-1].tool_calls
        # 3. Execute each call and assemble the resulting tool messages.
        messages = []
        for tool_call in tool_calls:
            # 4. Per-call event id (avoids shadowing the `id` builtin) and start time.
            thought_id = uuid.uuid4()
            start_at = time.perf_counter()
            try:
                # 5. Look up and invoke the tool.
                tool = tools_by_name[tool_call["name"]]
                tool_result = tool.invoke(tool_call["args"])
            except Exception as e:
                # 6. Surface failures as the tool result rather than aborting the run.
                tool_result = f"工具执行出错:{str(e)}"
            # 7. Wrap the result in a ToolMessage for the LLM's next turn.
            messages.append(ToolMessage(
                tool_call_id=tool_call["id"],
                content=json.dumps(tool_result),
                name=tool_call["name"],
            ))

            # 8. Publish either an agent-action or a dataset-retrieval event,
            #    depending on which tool was executed.
            event = (
                QueueEvent.AGENT_ACTION
                if tool_call["name"] != DATASET_RETRIEVAL_TOOL_NAME
                else QueueEvent.DATASET_RETRIEVAL
            )
            self.agent_queue_manager.publish(state["task_id"], AgentThought(
                id=thought_id,
                task_id=state["task_id"],
                event=event,
                observation=json.dumps(tool_result),
                tool=tool_call["name"],
                tool_input=tool_call["args"],
                latency=(time.perf_counter() - start_at),
            ))
        return {"messages": messages}

    @classmethod
    def _tools_condition(cls, state: AgentState) -> Literal["tools", "__end__"]:
        """Conditional edge: run the tools node when the last AI message requests tool calls."""
        # 1. The last message in the state is the AI message produced by the llm node.
        ai_message = state["messages"][-1]
        # 2. Route to the tools node only when tool calls are present; otherwise end.
        if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
            return "tools"
        return END

    @classmethod
    def _preset_operation_condition(cls, state: AgentState) -> Literal["long_term_memory_recall", "__end__"]:
        """Conditional edge: end immediately when input moderation produced an AI response."""
        # 1. Inspect the last message in the state.
        message = state["messages"][-1]
        # 2. An AI message here means the review mechanism fired; finish the run.
        if message.type == "ai":
            return END
        return "long_term_memory_recall"
