# -*- coding: utf-8 -*-
"""
此代码定义了一个名为 WorkbenchAgent 的类，该类继承自 RoutedAgent。
WorkbenchAgent 旨在作为一个能够使用 Model Context Protocol (MCP) 工具的 AI 助手。

主要功能包括：
1. 初始化：构造函数接收三个参数：model_client（聊天完成客户端）、model_context（聊天完成上下文）和 workbench（工作台）。
2. 系统消息：在初始化时，WorkbenchAgent 会设置一条系统消息，指示其作为一个 AI 助手的角色。

该类的设计目的是为了支持与用户的交互，并利用 MCP 工具进行更复杂的任务处理。
"""

import asyncio
import json
import os
from dataclasses import dataclass
from typing import List

from autogen_core import (
    AgentId,
    SingleThreadedAgentRuntime,
    FunctionCall,
    MessageContext,
    RoutedAgent,
    message_handler,
)
from autogen_core.model_context import (
    ChatCompletionContext,
    BufferedChatCompletionContext,
)
from autogen_core.models import (
    AssistantMessage,
    ChatCompletionClient,
    FunctionExecutionResult,
    FunctionExecutionResultMessage,
    LLMMessage,
    SystemMessage,
    UserMessage,
)
from autogen_core.tools import ToolResult, Workbench

from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_ext.tools.mcp import SseServerParams, McpWorkbench


@dataclass
class Message:
    """A plain-text message exchanged between the user and the agent."""

    content: str


class WorkbenchAgent(RoutedAgent):
    """AI assistant agent that answers user messages with MCP tools.

    The agent keeps a (possibly shared) chat-completion context, asks the
    model for a reply, executes any tool calls the model requests through
    the injected :class:`Workbench`, compresses tool output to control
    token usage, and loops until the model returns plain text.
    """

    def __init__(
        self,
        model_client: ChatCompletionClient,
        model_context: ChatCompletionContext,
        workbench: Workbench,
    ) -> None:
        """Initialize the agent.

        Args:
            model_client: Chat-completion client used to call the LLM.
            model_context: Message history; injected (rather than created
                here) so it can be shared between agent instances.
            workbench: Workbench exposing the MCP tools.
        """
        super().__init__("An agent with a workbench")
        # Keep the system prompt short to reduce token usage.
        self._system_messages: List[LLMMessage] = [
            SystemMessage(
                content="你是MCP工具助手。理解用户请求，使用工具查询资源。列表数据格式化为Markdown表格。"
            )
        ]
        self._model_client = model_client
        # Use the injected context; do not create a second one.
        self._model_context = model_context
        self._workbench = workbench

        # Token-management knobs.
        self._max_tool_result_length = 800  # max characters kept per tool result
        self._max_total_tokens = 50000  # rough cap on estimated context tokens

    def _truncate_content(self, content: str, max_length: int = 800) -> str:
        """Truncate ``content`` to roughly ``max_length`` characters.

        Tries to cut at a sentence or line boundary so the result stays
        readable, and appends a note recording the original length.
        """
        if len(content) <= max_length:
            return content

        truncated = content[:max_length]
        # Candidate cut points: Chinese full stop, ASCII period, newline.
        last_sentence = truncated.rfind("。")
        last_period = truncated.rfind(".")
        last_newline = truncated.rfind("\n")

        # Use the best boundary only if it does not discard too much text.
        # rfind returns -1 when no boundary exists, which fails this test.
        cut_point = max(last_sentence, last_period, last_newline)
        if cut_point > max_length * 0.8:
            truncated = content[: cut_point + 1]

        return truncated + f"\n... (内容已截断，原长度: {len(content)} 字符)"

    def _extract_key_info(self, obj: dict, key_mappings: dict = None) -> dict:
        """Reduce a complex dict to a small, display-friendly summary.

        Kubernetes resources (dicts carrying both ``kind`` and ``metadata``)
        are delegated to :meth:`_extract_k8s_resource_info`; everything else
        is summarized field by field.

        Note: ``key_mappings`` is currently unused; the parameter is kept
        for backward compatibility with existing callers.
        """
        if key_mappings is None:
            key_mappings = {}

        # K8s resources get dedicated handling.
        if "kind" in obj and "metadata" in obj:
            return self._extract_k8s_resource_info(obj)

        result = {}
        for key, value in obj.items():
            if isinstance(value, dict):
                # For nested dicts, surface a name-like field if present.
                if "name" in value:
                    result[key] = value["name"]
                elif "type" in value:
                    result[key] = value["type"]
                else:
                    result[key] = f"<{type(value).__name__}>"
            elif isinstance(value, list):
                # Lists are summarized by their length only.
                result[key] = f"[{len(value)} items]" if value else "[]"
            else:
                result[key] = str(value)[:100]  # cap scalar length

        return result

    def _extract_k8s_resource_info(self, resource: dict) -> dict:
        """Extract display fields from a Kubernetes resource dict."""
        result = {}

        # Basic identity.
        result["Kind"] = resource.get("kind", "Unknown")
        result["APIVersion"] = resource.get("apiVersion", "Unknown")

        # Metadata.
        metadata = resource.get("metadata", {})
        result["Name"] = metadata.get("name", "Unknown")
        result["Namespace"] = metadata.get("namespace", "default")
        result["CreationTime"] = metadata.get("creationTimestamp", "Unknown")

        if resource.get("kind") == "Pod":
            spec = resource.get("spec", {})
            status = resource.get("status", {})

            # Container names, capped at three for table readability.
            containers = spec.get("containers", [])
            if containers:
                container_names = [c.get("name", "unnamed") for c in containers]
                result["Containers"] = ", ".join(container_names[:3])
                if len(containers) > 3:
                    result["Containers"] += f" (+{len(containers) - 3} more)"

            # Scheduling / lifecycle status.
            result["Phase"] = status.get("phase", "Unknown")
            result["Node"] = spec.get("nodeName", "Unknown")

            # Readiness from the "Ready" condition, if present.
            conditions = status.get("conditions", [])
            ready_condition = next(
                (c for c in conditions if c.get("type") == "Ready"), None
            )
            if ready_condition:
                result["Ready"] = ready_condition.get("status", "Unknown")

        elif resource.get("kind") == "Service":
            spec = resource.get("spec", {})
            result["Type"] = spec.get("type", "Unknown")
            result["ClusterIP"] = spec.get("clusterIP", "Unknown")
            ports = spec.get("ports", [])
            if ports:
                # At most two ports, shown as "port/protocol".
                port_info = [
                    f"{p.get('port', 'Unknown')}/{p.get('protocol', 'TCP')}"
                    for p in ports[:2]
                ]
                result["Ports"] = ", ".join(port_info)

        return result

    def _format_json_to_markdown_table(
        self, data: List[dict], max_rows: int = 15
    ) -> str:
        """Render a list of dicts as a Markdown table.

        Rows beyond ``max_rows`` are dropped (with a summary line); complex
        values are pre-summarized via :meth:`_extract_key_info`; at most 10
        columns are shown, with well-known K8s fields first.
        """
        if not data:
            return "没有找到任何条目。"

        summary_message = ""
        display_data = data[:max_rows]

        if len(data) > max_rows:
            summary_message = f"总共 {len(data)} 条，显示前 {max_rows} 条。\n\n"

        # Pre-process rows into flat, short summaries.
        processed_data = []
        for item in display_data:
            if isinstance(item, dict):
                processed_data.append(self._extract_key_info(item))
            else:
                processed_data.append({"Value": str(item)})

        if not processed_data:
            return summary_message + "数据处理后为空。"

        # Collect every header that occurs in any row.
        all_headers = set()
        for item in processed_data:
            all_headers.update(item.keys())

        # Important K8s fields come first, in this fixed order.
        priority_headers = [
            "Name",
            "Kind",
            "Namespace",
            "Phase",
            "Ready",
            "Containers",
            "Node",
            "Type",
            "ClusterIP",
        ]
        headers = []
        for header in priority_headers:
            if header in all_headers:
                headers.append(header)
                all_headers.remove(header)

        # Then the rest, alphabetically, capped at 10 columns total.
        remaining_headers = sorted(all_headers)
        headers.extend(remaining_headers[: max(0, 10 - len(headers))])

        # Guard: rows that summarized to empty dicts yield no headers;
        # avoid emitting a malformed "|  |" table.
        if not headers:
            return summary_message + "数据处理后为空。"

        header_row = "| " + " | ".join(headers) + " |"
        separator_row = "| " + " | ".join(["---"] * len(headers)) + " |"

        data_rows = []
        for item in processed_data:
            row_values = []
            for header in headers:
                value = str(item.get(header, ""))
                # Sanitize: newlines break rows, pipes break cells.
                value = value.replace("\n", " ").replace("\r", " ").replace("|", "\\|")
                if len(value) > 80:
                    value = value[:77] + "..."
                row_values.append(value)
            data_rows.append("| " + " | ".join(row_values) + " |")

        table_content = "\n".join([header_row, separator_row] + data_rows)
        return summary_message + table_content

    async def _estimate_tokens(self, messages: List[LLMMessage]) -> int:
        """Roughly estimate token count.

        Heuristic: ~2 tokens per CJK character, ~1.3 per other character.
        This is intentionally cheap, not exact.
        """
        total = 0.0
        for msg in messages:
            content = msg.content if isinstance(msg.content, str) else str(msg.content)
            chinese_chars = sum(1 for c in content if "\u4e00" <= c <= "\u9fff")
            other_chars = len(content) - chinese_chars
            total += chinese_chars * 2 + other_chars * 1.3
        return int(total)

    async def _manage_context_size(self):
        """Trim the context when the estimated token count exceeds the cap.

        Keeps only the three most recent messages when trimming.
        """
        current_messages = await self._model_context.get_messages()
        estimated_tokens = await self._estimate_tokens(
            self._system_messages + current_messages
        )

        print(f"---当前估算token数: {estimated_tokens}---")

        if estimated_tokens > self._max_total_tokens:
            print("---上下文接近上限，清理旧消息---")
            messages_to_keep = (
                current_messages[-3:] if len(current_messages) > 3 else current_messages
            )

            # Rebuild the context with only the retained tail.
            await self._model_context.clear()
            for msg in messages_to_keep:
                await self._model_context.add_message(msg)

    @message_handler
    async def handle_user_message(
        self, message: Message, ctx: MessageContext
    ) -> Message:
        """Handle one user message: run the model/tool loop and reply.

        Returns a Message with the model's final text, or an error message
        if anything goes wrong (errors never propagate to the runtime).
        """
        try:
            # Keep the context within budget before adding more.
            await self._manage_context_size()

            await self._model_context.add_message(
                UserMessage(content=message.content, source="user")
            )

            messages_for_llm = self._system_messages + (
                await self._model_context.get_messages()
            )

            print("---获取工具列表---")
            tools_for_llm = await self._workbench.list_tools()
            print(f"---可用工具数量: {len(tools_for_llm)}---")

            create_result = await self._model_client.create(
                messages=messages_for_llm,
                tools=tools_for_llm,
                cancellation_token=ctx.cancellation_token,
            )

            if not create_result or not create_result.content:
                return Message(content="LLM未返回内容，请重试。")

            # Tool-call loop: keep going while the model requests tool calls.
            # The explicit non-empty check prevents looping on an empty call
            # list (for which all(...) would be vacuously true).
            while (
                isinstance(create_result.content, list)
                and len(create_result.content) > 0
                and all(
                    isinstance(call, FunctionCall) for call in create_result.content
                )
            ):
                # Record the model's tool-call request.
                await self._model_context.add_message(
                    AssistantMessage(content=create_result.content, source="assistant")
                )

                all_function_execution_results: List[FunctionExecutionResult] = []

                for call in create_result.content:
                    print(f"---调用工具: {call.name}---")
                    is_error = False
                    try:
                        result = await self._workbench.call_tool(
                            call.name,
                            arguments=json.loads(call.arguments),
                            cancellation_token=ctx.cancellation_token,
                        )
                        is_error = result.is_error

                        # Compress the tool output: lists of records become
                        # a Markdown table, anything else is truncated.
                        raw_content = result.to_text()
                        processed_content = raw_content
                        if isinstance(raw_content, str):
                            try:
                                json_data = json.loads(raw_content)
                                if isinstance(json_data, list):
                                    processed_content = (
                                        self._format_json_to_markdown_table(json_data)
                                    )
                                else:
                                    processed_content = self._truncate_content(
                                        raw_content
                                    )
                            except json.JSONDecodeError:
                                processed_content = self._truncate_content(raw_content)

                    except Exception as e:
                        print(f"---工具调用失败: {e}---")
                        # Track the error flag directly instead of building a
                        # throwaway ToolResult: constructing ToolResult with a
                        # plain string could itself raise inside this handler.
                        is_error = True
                        processed_content = f"工具 {call.name} 调用失败: {str(e)[:200]}"

                    all_function_execution_results.append(
                        FunctionExecutionResult(
                            call_id=call.id,
                            content=processed_content,
                            is_error=is_error,
                            name=call.name,
                        )
                    )

                # Feed the tool results back to the model.
                await self._model_context.add_message(
                    FunctionExecutionResultMessage(
                        content=all_function_execution_results
                    )
                )

                # Tool output may be large; re-check the budget.
                await self._manage_context_size()

                create_result = await self._model_client.create(
                    messages=self._system_messages
                    + (await self._model_context.get_messages()),
                    tools=(await self._workbench.list_tools()),
                    cancellation_token=ctx.cancellation_token,
                )

            # The loop exits when the model returns plain text. Validate
            # explicitly rather than assert: asserts vanish under `python -O`.
            if not isinstance(create_result.content, str):
                return Message(content="LLM返回了非预期的内容，请重试。")

            await self._model_context.add_message(
                AssistantMessage(content=create_result.content, source="assistant")
            )

            return Message(content=create_result.content)

        except Exception as e:
            print(f"---处理消息时出错: {e}---")
            return Message(content=f"处理请求时出错: {str(e)[:200]}")


async def main():
    """Run an interactive CLI against a local MCP SSE server.

    Starts an McpWorkbench, registers a WorkbenchAgent on a single-threaded
    runtime with a shared bounded context, then reads user input in a loop
    ('exit' quits, 'clear' wipes the conversation history).
    """
    mcp_server_params = SseServerParams(url="http://localhost:8080/sse")

    print("---启动McpWorkbench---")
    async with McpWorkbench(mcp_server_params) as workbench:
        print("---McpWorkbench启动成功---")
        runtime = SingleThreadedAgentRuntime()

        # Shared context with a small buffer so every agent instance sees
        # the same (bounded) history.
        shared_model_context = BufferedChatCompletionContext(buffer_size=10)

        # SECURITY: read the API key from the environment instead of
        # committing a secret to source control.
        api_key = os.environ.get("DEEPSEEK_API_KEY", "")

        await WorkbenchAgent.register(
            runtime=runtime,
            type="WebAgent",
            factory=lambda: WorkbenchAgent(
                model_client=OpenAIChatCompletionClient(
                    model="deepseek-chat",
                    api_key=api_key,
                    base_url="https://api.deepseek.com/v1",
                    timeout=120,
                    model_info={
                        "vision": False,
                        "function_calling": True,
                        "json_output": True,
                        "structured_output": True,
                        "family": "deepseek",
                    },
                ),
                model_context=shared_model_context,  # shared history
                workbench=workbench,
            ),
        )

        print("\n输入消息，输入 'exit' 退出，输入 'clear' 清空历史：")
        runtime.start()

        try:
            while True:
                # input() blocks; run it in a worker thread so the event
                # loop (and the agent runtime) keeps processing meanwhile.
                user_input = await asyncio.to_thread(input, "您: ")
                if user_input.lower() == "exit":
                    break
                elif user_input.lower() == "clear":
                    await shared_model_context.clear()
                    print("对话历史已清空。")
                    continue

                try:
                    response = await runtime.send_message(
                        Message(content=user_input),
                        recipient=AgentId("WebAgent", "default"),
                    )

                    if response and response.content:
                        print(f"助手: {response.content}")
                    else:
                        print("助手: 没有收到回复。")

                except Exception as e:
                    print(f"发送消息时出错: {e}")
                    continue
        finally:
            # Always stop the runtime, even if the input loop raises.
            await runtime.stop()


if __name__ == "__main__":
    asyncio.run(main())
