# MCP Server工具 - HTTP Stream 类 MCP Server
import os
import asyncio
from mcp.server.fastmcp import FastMCP
from insight_agent.utils.http_post import get_insight_request, update_insight_result
from insight_agent.utils.agent_generator import AgentGenerator
from dotenv import load_dotenv


# Define the MCP server (tool namespace "agent_statistics", served on port 5129).
mcp = FastMCP("agent_statistics", port=5129)

# Load environment variables; override=True lets .env values replace existing ones.
load_dotenv(override=True)
# Name of the LLM backing the agent, read from the LLM_MODEL env var
# (None if unset — presumably validated downstream; TODO confirm).
llm_model_name = os.getenv("LLM_MODEL")
# Prompt file for the agent (path is relative to the current working directory).
prompts_file_path = './agent_statistics_prompts.txt'
# Tool/server configuration file for the agent.
servers_config_path = './agent_statistics_servers_config.json'
# Conversation-thread configuration passed to the agent on every invocation.
thread_config_1 = {"configurable": {"thread_id": "thread1"}}

# Agent factory.
agent_generator = AgentGenerator()
# Build the Insight agent once, at import time.
# NOTE(review): asyncio.run() at module import blocks until creation finishes
# and raises if an event loop is already running — confirm this module is only
# ever started as a script, never imported from async code.
agent_statistics = asyncio.run(agent_generator.create_agent(llm_model_name,
                                                      prompts_file_path,
                                                      servers_config_path))

@mcp.tool()
async def statistics_info(user_request):
    '''
    Fetch statistics insight data for the user's request.

    :param user_request: the user's request text, forwarded to the agent
    :return: the agent's final reply text (a string)
    '''
    # Fetch the part of insight_request relevant to agent_statistics.
    insight_request = get_insight_request("agent_statistics")
    print(insight_request['keywords'])
    # BUG FIX: the original called get_updated_result() with no argument and
    # without awaiting it, so a coroutine object (never the agent's answer)
    # was produced, and indexing it with ["summary"] raised TypeError.
    # The helper returns a plain string, so we return it as-is.
    # NOTE(review): insight_request['keywords'] may be the intended agent
    # input rather than user_request — confirm against the caller.
    updated_result = await get_updated_result(user_request)
    # Push the insight result for agent_statistics back to the shared store.
    update_insight_result("agent_statistics", updated_result)

    return updated_result


async def get_updated_result(input):
    """
    Invoke the statistics agent with the given text and return its reply.

    :param input: text handed to the agent as a system-role message
    :return: the content of the last message the agent produced
    """
    request_payload = {"messages": [{"role": "system", "content": input}]}
    agent_state = await agent_statistics.ainvoke(request_payload, thread_config_1)
    print(f'✅response_result: {agent_state}')
    reply = agent_state['messages'][-1].content
    print(f'✅response_content: {reply}')
    return reply


if __name__ == "__main__":
    # Serve the registered MCP tools over the streamable-HTTP transport
    # (port 5129, configured on the FastMCP instance above); blocks forever.
    mcp.run(transport="streamable-http")





