# 第5版fastapi测试，将不同agent的提示词分开，添加航班查询功能，
# 添加12306车票查询功能（要求固定的json格式）比如：
# {
#   "from_station": "九江",
#   "to_station": "永修",
#   "train_date": "2025-09-11"
# }
import json
import logging
import os
from typing import Any, AsyncGenerator

from dotenv import load_dotenv
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.chat_models import init_chat_model
from langchain_mcp_adapters.client import MultiServerMCPClient
from fastapi import Request, FastAPI
from fastapi.responses import JSONResponse, StreamingResponse
from contextlib import asynccontextmanager
import uvicorn
from llm_provider import get_llm
import asyncio
from datetime import datetime

# Logging setup: force UTF-8 on stdout/stderr so the Chinese log text survives
# consoles with non-UTF-8 default encodings, then log to both a timestamped
# file under logs/ and the terminal.
import sys
import io
# NOTE(review): rewrapping assumes sys.stdout/sys.stderr expose a .buffer
# attribute — not true when streams are already replaced (some IDEs,
# redirection, WSGI embedders). Confirm the deployment environment.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
LOG_FILE_NAME = f"logs/myfastapi5_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
os.makedirs(os.path.dirname(LOG_FILE_NAME), exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler(LOG_FILE_NAME, encoding="utf-8"),  # explicit encoding for the log file
        logging.StreamHandler()  # console output (depends on terminal encoding)
    ]
)
logger = logging.getLogger(__name__)

# Environment configuration
class Configuration:
    """Load runtime settings from .env and MCP server definitions from JSON.

    Attributes:
        api_key: DeepSeek API key ("" when unset).
        base_url: optional OpenAI-compatible endpoint override, or None.
        model: model identifier ("" when unset).
        model_type: backend kind, lower-cased (e.g. "ollama", "vllm"; anything
            else selects the API path in initialize_agents).
    """

    def __init__(self) -> None:
        load_dotenv(dotenv_path=".env")
        self.api_key: str = os.getenv("DEEPSEEK_API_KEY", "").strip()
        self.base_url: str | None = os.getenv("BASE_URL")
        # Bug fix: os.getenv returns None for missing variables, so the
        # previous MODEL_TYPE lookup crashed with AttributeError on .lower()
        # when the variable was absent. Default both to "".
        self.model: str = os.getenv("MODEL", "")
        self.model_type: str = os.getenv("MODEL_TYPE", "").lower()

    @staticmethod
    def load_server_configs(file_path: str = "servers_config2.json") -> dict[str, Any]:
        """Read "mcpServers" from *file_path* and bucket servers by category.

        Each server goes to the first category whose keyword appears in its
        lower-cased name (weather → map/baidu → rag → ticket → flight);
        anything unmatched falls into "general".

        Returns:
            Mapping of category -> {"mcpServers": {server_name: config}}.
        """
        with open(file_path, "r", encoding="utf-8") as f:
            config = json.load(f)
        all_servers = config.get("mcpServers", {})
        categories = ("map", "weather", "rag", "general", "ticket", "flight")
        server_configs: dict[str, Any] = {c: {"mcpServers": {}} for c in categories}
        for server_name, server_config in all_servers.items():
            name = server_name.lower()  # lower-case once instead of per branch
            # Order matters: a name containing several keywords takes the
            # first matching bucket (e.g. "weather-map" → "weather").
            if "weather" in name:
                bucket = "weather"
            elif "map" in name or "baidu" in name:
                bucket = "map"
            elif "rag" in name:
                bucket = "rag"
            elif "ticket" in name:
                bucket = "ticket"
            elif "flight" in name:
                bucket = "flight"
            else:
                bucket = "general"
            server_configs[bucket]["mcpServers"][server_name] = server_config
        return server_configs

async def initialize_agents() -> tuple[dict[str, AgentExecutor], dict[str, MultiServerMCPClient]]:
    """Create one tool-calling agent per MCP server category.

    Builds the LLM selected by the env config, then, for every category that
    has servers configured, creates a dedicated MultiServerMCPClient, loads
    its tools and wraps them in an AgentExecutor. If no per-category agent
    could be built but some clients exist, falls back to a single "general"
    agent that merges every reachable tool.

    Returns:
        (agents, mcp_clients): both dicts keyed by category name; either may
        be missing categories whose initialization failed.
    """
    cfg = Configuration()
    servers_cfgs = Configuration.load_server_configs()
    agents: dict[str, AgentExecutor] = {}
    mcp_clients: dict[str, MultiServerMCPClient] = {}

    if cfg.model_type == "ollama":
        logging.info(f"🦙 使用 Ollama 模型: {cfg.model}")
        llm_model = get_llm()
    elif cfg.model_type == "vllm":
        logging.info(f"使用 vllm 模型: {cfg.model}")
        llm_model = get_llm()
    else:
        logging.info(f"☁️ 使用 API 模型: {cfg.model}")
        os.environ["OPENAI_API_KEY"] = cfg.api_key
        if cfg.base_url:
            os.environ["OPENAI_BASE_URL"] = cfg.base_url
        llm_model = init_chat_model(
            model=cfg.model,
            model_provider="deepseek" if "deepseek" in cfg.model.lower() else "openai"
        )

    # Pull the shared agent prompt once and reuse it for every category.
    prompt = hub.pull("hwchase17/openai-tools-agent")
    for category, config in servers_cfgs.items():
        if config["mcpServers"]:
            try:
                logging.info(f"🚀 初始化 {category} MCP 客户端...")
                # Each category gets its own MCP client so its tools stay isolated.
                mcp_client = MultiServerMCPClient(config["mcpServers"])
                mcp_clients[category] = mcp_client

                tools = await mcp_client.get_tools()
                logging.info(f"✅ {category} 类别已加载 {len(tools)} 个 MCP 工具")

                if tools:
                    agent = create_openai_tools_agent(llm_model, tools, prompt)
                    agents[category] = AgentExecutor(agent=agent, tools=tools, verbose=True)
                    logging.info(f"✅ {category} Agent 初始化成功")
                else:
                    logging.warning(f"⚠️ {category} 类别没有找到工具")

            except Exception as e:
                # A failing category must not abort the others.
                logging.error(f"初始化 {category} MCP 客户端失败: {e}", exc_info=True)

    # Fallback: if no per-category agent came up, merge every tool into one agent.
    if not agents and mcp_clients:
        logging.info("🔄 创建通用agent（合并所有工具）...")
        all_tools = []
        for category, client in mcp_clients.items():
            try:
                tools = await client.get_tools()
                all_tools.extend(tools)
                logging.info(f"📦 从 {category} 添加了 {len(tools)} 个工具")
            except Exception as e:
                logging.error(f"从 {category} 获取工具失败: {e}")

        if all_tools:
            # Fix: reuse the prompt pulled above instead of fetching it from
            # LangChain Hub a second time (redundant network round-trip).
            agent = create_openai_tools_agent(llm_model, all_tools, prompt)
            agents["general"] = AgentExecutor(agent=agent, tools=all_tools, verbose=True)
            logging.info(f"✅ 通用Agent初始化成功，共 {len(all_tools)} 个工具")
        else:
            logging.warning("⚠️ 没有找到任何工具，无法创建通用agent")

    return agents, mcp_clients

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: build all MCP agents at startup, log at shutdown."""
    logging.info("🚀 正在初始化 MCP Agent...")
    agents, mcp_clients = await initialize_agents()
    app.state.agents = agents
    # Bug fix: /health reads app.state.mcp_clients, but only app.state.mcp_client
    # (singular) was being set, so /health raised AttributeError. Store the dict
    # under both names to stay backward compatible with any other reader.
    app.state.mcp_client = mcp_clients
    app.state.mcp_clients = mcp_clients
    logging.info("✅ MCP Agents 初始化完成！")

    yield  # application is serving requests

    logging.info("🛑 正在清理 MCP 客户端资源...")
    # NOTE(review): no explicit client cleanup is performed here (the old
    # cleanup call was commented out) — confirm whether MultiServerMCPClient
    # needs an explicit shutdown.
    logging.info("✅ 资源清理完成")

app = FastAPI(lifespan=lifespan)

@app.post("/chat/ticket")
async def ticket_chat(request: Request):
    """12306 ticket endpoint: forwards the raw JSON body to the ticket agent
    and streams the agent's answer back as plain text."""
    ticket_agent = request.app.state.agents.get("ticket")
    if ticket_agent is None:
        return JSONResponse(status_code=500, content={"error": "ticket Agent未初始化"})

    try:
        # Read the raw bytes first so a malformed body can still be logged
        # before JSON parsing is attempted.
        raw = await request.body()
        logger.info(f"原始请求体: {raw.decode('utf-8', errors='replace')}")

        payload = await request.json()
        logger.info(f"解析后的请求体: {payload}")
        if not payload:
            return JSONResponse(status_code=400, content={"error": "请输入有效消息"})

        token_stream = stream_agent_response(ticket_agent, {"input": payload})
        return StreamingResponse(token_stream, media_type="text/plain")

    except Exception as e:
        logger.error(f"❌ ticket聊天出错: {e}", exc_info=True)
        return JSONResponse(status_code=500, content={"error": str(e)})

@app.post("/chat/{agent_type}")
async def chat_with_agent(agent_type: str, request: Request):
    """Generic chat endpoint: route the message to the agent named in the path
    and stream its answer back as plain text."""
    registry: dict = request.app.state.agents
    executor = registry.get(agent_type)
    if executor is None:
        # Tell the caller which agents actually exist.
        return JSONResponse(
            status_code=404,
            content={
                "error": f"{agent_type} Agent未初始化",
                "available_agents": list(registry.keys())
            }
        )

    try:
        payload = await request.json()
        message = payload.get("message", "").strip()
        if not message:
            return JSONResponse(status_code=400, content={"error": "请输入有效消息"})

        logging.info(f"使用 {agent_type} agent 处理请求: {message}")

        token_stream = stream_agent_response(executor, {"input": message})
        return StreamingResponse(token_stream, media_type="text/plain")

    except Exception as e:
        logging.error(f"{agent_type} 聊天出错: {e}")
        return JSONResponse(status_code=500, content={"error": str(e)})

@app.post("/chat/general")
async def general_chat_stream(request: Request):
    # NOTE(review): registered after /chat/{agent_type}, so the dynamic route
    # captures POST /chat/general first and this handler is unreachable.
    # If it did run it would delegate to the "weather" agent, not "general" —
    # confirm whether that redirect is intentional.
    return await chat_with_agent("weather", request)

@app.post("/chat/rag")
async def rag_chat(request: Request):
    # Delegates to the generic handler. NOTE(review): shadowed by the earlier
    # /chat/{agent_type} route, which already handles this path identically.
    return await chat_with_agent("rag", request)

@app.post("/chat/map")
async def map_chat(request: Request):
    # Delegates to the generic handler. NOTE(review): shadowed by the earlier
    # /chat/{agent_type} route, which already handles this path identically.
    return await chat_with_agent("map", request)

@app.post("/chat/weather")
async def weather_chat(request: Request):
    # Delegates to the generic handler. NOTE(review): shadowed by the earlier
    # /chat/{agent_type} route, which already handles this path identically.
    return await chat_with_agent("weather", request)


@app.post("/chat/flight")
async def flight_chat(request: Request):
    # Delegates to the generic handler. NOTE(review): shadowed by the earlier
    # /chat/{agent_type} route, which already handles this path identically.
    return await chat_with_agent("flight", request)

# @app.post("/chat/ticket")
# async def ticket_chat(request: Request):
#     agents: dict = request.app.state.agents
#     agent_executor = agents.get("ticket")
#     if not agent_executor:
#         return JSONResponse(status_code=500, content={"error": "ticket Agent未初始化"})
#
#     try:
#         data = await request.json()
#         user_input = data
#         print(f"接收用户请求体（原始）: {user_input}")
#         logger.info(f"接收用户请求体（原始）: {user_input}")
#         if not user_input:
#             return JSONResponse(status_code=400, content={"error": "请输入有效消息"})
#
#         # 调用ticket专用agent
#         input_data = {"input": user_input}
#         response_stream = stream_agent_response(agent_executor, input_data)
#
#         return StreamingResponse(response_stream, media_type="text/plain")
#
#     except Exception as e:
#         logging.error(f"ticket聊天出错: {e}")
#         return JSONResponse(status_code=500, content={"error": str(e)})


# Diagnostic route for checking that streaming responses reach the client.
@app.get("/test-stream")
async def test_stream():
    """Emit ten SSE-formatted demo frames, one per second."""
    async def demo_frames():
        for idx in range(10):
            yield f"data: Message {idx}\n\n"  # SSE frame layout
            await asyncio.sleep(1)  # simulated per-chunk latency
    # Switch media_type to "text/event-stream" to consume as real SSE.
    return StreamingResponse(demo_frames(), media_type="text/plain")

async def stream_agent_response(agent_executor, input_data: dict) -> AsyncGenerator[str, None]:
    """Relay an AgentExecutor run as a stream of text tokens.

    Subscribes to astream_events (v2) and forwards every non-empty chunk the
    chat model emits; any failure is reported to the client as one final
    error string instead of raising.
    """
    try:
        async for event in agent_executor.astream_events(input_data, version="v2"):
            if event["event"] != "on_chat_model_stream":
                continue
            token = event["data"]["chunk"].content
            if not token:
                continue
            yield token
            # Cede control to the event loop so chunks flush promptly.
            await asyncio.sleep(0)
    except Exception as exc:
        logging.error(f"流式传输过程中出错: {exc}")
        yield f"Error during streaming: {str(exc)}"

@app.get("/")
async def root():
    """Landing endpoint: return a static welcome payload."""
    greeting = "你好! 欢迎使用 MCP-Agent 后端服务"
    return {"message": greeting}


@app.get("/health")
async def health(request: Request):
    """Health probe: report which agents and MCP clients were initialized."""
    state = request.app.state
    agents = state.agents
    # Bug fix: the lifespan stores the client dict as app.state.mcp_client
    # (singular), so reading .mcp_clients unconditionally raised
    # AttributeError. Accept either attribute name.
    mcp_clients = getattr(state, "mcp_clients", None)
    if mcp_clients is None:
        mcp_clients = getattr(state, "mcp_client", {})
    return {
        "status": "ok",
        "agents_ready": list(agents.keys()),
        "mcp_clients_ready": list(mcp_clients.keys()),
        "agent_details": {agent: "已初始化" for agent in agents.keys()}
    }


if __name__ == '__main__':
    # Serve on all interfaces, port 12344, with auto-reload disabled.
    uvicorn.run("myfastapi5:app", host="0.0.0.0", port=12344, reload=False)