{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a7b51924-2932-4fe4-a04b-c54871ddc760",
   "metadata": {},
   "source": [
    "MCP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "856866ad-d9a9-4c23-a6e6-6c0fca0319f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[2mResolved \u001b[1m178 packages\u001b[0m \u001b[2min 2.27s\u001b[0m\u001b[0m\n",
      "\u001b[2mPrepared \u001b[1m2 packages\u001b[0m \u001b[2min 443ms\u001b[0m\u001b[0m\n",
      "\u001b[1m\u001b[33mwarning\u001b[39m\u001b[0m\u001b[1m:\u001b[0m \u001b[1mFailed to hardlink files; falling back to full copy. This may lead to degraded performance.\n",
      "         If the cache and target directories are on different filesystems, hardlinking may not be supported.\n",
      "         If this is intentional, set `export UV_LINK_MODE=copy` or use `--link-mode=copy` to suppress this warning.\u001b[0m\n",
      "\u001b[2mInstalled \u001b[1m2 packages\u001b[0m \u001b[2min 161ms\u001b[0m\u001b[0m\n",
      " \u001b[32m+\u001b[39m \u001b[1mmcp\u001b[0m\u001b[2m==1.9.4\u001b[0m\n",
      " \u001b[32m+\u001b[39m \u001b[1msse-starlette\u001b[0m\u001b[2m==2.3.6\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "# 安装 MCP SDK\n",
    "! uv add mcp openai python-dotenv httpx"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c4c56ff-7fd9-472f-b0f0-5ddb2c73f29b",
   "metadata": {},
   "source": [
    "用于天气查询的server服务器代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86d8cd08-34e3-4988-8206-1f25e7c4361e",
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile weather_server.py\n",
    "\"\"\"MCP weather server (stdio transport).\n",
    "\n",
    "Written to disk with %%writefile instead of being executed in this cell:\n",
    "calling mcp.run() inside Jupyter raises \"RuntimeError: Already running\n",
    "asyncio in this thread\" because the notebook kernel already owns an event\n",
    "loop.  The MCP client launches this file as a stdio subprocess instead.\n",
    "\"\"\"\n",
    "import json\n",
    "import os\n",
    "import httpx\n",
    "from typing import Any\n",
    "from mcp.server.fastmcp import FastMCP\n",
    "\n",
    "# Initialize the MCP server.\n",
    "mcp = FastMCP(\"WeatherServer\")\n",
    "\n",
    "# OpenWeather API configuration.  Never commit an API key in source:\n",
    "# read it from the environment (set OPENWEATHER_API_KEY, e.g. via .env,\n",
    "# before launching the client that spawns this server).\n",
    "OPENWEATHER_API_BASE = \"https://api.openweathermap.org/data/2.5/weather\"\n",
    "API_KEY = os.getenv(\"OPENWEATHER_API_KEY\", \"\")\n",
    "USER_AGENT = \"weather-app/1.0\"\n",
    "\n",
    "async def fetch_weather(city: str) -> dict[str, Any] | None:\n",
    "    \"\"\"Fetch current weather for a city from the OpenWeather API.\n",
    "\n",
    "    :param city: City name (in English, e.g. \"Beijing\").\n",
    "    :return: Weather data dict; on failure, a dict with an \"error\" key.\n",
    "    \"\"\"\n",
    "    params = {\n",
    "        \"q\": city,\n",
    "        \"appid\": API_KEY,\n",
    "        \"units\": \"metric\",\n",
    "        \"lang\": \"zh_cn\"\n",
    "    }\n",
    "    headers = {\"User-Agent\": USER_AGENT}\n",
    "\n",
    "    async with httpx.AsyncClient() as client:\n",
    "        try:\n",
    "            response = await client.get(OPENWEATHER_API_BASE, params=params, headers=headers, timeout=30.0)\n",
    "            response.raise_for_status()\n",
    "            return response.json()  # parsed JSON body (dict)\n",
    "        except httpx.HTTPStatusError as e:\n",
    "            return {\"error\": f\"HTTP 错误: {e.response.status_code}\"}\n",
    "        except Exception as e:\n",
    "            return {\"error\": f\"请求失败: {str(e)}\"}\n",
    "\n",
    "def format_weather(data: dict[str, Any] | str) -> str:\n",
    "    \"\"\"Format weather data as human-readable text.\n",
    "\n",
    "    :param data: Weather data (dict, or a JSON string parsing to one).\n",
    "    :return: Formatted weather report string.\n",
    "    \"\"\"\n",
    "    # Accept a JSON string as well as a dict.\n",
    "    if isinstance(data, str):\n",
    "        try:\n",
    "            data = json.loads(data)\n",
    "        except Exception as e:\n",
    "            return f\"无法解析天气数据: {e}\"\n",
    "\n",
    "    # Propagate any upstream error message directly.\n",
    "    if \"error\" in data:\n",
    "        return f\"⚠️ {data['error']}\"\n",
    "\n",
    "    # Extract fields defensively -- every key may be missing.\n",
    "    city = data.get(\"name\", \"未知\")\n",
    "    country = data.get(\"sys\", {}).get(\"country\", \"未知\")\n",
    "    temp = data.get(\"main\", {}).get(\"temp\", \"N/A\")\n",
    "    humidity = data.get(\"main\", {}).get(\"humidity\", \"N/A\")\n",
    "    wind_speed = data.get(\"wind\", {}).get(\"speed\", \"N/A\")\n",
    "    # \"weather\" may be present but EMPTY; `get(..., [{}])` alone would not\n",
    "    # guard the [0] index in that case, so fall back with `or` as well.\n",
    "    weather_list = data.get(\"weather\") or [{}]\n",
    "    description = weather_list[0].get(\"description\", \"未知\")\n",
    "\n",
    "    return (\n",
    "        f\"🌍 {city}, {country}\\n\"\n",
    "        f\"🌡 温度: {temp}°C\\n\"\n",
    "        f\"💧 湿度: {humidity}%\\n\"\n",
    "        f\"🌬 风速: {wind_speed} m/s\\n\"\n",
    "        f\"🌤 天气: {description}\\n\"\n",
    "    )\n",
    "\n",
    "@mcp.tool()\n",
    "async def query_weather(city: str) -> str:\n",
    "    \"\"\"Query today's weather for the given city.\n",
    "\n",
    "    :param city: City name in English.\n",
    "    :return: Formatted weather report.\n",
    "    \"\"\"\n",
    "    data = await fetch_weather(city)\n",
    "    return format_weather(data)\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Serve over standard I/O (spawned as a subprocess by the MCP client).\n",
    "    mcp.run(transport='stdio')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25c685c9-b0fd-4eaa-84cc-47728ef4f377",
   "metadata": {},
   "source": [
    "- 创建write_server.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9eaa7174-2fec-4a2e-96a7-63124168d12f",
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile write_server.py\n",
    "\"\"\"MCP file-writing server (stdio transport).\n",
    "\n",
    "Saved to disk with %%writefile because calling mcp.run() inside Jupyter\n",
    "raises \"RuntimeError: Already running asyncio in this thread\"; the MCP\n",
    "client launches this file as a stdio subprocess instead.\n",
    "\"\"\"\n",
    "from mcp.server.fastmcp import FastMCP\n",
    "\n",
    "# Initialize the MCP server.\n",
    "mcp = FastMCP(\"WriteServer\")\n",
    "USER_AGENT = \"write-app/1.0\"\n",
    "\n",
    "@mcp.tool()\n",
    "async def write_file(content: str) -> str:\n",
    "    \"\"\"Write the given content to a local file.\n",
    "\n",
    "    :param content: Required string; the text to write to the document.\n",
    "    :return: A message describing whether the write succeeded.\n",
    "    \"\"\"\n",
    "    # NOTE(review): demo stub -- nothing is actually written to disk yet;\n",
    "    # the tool only reports success.  Implement real file I/O here.\n",
    "    return \"已成功写入本地文件。\"\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Serve over standard I/O.\n",
    "    mcp.run(transport='stdio')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b95281c1-e021-490c-817b-f96844f1533d",
   "metadata": {},
   "source": [
    "- 天气查询客户端client创建流程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f46b7d27-7aa6-481c-ba23-e70aa6ccb49a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import json\n",
    "import logging\n",
    "import os\n",
    "import shutil\n",
    "from contextlib import AsyncExitStack\n",
    "from typing import Any, Dict, List, Optional\n",
    "\n",
    "import httpx\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI  # OpenAI Python SDK\n",
    "from mcp import ClientSession, StdioServerParameters\n",
    "from mcp.client.stdio import stdio_client\n",
    "\n",
    "# Configure logging\n",
    "logging.basicConfig(\n",
    "    level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n",
    ")\n",
    "\n",
    "\n",
    "# =============================\n",
    "# 配置加载类（支持环境变量及配置文件）\n",
    "# =============================\n",
    "class Configuration:\n",
    "    \"\"\"管理 MCP 客户端的环境变量和配置文件\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        load_dotenv()\n",
    "        # 从环境变量中加载 API key, base_url 和 model\n",
    "        self.api_key = os.getenv(\"LLM_API_KEY\")\n",
    "        self.base_url = os.getenv(\"BASE_URL\")\n",
    "        self.model = os.getenv(\"MODEL\")\n",
    "        if not self.api_key:\n",
    "            raise ValueError(\"❌ 未找到 LLM_API_KEY，请在 .env 文件中配置\")\n",
    "\n",
    "    @staticmethod\n",
    "    def load_config(file_path: str) -> Dict[str, Any]:\n",
    "        \"\"\"\n",
    "        从 JSON 文件加载服务器配置\n",
    "        \n",
    "        Args:\n",
    "            file_path: JSON 配置文件路径\n",
    "        \n",
    "        Returns:\n",
    "            包含服务器配置的字典\n",
    "        \"\"\"\n",
    "        with open(file_path, \"r\") as f:\n",
    "            return json.load(f)\n",
    "\n",
    "\n",
    "# =============================\n",
    "# MCP 服务器客户端类\n",
    "# =============================\n",
    "class Server:\n",
    "    \"\"\"管理单个 MCP 服务器连接和工具调用\"\"\"\n",
    "\n",
    "    def __init__(self, name: str, config: Dict[str, Any]) -> None:\n",
    "        self.name: str = name\n",
    "        self.config: Dict[str, Any] = config\n",
    "        self.session: Optional[ClientSession] = None\n",
    "        self.exit_stack: AsyncExitStack = AsyncExitStack()\n",
    "        self._cleanup_lock = asyncio.Lock()\n",
    "\n",
    "    async def initialize(self) -> None:\n",
    "        \"\"\"初始化与 MCP 服务器的连接\"\"\"\n",
    "        # command 字段直接从配置获取\n",
    "        command = self.config[\"command\"]\n",
    "        if command is None:\n",
    "            raise ValueError(\"command 不能为空\")\n",
    "\n",
    "        server_params = StdioServerParameters(\n",
    "            command=command,\n",
    "            args=self.config[\"args\"],\n",
    "            env={**os.environ, **self.config[\"env\"]} if self.config.get(\"env\") else None,\n",
    "        )\n",
    "        try:\n",
    "            stdio_transport = await self.exit_stack.enter_async_context(\n",
    "                stdio_client(server_params)\n",
    "            )\n",
    "            read_stream, write_stream = stdio_transport\n",
    "            session = await self.exit_stack.enter_async_context(\n",
    "                ClientSession(read_stream, write_stream)\n",
    "            )\n",
    "            await session.initialize()\n",
    "            self.session = session\n",
    "        except Exception as e:\n",
    "            logging.error(f\"Error initializing server {self.name}: {e}\")\n",
    "            await self.cleanup()\n",
    "            raise\n",
    "\n",
    "    async def list_tools(self) -> List[Any]:\n",
    "        \"\"\"获取服务器可用的工具列表\n",
    "\n",
    "        Returns:\n",
    "            工具列表\n",
    "        \"\"\"\n",
    "        if not self.session:\n",
    "            raise RuntimeError(f\"Server {self.name} not initialized\")\n",
    "        tools_response = await self.session.list_tools()\n",
    "        tools = []\n",
    "        for item in tools_response:\n",
    "            if isinstance(item, tuple) and item[0] == \"tools\":\n",
    "                for tool in item[1]:\n",
    "                    tools.append(Tool(tool.name, tool.description, tool.inputSchema))\n",
    "        return tools\n",
    "\n",
    "    async def execute_tool(\n",
    "        self, tool_name: str, arguments: Dict[str, Any], retries: int = 2, delay: float = 1.0\n",
    "    ) -> Any:\n",
    "        \"\"\"执行指定工具，并支持重试机制\n",
    "\n",
    "        Args:\n",
    "            tool_name: 工具名称\n",
    "            arguments: 工具参数\n",
    "            retries: 重试次数\n",
    "            delay: 重试间隔秒数\n",
    "\n",
    "        Returns:\n",
    "            工具调用结果\n",
    "        \"\"\"\n",
    "        if not self.session:\n",
    "            raise RuntimeError(f\"Server {self.name} not initialized\")\n",
    "        attempt = 0\n",
    "        while attempt < retries:\n",
    "            try:\n",
    "                logging.info(f\"Executing {tool_name} on server {self.name}...\")\n",
    "                result = await self.session.call_tool(tool_name, arguments)\n",
    "                return result\n",
    "            except Exception as e:\n",
    "                attempt += 1\n",
    "                logging.warning(\n",
    "                    f\"Error executing tool: {e}. Attempt {attempt} of {retries}.\"\n",
    "                )\n",
    "                if attempt < retries:\n",
    "                    logging.info(f\"Retrying in {delay} seconds...\")\n",
    "                    await asyncio.sleep(delay)\n",
    "                else:\n",
    "                    logging.error(\"Max retries reached. Failing.\")\n",
    "                    raise\n",
    "\n",
    "    async def cleanup(self) -> None:\n",
    "        \"\"\"清理服务器资源\"\"\"\n",
    "        async with self._cleanup_lock:\n",
    "            try:\n",
    "                await self.exit_stack.aclose()\n",
    "                self.session = None\n",
    "            except Exception as e:\n",
    "                logging.error(f\"Error during cleanup of server {self.name}: {e}\")\n",
    "\n",
    "\n",
    "# =============================\n",
    "# 工具封装类\n",
    "# =============================\n",
    "class Tool:\n",
    "    \"\"\"封装 MCP 返回的工具信息\"\"\"\n",
    "\n",
    "    def __init__(self, name: str, description: str, input_schema: Dict[str, Any]) -> None:\n",
    "        self.name: str = name\n",
    "        self.description: str = description\n",
    "        self.input_schema: Dict[str, Any] = input_schema\n",
    "\n",
    "    def format_for_llm(self) -> str:\n",
    "        \"\"\"生成用于 LLM 提示的工具描述\"\"\"\n",
    "        args_desc = []\n",
    "        if \"properties\" in self.input_schema:\n",
    "            for param_name, param_info in self.input_schema[\"properties\"].items():\n",
    "                arg_desc = f\"- {param_name}: {param_info.get('description', 'No description')}\"\n",
    "                if param_name in self.input_schema.get(\"required\", []):\n",
    "                    arg_desc += \" (required)\"\n",
    "                args_desc.append(arg_desc)\n",
    "        return f\"\"\"\n",
    "Tool: {self.name}\n",
    "Description: {self.description}\n",
    "Arguments:\n",
    "{chr(10).join(args_desc)}\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "# =============================\n",
    "# LLM 客户端封装类（使用 OpenAI SDK）\n",
    "# =============================\n",
    "class LLMClient:\n",
    "    \"\"\"使用 OpenAI SDK 与大模型交互\"\"\"\n",
    "\n",
    "    def __init__(self, api_key: str, base_url: Optional[str], model: str) -> None:\n",
    "        self.client = OpenAI(api_key=api_key, base_url=base_url)\n",
    "        self.model = model\n",
    "\n",
    "    def get_response(self, messages: List[Dict[str, Any]], tools: Optional[List[Dict[str, Any]]] = None) -> Any:\n",
    "        \"\"\"\n",
    "        发送消息给大模型 API，支持传入工具参数（function calling 格式）\n",
    "        \"\"\"\n",
    "        payload = {\n",
    "            \"model\": self.model,\n",
    "            \"messages\": messages,\n",
    "            \"tools\": tools,\n",
    "        }\n",
    "        try:\n",
    "            response = self.client.chat.completions.create(**payload)\n",
    "            return response\n",
    "        except Exception as e:\n",
    "            logging.error(f\"Error during LLM call: {e}\")\n",
    "            raise\n",
    "\n",
    "\n",
    "# =============================\n",
    "# 多服务器 MCP 客户端类（集成配置文件、工具格式转换与 OpenAI SDK 调用）\n",
    "# =============================\n",
    "class MultiServerMCPClient:\n",
    "    def __init__(self) -> None:\n",
    "        \"\"\"\n",
    "        管理多个 MCP 服务器，并使用 OpenAI Function Calling 风格的接口调用大模型\n",
    "        \"\"\"\n",
    "        self.exit_stack = AsyncExitStack()\n",
    "        config = Configuration()\n",
    "        self.openai_api_key = config.api_key\n",
    "        self.base_url = config.base_url\n",
    "        self.model = config.model\n",
    "        self.client = LLMClient(self.openai_api_key, self.base_url, self.model)\n",
    "        # (server_name -> Server 对象)\n",
    "        self.servers: Dict[str, Server] = {}\n",
    "        # 各个 server 的工具列表\n",
    "        self.tools_by_server: Dict[str, List[Any]] = {}\n",
    "        self.all_tools: List[Dict[str, Any]] = []\n",
    "\n",
    "    async def connect_to_servers(self, servers_config: Dict[str, Any]) -> None:\n",
    "        \"\"\"\n",
    "        根据配置文件同时启动多个服务器并获取工具\n",
    "        servers_config 的格式为：\n",
    "        {\n",
    "          \"mcpServers\": {\n",
    "              \"sqlite\": { \"command\": \"uvx\", \"args\": [ ... ] },\n",
    "              \"puppeteer\": { \"command\": \"npx\", \"args\": [ ... ] },\n",
    "              ...\n",
    "          }\n",
    "        }\n",
    "        \"\"\"\n",
    "        mcp_servers = servers_config.get(\"mcpServers\", {})\n",
    "        for server_name, srv_config in mcp_servers.items():\n",
    "            server = Server(server_name, srv_config)\n",
    "            await server.initialize()\n",
    "            self.servers[server_name] = server\n",
    "            tools = await server.list_tools()\n",
    "            self.tools_by_server[server_name] = tools\n",
    "\n",
    "            for tool in tools:\n",
    "                # 统一重命名：serverName_toolName\n",
    "                function_name = f\"{server_name}_{tool.name}\"\n",
    "                self.all_tools.append({\n",
    "                    \"type\": \"function\",\n",
    "                    \"function\": {\n",
    "                        \"name\": function_name,\n",
    "                        \"description\": tool.description,\n",
    "                        \"input_schema\": tool.input_schema\n",
    "                    }\n",
    "                })\n",
    "\n",
    "        # 转换为 OpenAI Function Calling 所需格式\n",
    "        self.all_tools = await self.transform_json(self.all_tools)\n",
    "\n",
    "        logging.info(\"\\n✅ 已连接到下列服务器:\")\n",
    "        for name in self.servers:\n",
    "            srv_cfg = mcp_servers[name]\n",
    "            logging.info(f\"  - {name}: command={srv_cfg['command']}, args={srv_cfg['args']}\")\n",
    "        logging.info(\"\\n汇总的工具:\")\n",
    "        for t in self.all_tools:\n",
    "            logging.info(f\"  - {t['function']['name']}\")\n",
    "\n",
    "    async def transform_json(self, json_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n",
    "        \"\"\"\n",
    "        将工具的 input_schema 转换为 OpenAI 所需的 parameters 格式，并删除多余字段\n",
    "        \"\"\"\n",
    "        result = []\n",
    "        for item in json_data:\n",
    "            if not isinstance(item, dict) or \"type\" not in item or \"function\" not in item:\n",
    "                continue\n",
    "            old_func = item[\"function\"]\n",
    "            if not isinstance(old_func, dict) or \"name\" not in old_func or \"description\" not in old_func:\n",
    "                continue\n",
    "            new_func = {\n",
    "                \"name\": old_func[\"name\"],\n",
    "                \"description\": old_func[\"description\"],\n",
    "                \"parameters\": {}\n",
    "            }\n",
    "            if \"input_schema\" in old_func and isinstance(old_func[\"input_schema\"], dict):\n",
    "                old_schema = old_func[\"input_schema\"]\n",
    "                new_func[\"parameters\"][\"type\"] = old_schema.get(\"type\", \"object\")\n",
    "                new_func[\"parameters\"][\"properties\"] = old_schema.get(\"properties\", {})\n",
    "                new_func[\"parameters\"][\"required\"] = old_schema.get(\"required\", [])\n",
    "            new_item = {\n",
    "                \"type\": item[\"type\"],\n",
    "                \"function\": new_func\n",
    "            }\n",
    "            result.append(new_item)\n",
    "        return result\n",
    "\n",
    "    async def chat_base(self, messages: List[Dict[str, Any]]) -> Any:\n",
    "        \"\"\"\n",
    "        使用 OpenAI 接口进行对话，并支持多次工具调用（Function Calling）。\n",
    "        如果返回 finish_reason 为 \"tool_calls\"，则进行工具调用后再发起请求。\n",
    "        \"\"\"\n",
    "        response = self.client.get_response(messages, tools=self.all_tools)\n",
    "        # 如果模型返回工具调用\n",
    "        if response.choices[0].finish_reason == \"tool_calls\":\n",
    "            while True:\n",
    "                messages = await self.create_function_response_messages(messages, response)\n",
    "                response = self.client.get_response(messages, tools=self.all_tools)\n",
    "                if response.choices[0].finish_reason != \"tool_calls\":\n",
    "                    break\n",
    "        return response\n",
    "\n",
    "    async def create_function_response_messages(self, messages: List[Dict[str, Any]], response: Any) -> List[Dict[str, Any]]:\n",
    "        \"\"\"\n",
    "        将模型返回的工具调用解析执行，并将结果追加到消息队列中\n",
    "        \"\"\"\n",
    "        function_call_messages = response.choices[0].message.tool_calls\n",
    "        messages.append(response.choices[0].message.model_dump())\n",
    "        for function_call_message in function_call_messages:\n",
    "            tool_name = function_call_message.function.name\n",
    "            tool_args = json.loads(function_call_message.function.arguments)\n",
    "            # 调用 MCP 工具\n",
    "            function_response = await self._call_mcp_tool(tool_name, tool_args)\n",
    "            # 🔍 打印返回值及其类型\n",
    "            # print(f\"[DEBUG] tool_name: {tool_name}\")\n",
    "            # print(f\"[DEBUG] tool_args: {tool_args}\")\n",
    "            # print(f\"[DEBUG] function_response: {function_response}\")\n",
    "            # print(f\"[DEBUG] type(function_response): {type(function_response)}\")\n",
    "            messages.append({\n",
    "                \"role\": \"tool\",\n",
    "                \"content\": function_response,\n",
    "                \"tool_call_id\": function_call_message.id,\n",
    "            })\n",
    "        return messages\n",
    "\n",
    "    async def process_query(self, user_query: str) -> str:\n",
    "        \"\"\"\n",
    "        OpenAI Function Calling 流程：\n",
    "         1. 发送用户消息 + 工具信息\n",
    "         2. 若模型返回 finish_reason 为 \"tool_calls\"，则解析并调用 MCP 工具\n",
    "         3. 将工具调用结果返回给模型，获得最终回答\n",
    "        \"\"\"\n",
    "        messages = [{\"role\": \"user\", \"content\": user_query}]\n",
    "        response = self.client.get_response(messages, tools=self.all_tools)\n",
    "        content = response.choices[0]\n",
    "        logging.info(content)\n",
    "        if content.finish_reason == \"tool_calls\":\n",
    "            tool_call = content.message.tool_calls[0]\n",
    "            tool_name = tool_call.function.name\n",
    "            tool_args = json.loads(tool_call.function.arguments)\n",
    "            logging.info(f\"\\n[ 调用工具: {tool_name}, 参数: {tool_args} ]\\n\")\n",
    "            result = await self._call_mcp_tool(tool_name, tool_args)\n",
    "            messages.append(content.message.model_dump())\n",
    "            messages.append({\n",
    "                \"role\": \"tool\",\n",
    "                \"content\": result,\n",
    "                \"tool_call_id\": tool_call.id,\n",
    "            })\n",
    "            response = self.client.get_response(messages, tools=self.all_tools)\n",
    "            return response.choices[0].message.content\n",
    "        return content.message.content\n",
    "\n",
    "    async def _call_mcp_tool(self, tool_full_name: str, tool_args: Dict[str, Any]) -> str:\n",
    "        \"\"\"\n",
    "        根据 \"serverName_toolName\" 格式调用相应 MCP 工具\n",
    "        \"\"\"\n",
    "        parts = tool_full_name.split(\"_\", 1)\n",
    "        if len(parts) != 2:\n",
    "            return f\"无效的工具名称: {tool_full_name}\"\n",
    "        server_name, tool_name = parts\n",
    "        server = self.servers.get(server_name)\n",
    "        if not server:\n",
    "            return f\"找不到服务器: {server_name}\"\n",
    "        resp = await server.execute_tool(tool_name, tool_args)\n",
    "        \n",
    "        # 🛠️ 修复点：提取 TextContent 中的文本（或转成字符串）\n",
    "        content = resp.content\n",
    "        if isinstance(content, list):\n",
    "            # 提取所有 TextContent 对象中的 text 字段\n",
    "            texts = [c.text for c in content if hasattr(c, \"text\")]\n",
    "            return \"\\n\".join(texts)\n",
    "        elif isinstance(content, dict) or isinstance(content, list):\n",
    "            # 如果是 dict 或 list，但不是 TextContent 类型\n",
    "            return json.dumps(content, ensure_ascii=False)\n",
    "        elif content is None:\n",
    "            return \"工具执行无输出\"\n",
    "        else:\n",
    "            return str(content)\n",
    "\n",
    "    async def chat_loop(self) -> None:\n",
    "        \"\"\"多服务器 MCP + OpenAI Function Calling 客户端主循环\"\"\"\n",
    "        logging.info(\"\\n🤖 多服务器 MCP + Function Calling 客户端已启动！输入 'quit' 退出。\")\n",
    "        messages: List[Dict[str, Any]] = []\n",
    "        while True:\n",
    "            query = input(\"\\n你: \").strip()\n",
    "            if query.lower() == \"quit\":\n",
    "                break\n",
    "            try:\n",
    "                messages.append({\"role\": \"user\", \"content\": query})\n",
    "                messages = messages[-20:]  # 保持最新 20 条上下文\n",
    "                response = await self.chat_base(messages)\n",
    "                messages.append(response.choices[0].message.model_dump())\n",
    "                result = response.choices[0].message.content\n",
    "                # logging.info(f\"\\nAI: {result}\")\n",
    "                print(f\"\\nAI: {result}\")\n",
    "            except Exception as e:\n",
    "                print(f\"\\n⚠️  调用过程出错: {e}\")\n",
    "\n",
    "    async def cleanup(self) -> None:\n",
    "        \"\"\"关闭所有资源\"\"\"\n",
    "        await self.exit_stack.aclose()\n",
    "\n",
    "\n",
    "# =============================\n",
    "# Main entry point\n",
    "# =============================\n",
    "async def main() -> None:\n",
    "    \"\"\"Load the server config, connect the multi-server client, and run the chat loop.\"\"\"\n",
    "    # Load MCP server definitions from the JSON config file\n",
    "    config = Configuration()\n",
    "    servers_config = config.load_config(\"servers_config.json\")\n",
    "    client = MultiServerMCPClient()\n",
    "    try:\n",
    "        await client.connect_to_servers(servers_config)\n",
    "        await client.chat_loop()\n",
    "    finally:\n",
    "        try:\n",
    "            # Short pause lets in-flight async tasks settle before teardown\n",
    "            await asyncio.sleep(0.1)\n",
    "            await client.cleanup()\n",
    "        except RuntimeError as e:\n",
    "            # The cancel-scope RuntimeError can occur when the exit stack is closed\n",
    "            # from a different task than the one that opened it; safe to ignore at shutdown.\n",
    "            if \"Attempted to exit cancel scope\" in str(e):\n",
    "                logging.info(\"退出时检测到 cancel scope 异常，已忽略。\")\n",
    "            else:\n",
    "                raise\n",
    "\n",
    "# NOTE(review): asyncio.run() raises \"Already running asyncio in this thread\" inside\n",
    "# Jupyter's running event loop (see the cell's traceback) — run this as a standalone\n",
    "# script, or call `await main()` directly in a notebook cell instead.\n",
    "if __name__ == \"__main__\":\n",
    "    asyncio.run(main())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d1a7c676-e313-415c-b094-bc6299e52cc0",
   "metadata": {},
   "source": [
    "多服务器 MCP + LangChain Agent 示例\n",
    "---------------------------------\n",
    "1. 读取 .env 中的 LLM_API_KEY / BASE_URL / MODEL\n",
    "2. 读取 servers_config.json 中的 MCP 服务器信息\n",
    "3. 启动 MCP 服务器（支持多个）\n",
    "4. 将所有工具注入 LangChain Agent，由大模型自动选择并调用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed6fa2e8-6c3d-4647-b860-75d40873587b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import json\n",
    "import logging\n",
    "import os\n",
    "from typing import Any, Dict, List\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "from langchain import hub\n",
    "from langchain.agents import AgentExecutor, create_openai_tools_agent\n",
    "from langchain.chat_models import init_chat_model\n",
    "from langchain_mcp_adapters.client import MultiServerMCPClient\n",
    "from langchain_mcp_adapters.tools import load_mcp_tools\n",
    "\n",
    "# ────────────────────────────\n",
    "# 环境配置\n",
    "# ────────────────────────────\n",
    "\n",
    "class Configuration:\n",
    "    \"\"\"Read LLM settings from .env and MCP server definitions from servers_config.json.\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        # Populate os.environ from a local .env file, then read the LLM settings.\n",
    "        load_dotenv()\n",
    "        self.api_key: str = os.getenv(\"LLM_API_KEY\") or \"\"\n",
    "        self.base_url: str | None = os.getenv(\"BASE_URL\")  # e.g. https://api.deepseek.com for DeepSeek\n",
    "        self.model: str = os.getenv(\"MODEL\") or \"deepseek-chat\"\n",
    "        if not self.api_key:\n",
    "            raise ValueError(\"❌ 未找到 LLM_API_KEY，请在 .env 中配置\")\n",
    "\n",
    "    @staticmethod\n",
    "    def load_servers(file_path: str = \"servers_config.json\") -> Dict[str, Any]:\n",
    "        \"\"\"Return the 'mcpServers' mapping from the given JSON file ({} if the key is absent).\"\"\"\n",
    "        with open(file_path, \"r\", encoding=\"utf-8\") as f:\n",
    "            return json.load(f).get(\"mcpServers\", {})\n",
    "\n",
    "# ────────────────────────────\n",
    "# Main logic\n",
    "# ────────────────────────────\n",
    "async def run_chat_loop() -> None:\n",
    "    \"\"\"Start the MCP servers, build a LangChain agent over their tools, and run a CLI chat.\n",
    "\n",
    "    Reads LLM_API_KEY / BASE_URL / MODEL via Configuration, exports them so both the\n",
    "    DeepSeek and OpenAI-compatible LangChain providers can pick them up, then loops on\n",
    "    stdin until the user types 'quit'. MCP resources are released even if agent setup\n",
    "    or the chat loop raises.\n",
    "    \"\"\"\n",
    "    cfg = Configuration()\n",
    "    # Export the key/base URL for the DeepSeek provider (read from the environment by\n",
    "    # init_chat_model). Use cfg.api_key directly instead of re-reading LLM_API_KEY.\n",
    "    os.environ[\"DEEPSEEK_API_KEY\"] = cfg.api_key\n",
    "    if cfg.base_url:\n",
    "        os.environ[\"DEEPSEEK_API_BASE\"] = cfg.base_url\n",
    "    # ...and for the OpenAI-compatible provider (custom base URL, e.g. DeepSeek).\n",
    "    os.environ[\"OPENAI_API_KEY\"] = cfg.api_key\n",
    "    if cfg.base_url:\n",
    "        os.environ[\"OPENAI_BASE_URL\"] = cfg.base_url\n",
    "    servers_cfg = Configuration.load_servers()\n",
    "\n",
    "    # 1️⃣ Connect to all configured MCP servers.\n",
    "    mcp_client = MultiServerMCPClient(servers_cfg)\n",
    "    try:\n",
    "        tools = await mcp_client.get_tools()  # list of LangChain Tool objects\n",
    "        logging.info(f\"✅ 已加载 {len(tools)} 个 MCP 工具： {[t.name for t in tools]}\")\n",
    "\n",
    "        # 2️⃣ Initialise the chat model (DeepSeek / OpenAI / any OpenAI-compatible model).\n",
    "        llm = init_chat_model(\n",
    "            model=cfg.model,\n",
    "            model_provider=\"deepseek\" if \"deepseek\" in cfg.model else \"openai\",\n",
    "        )\n",
    "\n",
    "        # 3️⃣ Build the LangChain agent from the generic openai-tools prompt.\n",
    "        prompt = hub.pull(\"hwchase17/openai-tools-agent\")\n",
    "        agent = create_openai_tools_agent(llm, tools, prompt)\n",
    "        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\n",
    "\n",
    "        # 4️⃣ CLI chat loop.\n",
    "        print(\"\\n🤖 MCP Agent 已启动，输入 'quit' 退出\")\n",
    "        while True:\n",
    "            user_input = input(\"\\n你: \").strip()\n",
    "            if user_input.lower() == \"quit\":\n",
    "                break\n",
    "            try:\n",
    "                result = await agent_executor.ainvoke({\"input\": user_input})\n",
    "                print(f\"\\nAI: {result['output']}\")\n",
    "            except Exception as exc:\n",
    "                print(f\"\\n⚠️  出错: {exc}\")\n",
    "    finally:\n",
    "        # 5️⃣ Always release MCP subprocesses/streams, even if setup or the loop failed.\n",
    "        await mcp_client.cleanup()\n",
    "        print(\"🧹 资源已清理，Bye!\")\n",
    "\n",
    "# ────────────────────────────\n",
    "# Entry point\n",
    "# ────────────────────────────\n",
    "# NOTE(review): asyncio.run() raises RuntimeError inside Jupyter's already-running\n",
    "# event loop — run this cell's code as a standalone script, or `await run_chat_loop()`.\n",
    "if __name__ == \"__main__\":\n",
    "    logging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\")\n",
    "    asyncio.run(run_chat_loop())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "69041642-1caf-41ec-b2e3-24888043757c",
   "metadata": {},
   "source": [
    "LangChain接入MCP的核心原理为： weather_server.py → 启动为子进程 → stdio 通信 → MCP 协议 → 转换为 LangChain 工具 → LangChain Agent 执行读写，核心转换过程为：\n",
    "1. @mcp.tool() → 标准 LangChain Tool\n",
    "2. stdio_client() → 自动处理 read/write 流，其中read 表示从 MCP 服务器读取响应的流，write 表示向 MCP 服务器发送请求的流，对于 stdio weather_server.py，它们就是子进程的 stdout 和 stdin\n",
    "3. load_mcp_tools() → 一键转换所有工具"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
