{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "这是一个完整的代码片段，请接着往下看基础部分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Complete, standalone preview: a ReAct agent with web search and\n",
    "# conversation memory. The rest of this notebook rebuilds the same pieces\n",
    "# step by step (using a different chat model).\n",
    "from langchain_anthropic import ChatAnthropic\n",
    "from langchain_community.tools.tavily_search import TavilySearchResults\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langgraph.checkpoint.memory import MemorySaver\n",
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "# Create the agent: an in-memory checkpointer, an Anthropic chat model, and\n",
    "# a Tavily search tool (both API keys must be present in the environment).\n",
    "memory = MemorySaver()\n",
    "model = ChatAnthropic(model_name=\"claude-3-sonnet-20240229\")\n",
    "search = TavilySearchResults(max_results=2)\n",
    "tools = [search]\n",
    "agent_executor = create_react_agent(model, tools, checkpointer=memory)\n",
    "\n",
    "# Use the agent. The thread_id keys the checkpointer's memory: both .stream\n",
    "# calls below share conversation thread \"abc123\".\n",
    "config = {\"configurable\": {\"thread_id\": \"abc123\"}}\n",
    "for chunk in agent_executor.stream(\n",
    "    {\"messages\": [HumanMessage(content=\"hi im bob! and i live in sf\")]}, config\n",
    "):\n",
    "    print(chunk)\n",
    "    print(\"----\")\n",
    "\n",
    "# Because the thread is checkpointed, the agent can resolve \"where I live\"\n",
    "# from the previous turn.\n",
    "for chunk in agent_executor.stream(\n",
    "    {\"messages\": [HumanMessage(content=\"whats the weather where I live?\")]}, config\n",
    "):\n",
    "    print(chunk)\n",
    "    print(\"----\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import dotenv\n",
    "\n",
    "# Load API keys (OPENAI_API_BASE_URL, OPENAI_API_KEY, TAVILY_API_KEY, ...)\n",
    "# from a local .env file; the True output means a file was found and loaded.\n",
    "dotenv.load_dotenv()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "d:\\conda\\envs\\langchain_rag\\lib\\site-packages\\requests\\__init__.py:86: RequestsDependencyWarning: Unable to find acceptable character detection dependency (chardet or charset_normalizer).\n",
      "  warnings.warn(\n",
      "C:\\Users\\17154\\AppData\\Local\\Temp\\ipykernel_16736\\506345155.py:3: LangChainDeprecationWarning: The class `TavilySearchResults` was deprecated in LangChain 0.3.25 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-tavily package and should be used instead. To use it run `pip install -U :class:`~langchain-tavily` and import as `from :class:`~langchain_tavily import TavilySearch``.\n",
      "  search = TavilySearchResults(max_results=2)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'title': 'San Francisco weather in September 2025 | Weather25.com', 'url': 'https://www.weather25.com/north-america/usa/california/san-francisco?page=month&month=September', 'content': '| 14 Image 41: Sunny 69°/57° | 15 Image 42: Partly cloudy 68°/55° | 16 Image 43: Mist 62°/55° | 17 Image 44: Mist 66°/53° | 18 Image 45: Thundery outbreaks possible 62°/57° | 19 Image 46: Sunny 68°/59° | 20 Image 47: Sunny 68°/59° |\\n| 21 Image 48: Sunny 71°/59° | 22 Image 49: Sunny 71°/59° | 23 Image 50: Sunny 69°/59° | 24 Image 51: Sunny 68°/59° | 25 Image 52: Patchy light rain 68°/60° | 26 Image 53: Patchy light rain 62°/59° | 27 Image 54: Sunny 64°/57° | [...] Tuesday Sep 16 Image 8: Mist 0 in 62°/55°Wednesday Sep 17 Image 9: Mist 0 in 66°/53°Thursday Sep 18 Image 10: Thundery outbreaks possible 0 in 62°/57°Friday Sep 19 Image 11: Sunny 0 in 68°/59°Saturday Sep 20 Image 12: Sunny 0 in 68°/59°Sunday Sep 21 Image 13: Sunny 0 in 71°/59°Monday Sep 22 Image 14: Sunny 0 in 71°/59°Tuesday Sep 23 Image 15: Sunny 0 in 69°/59°Wednesday Sep 24 Image 16: Sunny 0 in 68°/59°Thursday Sep 25 Image 17: Patchy light rain 0 in 68°/60°Friday Sep 26 Image 18: Patchy [...] 
| 28 Image 55: Sunny 66°/59° | 29 Image 56: Sunny 73°/60° | 30 Image 57: Sunny 71°/57° |  |  |  |  |', 'score': 0.90330297}, {'title': 'Weather in San Francisco in September 2025', 'url': 'https://world-weather.info/forecast/usa/san_francisco/september-2025/', 'content': \"Weather\\n Archive\\n Weather Widget\\n\\n World\\n United States\\n California\\n Weather in San Francisco\\n\\n# Weather in San Francisco in September 2025\\n\\nSan Francisco Weather Forecast for September 2025 is based on long term prognosis and previous years' statistical data.\\n\\nJanFebMarAprMayJunJulAugSepOctNovDec\\n\\n  \\n\\n## September\\n\\nStart Week On\\n\\n Sun\\n Mon\\n Tue\\n Wed\\n Thu\\n Fri\\n Sat\\n\\n +73°\\n\\n  8.7 mph SW 29.9 inHg87 %06:40 am07:38 pm\\n 2\\n\\n  +72°\\n\\n  +59°\\n\\n  6.7 mph SW 29.8 inHg84 %06:41 am07:37 pm\\n 3\\n\\n  +68°\\n\\n  +61° [...] +70°\\n\\n  +63°\\n\\n  14.5 mph W 29.8 inHg73 %06:55 am07:11 pm\\n 20\\n\\n  +70°\\n\\n  +61°\\n\\n  15.4 mph W 29.9 inHg71 %06:56 am07:09 pm\\n 21\\n\\n  +70°\\n\\n  +61°\\n\\n  5.8 mph W 30 inHg85 %06:57 am07:07 pm\\n 22\\n\\n  +72°\\n\\n  +61°\\n\\n  19 mph SW 29.7 inHg50 %06:57 am07:06 pm\\n 23\\n\\n  +68°\\n\\n  +61°\\n\\n  18.1 mph SW 29.8 inHg74 %06:58 am07:04 pm\\n 24\\n\\n  +72°\\n\\n  +61°\\n\\n  7.6 mph W 30 inHg72 %06:59 am07:03 pm\\n 25\\n\\n  +64°\\n\\n  +61°\\n\\n  3.8 mph W 30 inHg85 %07:00 am07:01 pm\\n 26\\n\\n  +64°\\n\\n  +61° [...] 
23.3 mph NW 30 inHg57 %07:01 am07:00 pm\\n 27\\n\\n  +68°\\n\\n  +61°\\n\\n  10.5 mph W 30 inHg47 %07:02 am06:58 pm\\n 28\\n\\n  +75°\\n\\n  +64°\\n\\n  15 mph NE 29.9 inHg31 %07:03 am06:57 pm\\n 29\\n\\n  +70°\\n\\n  +66°\\n\\n  17 mph N 29.6 inHg31 %07:04 am06:55 pm\\n 30\\n\\n  +70°\\n\\n  +64°\\n\\n  6.5 mph SW 29.9 inHg35 %07:04 am06:54 pm\\n\\n  \\n\\n## Extended weather forecast in San Francisco\\n\\nHourlyWeek10-Day14-Day30-DayYear\\n\\n## Weather in large and nearby cities\\n\\nWeather in Washington, D.C.+64°\\n\\nSacramento+70°\\n\\nPleasanton+63°\", 'score': 0.882276}]\n"
     ]
    }
   ],
   "source": [
    "from langchain_community.tools.tavily_search import TavilySearchResults\n",
    "\n",
    "# NOTE(review): this cell's stderr shows TavilySearchResults is deprecated\n",
    "# (since LangChain 0.3.25) in favour of the langchain-tavily package; kept\n",
    "# as-is here to avoid adding a new dependency.\n",
    "# Requires TAVILY_API_KEY in the environment (loaded by load_dotenv above).\n",
    "search = TavilySearchResults(max_results=2)\n",
    "search_results = search.invoke(\"what is the weather in SF\")\n",
    "print(search_results)\n",
    "# If we want, we can create other tools.\n",
    "# Once we have all the tools we want, we can put them in a list that we will reference later.\n",
    "tools = [search]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# Alternative: model = ChatOpenAI(model=\"gpt-4\")\n",
    "model = ChatOpenAI(\n",
    "  base_url=os.getenv(\"OPENAI_API_BASE_URL\"),  # DeepSeek's OpenAI-compatible endpoint (from .env)\n",
    "  api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
    "  model=\"deepseek-reasoner\",                          # model name to use\n",
    "  # NOTE(review): reasoning models may ignore sampling params — confirm for deepseek-reasoner\n",
    "  temperature=0.7\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.messages import HumanMessage\n",
    "\n",
    "# Smoke test: send a single greeting straight to the chat model and show the\n",
    "# text of its reply.\n",
    "greeting = [HumanMessage(content=\"hi!\")]\n",
    "response = model.invoke(greeting)\n",
    "response.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach the tool schemas to the model so it can *request* tool calls\n",
    "# (surfaced as response.tool_calls); nothing here executes a tool.\n",
    "model_with_tools = model.bind_tools(tools)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# With tools bound, a plain greeting should still come back as ordinary\n",
    "# text: non-empty content and an empty tool_calls list.\n",
    "messages = [HumanMessage(content=\"Hi!\")]\n",
    "response = model_with_tools.invoke(messages)\n",
    "\n",
    "print(f\"ContentString: {response.content}\")\n",
    "print(f\"ToolCalls: {response.tool_calls}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A weather question is expected to trigger a tool request instead:\n",
    "# tool_calls populated, content empty (for OpenAI-style models).\n",
    "messages = [HumanMessage(content=\"What's the weather in SF?\")]\n",
    "response = model_with_tools.invoke(messages)\n",
    "\n",
    "print(f\"ContentString: {response.content}\")\n",
    "print(f\"ToolCalls: {response.tool_calls}\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们将使用 LangGraph 来构建代理。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langgraph.prebuilt import create_react_agent\n",
    "\n",
    "# Build a prebuilt ReAct agent graph. Note the *unbound* model is passed\n",
    "# here — create_react_agent binds the tools itself.\n",
    "agent_executor = create_react_agent(model, tools)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "运行代理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Invoke the agent on a greeting; the result is the final graph state, and\n",
    "# its \"messages\" list holds the whole exchange.\n",
    "input_state = {\"messages\": [HumanMessage(content=\"hi!\")]}\n",
    "response = agent_executor.invoke(input_state)\n",
    "\n",
    "response[\"messages\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A question that needs the search tool: the returned message list should\n",
    "# also include the tool call and the tool's result.\n",
    "input_state = {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}\n",
    "response = agent_executor.invoke(input_state)\n",
    "\n",
    "response[\"messages\"]"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "流式消息"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# We've already seen how to call the agent with .invoke to get the final\n",
    "# response; .stream instead prints each state update as it is produced.\n",
    "for chunk in agent_executor.stream(\n",
    "  {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}\n",
    "):\n",
    "  print(chunk)\n",
    "  print(\"----\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "流式令牌"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stream at the *token* level with astream_events. We dispatch on five event\n",
    "# types: chain start/end (the agent itself), chat-model token chunks, and\n",
    "# tool start/end. NOTE(review): astream_events also has a newer \"v2\" schema;\n",
    "# version=\"v1\" is kept here to preserve the original behavior.\n",
    "async for event in agent_executor.astream_events(\n",
    "    {\"messages\": [HumanMessage(content=\"whats the weather in sf?\")]}, version=\"v1\"\n",
    "):\n",
    "    kind = event[\"event\"]\n",
    "    if kind == \"on_chain_start\":\n",
    "        # \"Agent\" only matches if the agent was created with\n",
    "        # `.with_config({\"run_name\": \"Agent\"})` (not done above).\n",
    "        if event[\"name\"] == \"Agent\":\n",
    "            print(\n",
    "                f\"Starting agent: {event['name']} with input: {event['data'].get('input')}\"\n",
    "            )\n",
    "    elif kind == \"on_chain_end\":\n",
    "        if event[\"name\"] == \"Agent\":\n",
    "            print()\n",
    "            print(\"--\")\n",
    "            # NOTE(review): .get('output')['output'] assumes the chain output\n",
    "            # is a dict with an 'output' key — confirm for this graph.\n",
    "            print(\n",
    "                f\"Done agent: {event['name']} with output: {event['data'].get('output')['output']}\"\n",
    "            )\n",
    "    elif kind == \"on_chat_model_stream\":\n",
    "        content = event[\"data\"][\"chunk\"].content\n",
    "        if content:\n",
    "            # Empty content in the context of OpenAI means the model is\n",
    "            # asking for a tool to be invoked, so only print non-empty content.\n",
    "            print(content, end=\"|\")\n",
    "    elif kind == \"on_tool_start\":\n",
    "        print(\"--\")\n",
    "        print(\n",
    "            f\"Starting tool: {event['name']} with inputs: {event['data'].get('input')}\"\n",
    "        )\n",
    "    elif kind == \"on_tool_end\":\n",
    "        print(f\"Done tool: {event['name']}\")\n",
    "        print(f\"Tool output was: {event['data'].get('output')}\")\n",
    "        print(\"--\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "添加内存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langgraph.checkpoint.memory import MemorySaver\n",
    "\n",
    "# In-memory checkpointer: conversation state is stored per thread_id and is\n",
    "# lost when the kernel restarts.\n",
    "memory = MemorySaver()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the agent with the checkpointer so each thread_id keeps its own\n",
    "# message history across calls.\n",
    "agent_executor = create_react_agent(model, tools, checkpointer=memory)\n",
    "\n",
    "# Every call that passes this config joins conversation thread \"abc123\".\n",
    "config = {\"configurable\": {\"thread_id\": \"abc123\"}}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First turn on thread \"abc123\": tell the agent a name to remember.\n",
    "for chunk in agent_executor.stream(\n",
    "  {\"messages\": [HumanMessage(content=\"hi im bob!\")]}, config\n",
    "):\n",
    "  print(chunk)\n",
    "  print(\"----\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second turn on the same thread: the checkpointer supplies the earlier\n",
    "# messages, so the agent can answer with the name given above.\n",
    "for chunk in agent_executor.stream(\n",
    "  {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n",
    "):\n",
    "  print(chunk)\n",
    "  print(\"----\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# To start a brand-new conversation, all we have to do is change the\n",
    "# thread_id — this fresh thread has no memory of \"bob\".\n",
    "\n",
    "config = {\"configurable\": {\"thread_id\": \"xyz123\"}}\n",
    "for chunk in agent_executor.stream(\n",
    "  {\"messages\": [HumanMessage(content=\"whats my name?\")]}, config\n",
    "):\n",
    "  print(chunk)\n",
    "  print(\"----\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "到此为止！在这个快速入门中，我们介绍了如何创建一个简单的代理。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "langchain_rag",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "5b5a265cbb1216af63f1f72a862a66e83add8614363640086a5983a352821fe5"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
