{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-06-15T14:37:42.072674Z",
     "start_time": "2024-06-15T14:37:22.827367Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Requirement already satisfied: openai in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (0.28.1)\r\n",
      "Requirement already satisfied: requests>=2.20 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from openai) (2.31.0)\r\n",
      "Requirement already satisfied: tqdm in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from openai) (4.66.2)\r\n",
      "Requirement already satisfied: aiohttp in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from openai) (3.9.5)\r\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.20->openai) (3.3.2)\r\n",
      "Requirement already satisfied: idna<4,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.20->openai) (3.7)\r\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.20->openai) (2.2.1)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.20->openai) (2024.2.2)\r\n",
      "Requirement already satisfied: aiosignal>=1.1.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp->openai) (1.3.1)\r\n",
      "Requirement already satisfied: attrs>=17.3.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp->openai) (23.2.0)\r\n",
      "Requirement already satisfied: frozenlist>=1.1.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp->openai) (1.4.1)\r\n",
      "Requirement already satisfied: multidict<7.0,>=4.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp->openai) (6.0.5)\r\n",
      "Requirement already satisfied: yarl<2.0,>=1.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp->openai) (1.9.4)\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n",
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Requirement already satisfied: langchain in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (0.0.330)\r\n",
      "Requirement already satisfied: PyYAML>=5.3 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (6.0.1)\r\n",
      "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (2.0.30)\r\n",
      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (3.9.5)\r\n",
      "Requirement already satisfied: anyio<4.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (3.7.1)\r\n",
      "Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (0.6.6)\r\n",
      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (1.33)\r\n",
      "Requirement already satisfied: langsmith<0.1.0,>=0.0.52 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (0.0.92)\r\n",
      "Requirement already satisfied: numpy<2,>=1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (1.26.4)\r\n",
      "Requirement already satisfied: pydantic<3,>=1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (2.7.1)\r\n",
      "Requirement already satisfied: requests<3,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (2.31.0)\r\n",
      "Requirement already satisfied: tenacity<9.0.0,>=8.1.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain) (8.3.0)\r\n",
      "Requirement already satisfied: aiosignal>=1.1.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.3.1)\r\n",
      "Requirement already satisfied: attrs>=17.3.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (23.2.0)\r\n",
      "Requirement already satisfied: frozenlist>=1.1.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.4.1)\r\n",
      "Requirement already satisfied: multidict<7.0,>=4.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (6.0.5)\r\n",
      "Requirement already satisfied: yarl<2.0,>=1.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain) (1.9.4)\r\n",
      "Requirement already satisfied: idna>=2.8 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from anyio<4.0->langchain) (3.7)\r\n",
      "Requirement already satisfied: sniffio>=1.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from anyio<4.0->langchain) (1.3.1)\r\n",
      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain) (3.21.2)\r\n",
      "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain) (0.9.0)\r\n",
      "Requirement already satisfied: jsonpointer>=1.9 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain) (2.4)\r\n",
      "Requirement already satisfied: annotated-types>=0.4.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain) (0.6.0)\r\n",
      "Requirement already satisfied: pydantic-core==2.18.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain) (2.18.2)\r\n",
      "Requirement already satisfied: typing-extensions>=4.6.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain) (4.11.0)\r\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain) (3.3.2)\r\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain) (2.2.1)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain) (2024.2.2)\r\n",
      "Requirement already satisfied: packaging>=17.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from marshmallow<4.0.0,>=3.18.0->dataclasses-json<0.7,>=0.5.7->langchain) (23.2)\r\n",
      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain) (1.0.0)\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n",
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Requirement already satisfied: tiktoken in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (0.7.0)\r\n",
      "Requirement already satisfied: regex>=2022.1.18 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from tiktoken) (2024.4.16)\r\n",
      "Requirement already satisfied: requests>=2.26.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from tiktoken) (2.31.0)\r\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (3.3.2)\r\n",
      "Requirement already satisfied: idna<4,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (3.7)\r\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (2.2.1)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (2024.2.2)\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n",
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Requirement already satisfied: faiss-cpu in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (1.8.0)\r\n",
      "Requirement already satisfied: numpy in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from faiss-cpu) (1.26.4)\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n",
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Collecting langchain_experimental\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/61/9b/1bc4df69a1f92e3e96020d22c2a0850e7df566a6770bb365255f5355cd26/langchain_experimental-0.0.61-py3-none-any.whl (202 kB)\r\n",
      "\u001B[2K     \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m202.5/202.5 kB\u001B[0m \u001B[31m1.0 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0ma \u001B[36m0:00:01\u001B[0m\r\n",
      "\u001B[?25hCollecting langchain-community<0.3.0,>=0.2.5 (from langchain_experimental)\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/d2/27/9c310c60c572b69a8eeb27f828b0df097834062862f541128b02b87df8f0/langchain_community-0.2.5-py3-none-any.whl (2.2 MB)\r\n",
      "\u001B[2K     \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m2.2/2.2 MB\u001B[0m \u001B[31m1.0 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m00:01\u001B[0m00:01\u001B[0m0m\r\n",
      "\u001B[?25hCollecting langchain-core<0.3.0,>=0.2.7 (from langchain_experimental)\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/62/e7/122e57ff1aa6adc3710ae4a4b8c545944165bcf822a5df93710b2e064426/langchain_core-0.2.7-py3-none-any.whl (315 kB)\r\n",
      "\u001B[2K     \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m315.6/315.6 kB\u001B[0m \u001B[31m929.8 kB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m00:01\u001B[0m00:01\u001B[0m\r\n",
      "\u001B[?25hRequirement already satisfied: PyYAML>=5.3 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (6.0.1)\r\n",
      "Requirement already satisfied: SQLAlchemy<3,>=1.4 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (2.0.30)\r\n",
      "Requirement already satisfied: aiohttp<4.0.0,>=3.8.3 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (3.9.5)\r\n",
      "Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (0.6.6)\r\n",
      "Collecting langchain<0.3.0,>=0.2.5 (from langchain-community<0.3.0,>=0.2.5->langchain_experimental)\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/4c/e3/38d4c5969d91e8e4f2ada469d82322d8b9f5d218613c4a18c11be2bda648/langchain-0.2.5-py3-none-any.whl (974 kB)\r\n",
      "\u001B[2K     \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m974.6/974.6 kB\u001B[0m \u001B[31m1.1 MB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0m00:01\u001B[0m00:01\u001B[0m\r\n",
      "\u001B[?25hCollecting langsmith<0.2.0,>=0.1.0 (from langchain-community<0.3.0,>=0.2.5->langchain_experimental)\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/88/9c/6944b7c8f37d690eb98f887e8c58bf67d7971a5b891fccab2cee7d4fc776/langsmith-0.1.77-py3-none-any.whl (125 kB)\r\n",
      "\u001B[2K     \u001B[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001B[0m \u001B[32m125.2/125.2 kB\u001B[0m \u001B[31m758.2 kB/s\u001B[0m eta \u001B[36m0:00:00\u001B[0ma \u001B[36m0:00:01\u001B[0m\r\n",
      "\u001B[?25hRequirement already satisfied: numpy<2.0.0,>=1.26.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (1.26.4)\r\n",
      "Requirement already satisfied: requests<3,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (2.31.0)\r\n",
      "Requirement already satisfied: tenacity<9.0.0,>=8.1.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-community<0.3.0,>=0.2.5->langchain_experimental) (8.3.0)\r\n",
      "Requirement already satisfied: jsonpatch<2.0,>=1.33 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.7->langchain_experimental) (1.33)\r\n",
      "Requirement already satisfied: packaging<25,>=23.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.7->langchain_experimental) (23.2)\r\n",
      "Requirement already satisfied: pydantic<3,>=1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langchain-core<0.3.0,>=0.2.7->langchain_experimental) (2.7.1)\r\n",
      "Requirement already satisfied: aiosignal>=1.1.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (1.3.1)\r\n",
      "Requirement already satisfied: attrs>=17.3.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (23.2.0)\r\n",
      "Requirement already satisfied: frozenlist>=1.1.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (1.4.1)\r\n",
      "Requirement already satisfied: multidict<7.0,>=4.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (6.0.5)\r\n",
      "Requirement already satisfied: yarl<2.0,>=1.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.8.3->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (1.9.4)\r\n",
      "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (3.21.2)\r\n",
      "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (0.9.0)\r\n",
      "Requirement already satisfied: jsonpointer>=1.9 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<0.3.0,>=0.2.7->langchain_experimental) (2.4)\r\n",
      "Collecting langchain-text-splitters<0.3.0,>=0.2.0 (from langchain<0.3.0,>=0.2.5->langchain-community<0.3.0,>=0.2.5->langchain_experimental)\r\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/a9/d9/31b1b5415be5201ec1ba34ab04f47a92c69174d7817d70b51693fb60e780/langchain_text_splitters-0.2.1-py3-none-any.whl (23 kB)\r\n",
      "Requirement already satisfied: orjson<4.0.0,>=3.9.14 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from langsmith<0.2.0,>=0.1.0->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (3.10.3)\r\n",
      "Requirement already satisfied: annotated-types>=0.4.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain-core<0.3.0,>=0.2.7->langchain_experimental) (0.6.0)\r\n",
      "Requirement already satisfied: pydantic-core==2.18.2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain-core<0.3.0,>=0.2.7->langchain_experimental) (2.18.2)\r\n",
      "Requirement already satisfied: typing-extensions>=4.6.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from pydantic<3,>=1->langchain-core<0.3.0,>=0.2.7->langchain_experimental) (4.11.0)\r\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (3.3.2)\r\n",
      "Requirement already satisfied: idna<4,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (3.7)\r\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (2.2.1)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests<3,>=2->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (2024.2.2)\r\n",
      "Requirement already satisfied: mypy-extensions>=0.3.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community<0.3.0,>=0.2.5->langchain_experimental) (1.0.0)\r\n",
      "Installing collected packages: langsmith, langchain-core, langchain-text-splitters, langchain, langchain-community, langchain_experimental\r\n",
      "  Attempting uninstall: langsmith\r\n",
      "    Found existing installation: langsmith 0.0.92\r\n",
      "    Uninstalling langsmith-0.0.92:\r\n",
      "      Successfully uninstalled langsmith-0.0.92\r\n",
      "  Attempting uninstall: langchain-core\r\n",
      "    Found existing installation: langchain-core 0.0.13\r\n",
      "    Uninstalling langchain-core-0.0.13:\r\n",
      "      Successfully uninstalled langchain-core-0.0.13\r\n",
      "  Attempting uninstall: langchain-text-splitters\r\n",
      "    Found existing installation: langchain-text-splitters 0.0.1\r\n",
      "    Uninstalling langchain-text-splitters-0.0.1:\r\n",
      "      Successfully uninstalled langchain-text-splitters-0.0.1\r\n",
      "  Attempting uninstall: langchain\r\n",
      "    Found existing installation: langchain 0.0.330\r\n",
      "    Uninstalling langchain-0.0.330:\r\n",
      "      Successfully uninstalled langchain-0.0.330\r\n",
      "  Attempting uninstall: langchain-community\r\n",
      "    Found existing installation: langchain-community 0.0.38\r\n",
      "    Uninstalling langchain-community-0.0.38:\r\n",
      "      Successfully uninstalled langchain-community-0.0.38\r\n",
      "Successfully installed langchain-0.2.5 langchain-community-0.2.5 langchain-core-0.2.7 langchain-text-splitters-0.2.1 langchain_experimental-0.0.61 langsmith-0.1.77\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install openai\n",
    "%pip install langchain\n",
    "%pip install tiktoken\n",
    "%pip install faiss-cpu\n",
    "%pip install langchain_experimental"
   ]
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "import os\n",
    "import getpass\n",
    "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"输入openAi-key\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-15T14:52:46.311890Z",
     "start_time": "2024-06-15T14:52:33.807264Z"
    }
   },
   "id": "f51b45c1eb38e547",
   "execution_count": 5
  },
  {
   "cell_type": "markdown",
   "source": [
     "# prompt template"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ed0dd966212196a8"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "组合后的用户请求: \n",
      "你作为工作5-10年的程序员回答如下问题。一句话概括。\n",
      "我想学习python编程，需要哪几个步骤?\n",
      "大语言模型的回应: content='1. 了解Python的基本语法和特性。\\n2. 练习编写简单的Python程序。\\n3. 深入学习Python的高级特性和库。\\n4. 参与实际项目并不断提升编程技能。'\n"
     ]
    }
   ],
   "source": [
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "llms = ChatOpenAI(model = \"gpt-3.5-turbo\")\n",
    "#下面的{Coding}作为替换符\n",
     "#加入了Instruction，让LLM知道提问的背景。让它知道调用编程方面的能力。\n",
    "template = \"\"\"\n",
    "你作为工作5-10年的程序员回答如下问题。一句话概括。\n",
    "我想学习{Coding}编程，需要哪几个步骤?\n",
    "\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    input_variables = [\"Coding\"],\n",
    "    template = template\n",
    ")\n",
    "\n",
    "final_prompt = prompt.format(Coding=\"python\")\n",
    "\n",
    "print (f\"组合后的用户请求: {final_prompt}\")\n",
    "\n",
    "print (f\"大语言模型的回应: {llms.invoke(final_prompt)}\")\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-02T14:15:22.827403Z",
     "start_time": "2024-06-02T14:15:20.085765Z"
    }
   },
   "id": "16a3b94d9858f1e",
   "execution_count": 10
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.ChatMessagePromptTemplate 通过Role实现精准回应\n",
    "LangChain提供了不同类型的MessagePromptTemplate。\n",
    "\n",
    "HumanMessagePromptTemplate：人类消息\n",
    "\n",
     "SystemMessagePromptTemplate：系统消息（Instruction，Context）\n",
    "\n",
    "AIMessagePromptTemplate：AI消息\n",
    "\n",
    "可以使用ChatMessagePromptTemplate，它允许用户指定角色名称。\n",
    "ChatMessagePromptTemplate 通过role + 模版 的方式生成prompt\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fabea00307301673"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "content='描写人物时可以从外貌特征、性格特点、行为举止等方面入手。以下是一些描写人物的方法：\\n\\n1. 外貌特征：可以描述人物的身高、体型、肤色、头发颜色、眼睛颜色等外貌特征。比如，可以说一个人物有一头乌黑的长发，皮肤呈现出健康的古铜色。\\n\\n2. 性格特点：可以描述人物的性格特点，比如善良、勇敢、聪明、憨厚等。可以通过人物的言行举止来展现其性格特点。比如，可以描述一个人物总是乐于助人，乐观向上。\\n\\n3. 行为举止：可以描述人物的行为举止，比如动作、表情、语言等。可以通过人物的行为来展现其性格特点。比如，可以描述一个人物总是微笑着和他人交流，充满活力。\\n\\n在描写人物时，可以结合以上几个方面进行综合描写，让人物形象更加生动丰富。希望以上内容对你有所帮助！'\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import ChatMessagePromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "# llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
    "prompt = \"我想知道如何描写{subject}\"\n",
    "\n",
    "chat_message_prompt = ChatMessagePromptTemplate.from_template(role=\"小学生\", template=prompt)\n",
    "formatted_prompt =chat_message_prompt.format(subject=\"人物\")\n",
    "prompts = str(formatted_prompt)\n",
    "# 将格式化的提示传递给LLM模型并获取回应\n",
    "response = llm.invoke(prompts)\n",
    "\n",
    "# 打印LLM模型的回应\n",
    "print(response)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-02T14:41:12.455859Z",
     "start_time": "2024-06-02T14:41:06.894281Z"
    }
   },
   "id": "7efef3c19e6ebce0",
   "execution_count": 35
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.通过MessagesPlaceholder 实现多类型Message合作\n",
     "LangChain中的\"MessagesPlaceholder\"功能的应用场景：\n",
    "\n",
    "例如，一个聊天系统，用户和AI进行了多轮的对话，对很多输入的内容进行了分析。\n",
    "\n",
     "在这种情况下，需要对上面的对话进行总结。就需要使用\"MessagesPlaceholder\"将上面人类输入和AI响应的信息作为模版放入到其中，并且限制总结的字数。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5fa84ac27ae4e0d8"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "generations=[[ChatGeneration(text='阅读广泛，多练习，精编辑校对。', generation_info={'finish_reason': 'stop'}, message=AIMessage(content='阅读广泛，多练习，精编辑校对。'))]] llm_output={'token_usage': {'prompt_tokens': 308, 'completion_tokens': 20, 'total_tokens': 328}, 'model_name': 'gpt-4-turbo'} run=[RunInfo(run_id=UUID('1ebf1d85-f89d-431c-a437-74a566961802'))]\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import MessagesPlaceholder\n",
    "from langchain.prompts import HumanMessagePromptTemplate\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.schema.messages import HumanMessage,AIMessage\n",
    "\n",
     "# 创建语言模型\n",
    "llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "# llm = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
    "# 人类消息模版\n",
    "human_prompt = \"使用{word_count}个字总结一下上面的对话.\"\n",
    "human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)\n",
    "\n",
    "#创建聊天模版\n",
    "#定义变量conversation，这个是用来存放多个消息模版的\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([MessagesPlaceholder(variable_name=\"conversation\"),human_message_template])\n",
    "\n",
    "# 定义对话消息\n",
    "human_message = HumanMessage(content=\"如何写好小学作文\")\n",
    "\n",
    "ai_message = AIMessage(content=\n",
    "    \"\"\"\\\n",
    "    1. 阅读广泛：阅读是提升写作技巧的关键。阅读各种文学作品、新闻报道和其他优秀的写作作品，可以扩展你的词汇量和写作风格，并为你提供灵感和观点.\n",
    "    2. 练习写作：写作就像是一项技能，需要不断的练习。坚持写作，并接受反馈和指导，以不断改进你的表达和结构。多样化你的写作练习，包括叙事、说明和议论等不同类型的作文.\n",
    "    3. 重视编辑和校对：写作并不仅仅是把思绪表达出来，而是经过多次修改和润色才能达到最佳效果。花时间编辑和校对你的作文，确保语法准确、句子流畅，同时注意段落结构和逻辑连贯\\\n",
    "    \"\"\"\n",
    ")\n",
    "\n",
    "final_prompt = chat_prompt_template.format_prompt(conversation=[human_message,ai_message],word_count=\"10\")\n",
    "\n",
    "# prompts = [str(formatted_prompt)]\n",
    "# prompts = [str(final_prompt)]\n",
    "\n",
    "response = llm.generate_prompt([final_prompt])\n",
    "\n",
    "print(response)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-02T15:52:12.956028Z",
     "start_time": "2024-06-02T15:52:11.007803Z"
    }
   },
   "id": "f11684749919309f",
   "execution_count": 56
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 4 Partial prompt templates 模版的部分加载\n",
    "LangChain支持两种方式进行部分填充：\n",
     "一是使用字符串值进行部分格式化；二是使用返回字符串值的函数进行部分格式化。\n",
     "下面的日期函数示例就属于函数方式的部分格式化"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "f2487ba52fd1d30e"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 4.1 字符串方式\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "ae0eeedb6b647558"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "generations=[[ChatGeneration(text='\\n\\n那很棒！打球是一种很好的运动方式。祝愿他玩得开心！', generation_info={'finish_reason': 'stop'}, message=AIMessage(content='\\n\\n那很棒！打球是一种很好的运动方式。祝愿他玩得开心！'))]] llm_output={'token_usage': {'prompt_tokens': 18, 'completion_tokens': 32, 'total_tokens': 50}, 'model_name': 'gpt-3.5-turbo'} run=[RunInfo(run_id=UUID('756f22bd-b441-4df3-899e-3ee78275cfaa'))]\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
    "# 定义模板\n",
    "template = \"\"\"\n",
    "    {name} 正在 {action}\n",
    "\"\"\"\n",
    "# 创建一个PromptTemplate\n",
    "prompt = PromptTemplate(\n",
    "    input_variables = [\"name\",\"action\"],\n",
    "    template = template\n",
    ")\n",
    "# 使用partial方法，预先填充\"name\"变量为\"xin\"\n",
    "partial_prompt = prompt.partial(name = \"xin\")\n",
    "\n",
    "final_prompt = partial_prompt.format_prompt(action = \"打球\")\n",
    "\n",
    "response = llm.generate_prompt([final_prompt])\n",
    "print(response)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-04T15:56:13.380835Z",
     "start_time": "2024-06-04T15:56:11.774787Z"
    }
   },
   "id": "f314848de733f840",
   "execution_count": 10
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 4.2 函数方式"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8f71a73cffd29213"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "generations=[[ChatGeneration(text='对不起，我无法提供关于未来特定时间点的笑话，因为我无法预测未来。但我可以给你讲一个普通的笑话：\\n\\n为什么电脑很不安？\\n因为它有太多的内存，却总是在处理过去的事！', generation_info={'finish_reason': 'stop'}, message=AIMessage(content='对不起，我无法提供关于未来特定时间点的笑话，因为我无法预测未来。但我可以给你讲一个普通的笑话：\\n\\n为什么电脑很不安？\\n因为它有太多的内存，却总是在处理过去的事！'))]] llm_output={'token_usage': {'prompt_tokens': 38, 'completion_tokens': 83, 'total_tokens': 121}, 'model_name': 'gpt-4-turbo'} run=[RunInfo(run_id=UUID('7d00f142-2837-4891-bf61-2b0d986e1e95'))]\n"
     ]
    }
   ],
   "source": [
    "from datetime import datetime\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "def _get_time():\n",
    "    now = datetime.now()\n",
    "    return now.strftime(\"%Y年%m月%d日 %H点%M分%S秒\")\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "\n",
    "template = \"\"\"\n",
    "  给我讲一个关于{date}的{adjective}笑话\n",
    "\"\"\"\n",
    "\n",
    "prompt_template = PromptTemplate(\n",
    "    input_variables = [\"date\",\"adjective\"],\n",
    "    template = template\n",
    ")\n",
    "\n",
    "partial_prompt = prompt_template.partial(date=_get_time())\n",
    "\n",
    "final_prompt = partial_prompt.format_prompt(adjective=\"有趣\")\n",
    "\n",
    "response = llm.generate_prompt([final_prompt])\n",
    "\n",
    "print(response)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-04T16:09:25.240752Z",
     "start_time": "2024-06-04T16:09:21.167937Z"
    }
   },
   "id": "47e2c1aeba34e6b8",
   "execution_count": 16
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 5. 组合Prompt template"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9be3ea2b3a124baa"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "content='Instagram'\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.prompts import PipelinePromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "\n",
    "full_template = \"\"\"\n",
    "   {introduction}\n",
    "   \n",
    "   {example}\n",
    "   \n",
    "   {start}\n",
    "\"\"\"\n",
    "full_prompt = PromptTemplate.from_template(full_template)\n",
    "\n",
    "# 定义\"introduction\"部分的提示模板\n",
    "introduction_template = \"\"\"\n",
    "    你正在模仿{person}。\n",
    "\"\"\"\n",
    "introduction_prompt = PromptTemplate.from_template(introduction_template)\n",
    "\n",
    "# 定义“example”的提示模板\n",
    "example_template = \"\"\"\n",
    "    这是一个互动的例子:\n",
    "\n",
    "问: {example_q}\n",
    "答: {example_a}\n",
    "\"\"\"\n",
    "example_prompt = PromptTemplate.from_template(example_template)\n",
    "\n",
    "#定义start的提示模板\n",
    "start_template = \"\"\"\n",
    "现在，开始真正的互动！\n",
    "\n",
    "问: {input}\n",
    "答:\n",
    "\"\"\"\n",
    "start_prompt = PromptTemplate.from_template(start_template)\n",
    "\n",
    "# 将所有模板组合到一起\n",
    "input_template = [\n",
    "    (\"introduction\", introduction_prompt),\n",
    "    (\"example\", example_prompt),\n",
    "    (\"start\", start_prompt)\n",
    "]\n",
    "\n",
    "pipeline_template = PipelinePromptTemplate(final_prompt=full_prompt\n",
    "                                           , pipeline_prompts=input_template)\n",
    "\n",
    "pipeline_prompt = pipeline_template.format(\n",
    "    person=\"小马哥\",\n",
    "    example_q=\"你最喜欢的车是什么？\",\n",
    "    example_a=\"特斯拉\",\n",
    "    input=\"你最喜欢的社交媒体网站是什么？\"\n",
    ")\n",
    "\n",
    "response = llm.invoke(pipeline_prompt)\n",
    "\n",
    "print(response)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T10:41:47.058256Z",
     "start_time": "2024-06-10T10:41:43.029608Z"
    }
   },
   "id": "85d631bbaba002c5",
   "execution_count": 13
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 6.Prompt template 序列化\n",
    "\n",
    "支持JSON和YAML两种格式。\n",
    "\n",
    "支持在一个文件中指定所有内容，或者将不同的组件（模板、示例等）存储在不同的文件中并进行引用。\n",
    "\n",
    "提供了一个加载提示的单一入口点"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "33b5a5bb98d62f8"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:216: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n",
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:811: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "generations=[[Generation(text='输出: 无聊\\n\\n')]] llm_output={'token_usage': <OpenAIObject at 0x117daceb0> JSON: {\n",
      "  \"prompt_tokens\": 54,\n",
      "  \"completion_tokens\": 7,\n",
      "  \"total_tokens\": 61\n",
      "}, 'model_name': 'gpt-4-turbo'} run=[RunInfo(run_id=UUID('e503f8ce-e48f-4a5e-95f2-fcbe1d07eb06'))]\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import load_prompt\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "llm = OpenAI(model_name=\"gpt-4-turbo\")\n",
    "\n",
    "json_template = load_prompt(\"few_shot_prompt_examples_in.json\")\n",
    "\n",
    "response = llm.generate([json_template.format(adjective = \"有趣\")])\n",
    "\n",
    "print(response)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T13:03:57.650518Z",
     "start_time": "2024-06-10T13:03:55.639323Z"
    }
   },
   "id": "72a56122a8d10da",
   "execution_count": 15
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 7.Example Selectors"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fc620e9760ba2017"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 7.1 Select by length（根据Prompt 控制示例长度）"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1b686041775b9e2e"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "给出每个输入的反义词\n",
      "\n",
      "输入: 开心\n",
      "输出: 难过\n",
      "\n",
      "输入: 高\n",
      "输出: 矮\n",
      "\n",
      "输入: 精力充沛\n",
      "输出: 无精打采\n",
      "\n",
      "输入: 阳光明媚\n",
      "输出: 阴沉\n",
      "\n",
      "输入: 风大\n",
      "输出: 风平浪静\n",
      "\n",
      "输入: 热情\n",
      "输出:\n",
      "测试例子\n",
      "content='冷漠'\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.prompts import FewShotPromptTemplate\n",
    "from langchain.prompts.example_selector import LengthBasedExampleSelector\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "#例子\n",
    "examples =  [\n",
    "    {\"input\": \"开心\", \"output\": \"难过\"},\n",
    "    {\"input\": \"高\", \"output\": \"矮\"},\n",
    "    {\"input\": \"精力充沛\", \"output\": \"无精打采\"},\n",
    "    {\"input\": \"阳光明媚\", \"output\": \"阴沉\"},\n",
    "    {\"input\": \"风大\", \"output\": \"风平浪静\"},\n",
    "]\n",
    "\n",
    "example_prompt = PromptTemplate(\n",
    "    input_variables = [\"input\",\"output\"],\n",
    "    template=\"输入: {input}\\n输出: {output}\"\n",
    ")\n",
    "\n",
    "selector = LengthBasedExampleSelector(\n",
    "     # 这些是它可以选择的例子。\n",
    "    examples = examples,\n",
    "    # 这是用来格式化例子的PromptTemplate。\n",
    "    example_prompt = example_prompt,\n",
    "     # 这是格式化的例子应该有的最大长度。\n",
    "    # 长度是通过下面的get_text_length函数来测量的。\n",
    "    max_length = 30\n",
    ")\n",
    "\n",
    "dynamic_prompt = FewShotPromptTemplate(\n",
    "     # 我们提供一个ExampleSelector，而不是例子。\n",
    "    #限定了长度的示例选择器\n",
    "    example_selector = selector,\n",
    "    example_prompt = example_prompt,\n",
    "    prefix=\"给出每个输入的反义词\",\n",
    "    suffix=\"输入: {adjective}\\n输出:\",\n",
    "    input_variables=[\"adjective\"],\n",
    ")\n",
    "\n",
    "# 一个有小输入的例子，所以它选择了所有的例子。\n",
    "print(dynamic_prompt.format(adjective=\"热情\"))\n",
    "\n",
    "# 你可以使用你的语言模型来生成输出\n",
    "print(\"测试例子\")\n",
    "output = llm.invoke(dynamic_prompt.format(adjective=\"热情\"))\n",
    "print(output)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T13:17:42.682829Z",
     "start_time": "2024-06-10T13:17:39.726658Z"
    }
   },
   "id": "fd70158bd179d77a",
   "execution_count": 19
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 7.2 Select by similarity（选择与Prompt 类似的示例）"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8f78a50c943c6742"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "给出一个事物通常所在的位置\n",
      "\n",
      "示例输入: 鸟\n",
      "示例输出: 鸟巢\n",
      "\n",
      "示例输入: 树\n",
      "示例输出: 土地\n",
      "\n",
      "输入: 花朵\n",
      "输出:\n",
      "content='花园'\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import SemanticSimilarityExampleSelector\n",
    "from langchain.vectorstores import FAISS\n",
    "from langchain.embeddings import OpenAIEmbeddings\n",
    "from langchain.prompts import FewShotPromptTemplate,PromptTemplate\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-4-turbo\")\n",
    "example_prompt = PromptTemplate(\n",
    "    input_variables = [\"input\",\"output\"],\n",
    "    template = \"示例输入: {input}\\n示例输出: {output}\"\n",
    ")\n",
    "\n",
    "# 创建一个示例列表\n",
    "examples = [\n",
    "    {\"input\": \"老师\", \"output\": \"教室\"},\n",
    "    {\"input\": \"医生\", \"output\": \"医院\"},\n",
    "    {\"input\": \"司机\", \"output\": \"汽车\"},\n",
    "    {\"input\": \"树\", \"output\": \"土地\"},\n",
    "    {\"input\": \"鸟\", \"output\": \"鸟巢\"},\n",
    "]\n",
    "\n",
    "selector = SemanticSimilarityExampleSelector.from_examples(\n",
    "    examples,\n",
    "    OpenAIEmbeddings(),\n",
    "    FAISS,\n",
    "    k=2\n",
    ")\n",
    "\n",
    "# 创建一个FewShotPromptTemplate实例\n",
    "similar_prompt = FewShotPromptTemplate(\n",
    "    example_prompt = example_prompt,\n",
    "    example_selector = selector,\n",
    "    prefix = \"给出一个事物通常所在的位置\",\n",
    "    suffix = \"输入: {noun}\\n输出:\",\n",
    "    input_variables=[\"noun\"]\n",
    ")\n",
    "\n",
    "print(similar_prompt.format(noun = \"花朵\"))\n",
    "\n",
    "response = llm.invoke(similar_prompt.format(noun = \"花朵\"))\n",
    "\n",
    "print(response)\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T14:41:51.664934Z",
     "start_time": "2024-06-10T14:41:48.725549Z"
    }
   },
   "id": "1fbfc7db705068b1",
   "execution_count": 27
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 8.LLMS\n",
    "LLMs是LangChain的核心组件，LangChain并不提供自己的LLMs，而是提供了一个与多种LLMs交互的标准接口。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "cc6c89f5bf970122"
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "700029c5c0326e1e"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:216: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n",
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:811: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "有两只蚊子在聊天，一只蚊子说：“你最喜欢飞到哪里去吸血呢？”\n",
      "另一只蚊子答：“我喜欢到图书馆去。”\n",
      "“图书馆？为什么呢？”\n",
      "“因为那里的人太专心看书了，都不怎么注意挠痒。”\n",
      "1\n",
      "[Generation(text='有一天，一只苹果和一只香蕉在街上走路。突然，香蕉摔倒了。苹果担心地问：“你怎么样，疼吗？”\\n香蕉说：“不疼，我只是 ‘果汁’ 了一下！”')]\n"
     ]
    }
   ],
   "source": [
    "from langchain.llms import OpenAI\n",
    "\n",
    "llm = OpenAI(model_name=\"gpt-4-turbo\")\n",
    "# 使用__call__方法生成文本\n",
    "joke = llm(\"给我讲个笑话\")\n",
    "\n",
    "print(joke)\n",
    "\n",
    "# 使用generate方法生成文本\n",
    "responses = llm.generate([\"给我讲个笑话\"])\n",
    "\n",
    "print(len(responses.generations))\n",
    "\n",
    "print(responses.generations[0])\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T14:58:16.531265Z",
     "start_time": "2024-06-10T14:58:05.386917Z"
    }
   },
   "id": "2b0242cbbcbc9a47",
   "execution_count": 30
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 9.caching"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "7de8046e7fd0c9a1"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 9.1 内存缓存"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "72b0429d99f710d2"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:216: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n",
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:811: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "一位农场主去买公鸡，常年的农活让他喜欢上了讲价。\n",
      "\n",
      "农场主：“你这公鸡多少钱一只？”\n",
      "\n",
      "卖主：“一百元。”\n",
      "\n",
      "农场主：“能便宜点吗？”\n",
      "\n",
      "卖主：“也可以，80元怎么样？”\n",
      "\n",
      "农场主：“再便宜点。”\n",
      "\n",
      "卖主：“行，那就50元吧。”\n",
      "\n",
      "农场主：“我看它还能再便宜。”\n",
      "\n",
      "卖主小瞪眼，然后笑道：“老哥你真会开玩笑，要再便宜我就得帮你反过来喂足百日粮，变回小鸡呢！”\n",
      "预测花费了13.471782922744751 seconds时间\n",
      "一位农场主去买公鸡，常年的农活让他喜欢上了讲价。\n",
      "\n",
      "农场主：“你这公鸡多少钱一只？”\n",
      "\n",
      "卖主：“一百元。”\n",
      "\n",
      "农场主：“能便宜点吗？”\n",
      "\n",
      "卖主：“也可以，80元怎么样？”\n",
      "\n",
      "农场主：“再便宜点。”\n",
      "\n",
      "卖主：“行，那就50元吧。”\n",
      "\n",
      "农场主：“我看它还能再便宜。”\n",
      "\n",
      "卖主小瞪眼，然后笑道：“老哥你真会开玩笑，要再便宜我就得帮你反过来喂足百日粮，变回小鸡呢！”\n",
      "预测花费了0.0004589557647705078 seconds时间\n"
     ]
    }
   ],
   "source": [
    "from langchain.cache import InMemoryCache\n",
    "import langchain\n",
    "from langchain.llms import OpenAI\n",
    "import time\n",
    "\n",
    "langchain.llm_cache = InMemoryCache()\n",
    "llm = OpenAI(model_name=\"gpt-4\")\n",
    "\n",
    "start_time = time.time()\n",
    "print(llm.predict(\"给我讲个笑话\"))\n",
    "end_time = time.time()\n",
    "print(f\"预测花费了{end_time - start_time} seconds时间\")\n",
    "\n",
    "start_time = time.time()\n",
    "print(llm.predict(\"给我讲个笑话\"))\n",
    "end_time = time.time()\n",
    "print(f\"预测花费了{end_time - start_time} seconds时间\")\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T15:09:54.645674Z",
     "start_time": "2024-06-10T15:09:41.169864Z"
    }
   },
   "id": "4bbc2ddd27c8b93b",
   "execution_count": 33
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 9.2 数据库缓存"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "855eecbab8b28948"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:216: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n",
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain/llms/openai.py:811: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当然可以，这是一个我最近听到的笑话：\n",
      "\n",
      "为什么电脑永远不会感冒？\n",
      "\n",
      "因为它有Windows（窗户）。\n",
      "预计花费4.262720108032227 seconds 时间\n",
      "------------\n",
      "当然可以，这是一个我最近听到的笑话：\n",
      "\n",
      "为什么电脑永远不会感冒？\n",
      "\n",
      "因为它有Windows（窗户）。\n",
      "预计花费0.001299142837524414 seconds 时间\n"
     ]
    }
   ],
   "source": [
    "import langchain\n",
    "from langchain.cache import SQLiteCache\n",
    "from langchain.llms import OpenAI\n",
    "import time\n",
    "\n",
    "langchain.llm_cache = SQLiteCache(database_path=\"./file/.langchain.db\")\n",
    "llm = OpenAI(model_name=\"gpt-4\")\n",
    "\n",
    "start_time = time.time()\n",
    "print(llm.predict(\"给我讲个笑话\"))\n",
    "end_time = time.time()\n",
    "print(f\"预计花费{end_time - start_time} seconds 时间\")\n",
    "\n",
    "print(\"------------\")\n",
    "\n",
    "start_time = time.time()\n",
    "print(llm.predict(\"给我讲个笑话\"))\n",
    "end_time = time.time()\n",
    "print(f\"预计花费{end_time - start_time} seconds 时间\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-10T15:17:54.992545Z",
     "start_time": "2024-06-10T15:17:50.713881Z"
    }
   },
   "id": "89890ae3594129a0",
   "execution_count": 34
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 10 FakeListLLM"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9a3cf8446281348a"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.llms.fake import FakeListLLM\n",
    "from langchain.agents import load_tools,initialize_agent,AgentType\n",
    "\n",
    "# 加载名为\"python_repl\"的工具\n",
    "tools = load_tools([\"python_repl\"])\n",
    "\n",
    "# 定义一个模拟的LLM响应列表\n",
    "responses = [\"Action: Python REPL\\nAction Input: print(2 + 2)\", \"Final Answer: 4\"]\n",
    "\n",
    "# 创建一个FakeListLLM实例，它会按照预定义的响应列表来响应提示\n",
    "llm = FakeListLLM(responses = responses)\n",
    "\n",
    "# 使用这些工具和LLM初始化一个代理\n",
    "agent = initialize_agent(\n",
    "     tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
    ")\n",
    "\n",
    "\n",
    "agent.run(\"whats 2 + 2\")\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "d9933f4ed51f4be1",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 11.异步调用"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "b38f40879d4dd49f"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "流\n",
      "  return \"我是并行函数的信息流\"; // 被并行函数消费\n",
      "})\n",
      ".then((val) => {\n",
      "  console.log(val);\n",
      "});\n",
      "\n",
      "我是第16个被调用\n",
      "我是并行函数的信息\n",
      "我是第17个被调用\n",
      "我是并行函数的信息\n",
      "我是第18个被调用\n",
      "我是并行函数的信息\n",
      "我是第19个被调用\n",
      "我是并行函数的信息\n",
      "我是第20个被调用\n",
      "\n",
      "合并函数正在等待并行函数完成\n",
      "并行函数已经完成，开始执行合并函数\n",
      "我是合并函数\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "我是并行函数的信息\n",
      "\n",
      "         */\n",
      "        private final String info;\n",
      "\n",
      "        public PrintTask(String info) {\n",
      "            this.info = info;\n",
      "        }\n",
      "\n",
      "        /**\n",
      "         * 每个任务的执行逻辑实现在run()方法中\n",
      "         */\n",
      "        @Override\n",
      "        public void run() {\n",
      "            System.out.println(info);\n",
      "        }\n",
      "\n",
      "    }\n",
      "\n",
      "    public static void main(String[] args) {\n",
      "\n",
      "        System.out.println(\"程序运行START\");\n",
      "\n",
      "        //创建一个并行任务列表\n",
      "        List<PrintTask> tasks = new ArrayList<>();\n",
      "        tasks.add(new PrintTask(\"我是并行函数的调用顺序:1\"));\n",
      "        tasks.add(new PrintTask(\"我是并行函数的调用顺序:2\"));\n",
      "        tasks.add(new PrintTask(\"我是并行函数的调用顺序:3\"));\n",
      "\n",
      "        //获得并行任务的线程池\n",
      "        ExecutorService executor = Executors.newFixedThreadPool(3);\n",
      "\n",
      "        //运行并行任务\n",
      "        for (PrintTask task : tasks) {\n",
      "            executor.execute(task);\n",
      "        }\n",
      "\n",
      "        //等待所有线程任务执行完毕\n",
      "        executor.shutdown();\n",
      "        while (!executor.isTerminated()) {\n",
      "        }\n",
      "\n",
      "        System.out.println(\"\n",
      "\")\n",
      "\t\t\ttime.Sleep(1 * time.Second)\n",
      "\t\t}(i)\n",
      "\t}\n",
      "\tfmt.Println(\"我是并行函数外的信息\")\n",
      "\twg.Wait()\n",
      "\n",
      "}\n",
      ", 传参过来的?\",args)\n",
      "<|diff_marker|> 1006\n",
      "+\tfmt.Println(\"我是并行函数的信息, 传参过来的?\", args)\n",
      "<|diff_marker|> --- src/parallel.7/parallel.go\n",
      "-\tfor i:=0;i<len))(nums);i++{\n",
      "-\t\tgo printNum()(nums[i])\n",
      "<|diff_marker|> 1013\n",
      "+\tfor i := 0; i < len(nums); i++ {\n",
      "+\t\tgo printNum(nums[i])\n",
      "\n",
      "\u001B[1m并发执行在 12.29 秒内完成.\u001B[0m\n",
      "\");\n",
      "    }];\n",
      "}\n",
      "\n",
      "- (void)demo2\n",
      "{\n",
      "    // 创建并发队列\n",
      "    dispatch_queue_t queue = dispatch_queue_create(\"com.tianxiadiyiqiu\", DISPATCH_QUEUE_CONCURRENT);\n",
      "    // 添加任务到队列\n",
      "     dispatch_async(queue, ^{\n",
      "         NSLog(@\"我是并发函数的第一个信息\");\n",
      "     });\n",
      "    \n",
      "     dispatch_async(queue, ^{\n",
      "         NSLog(@\"我是并发函数的第二个信息\");\n",
      "     });\n",
      "    \n",
      "     dispatch_async(queue, ^{\n",
      "         NSLog(@\"我是并发函数的第三个信息\");\n",
      "     });\n",
      "    \n",
      "     dispatch_async(queue, ^{\n",
      "         NSLog(@\"我是并发函数的第四个信息\");\n",
      "     });\n",
      "    \n",
      "     dispatch_async(queue, ^{\n",
      "         NSLog(@\"我是并发函数的第五个信息\");\n",
      "     });\n",
      "}\n",
      "\n",
      "- (void)demo3\n",
      "{\n",
      "    // 创建并行队列\n",
      "    dispatch_queue_t queue = dispatch_queue_create(\"com.tianxiadiyiqiu\", DISPATCH_QUEUE_SERIAL);\n",
      "    // 添加任务到队列\n",
      "    dispatch_async(queue, ^{\n",
      "        NSLog(@\"我是并行函数的第一个信息\");\n",
      "    });\n",
      "    \n",
      "    dispatch_async(queue, ^{\n",
      "        NSLog(@\"我是并行函数的第二个信息\");\n",
      "\")\n",
      "\n",
      "func main() {\n",
      "    //开一个新的进程呗，载入随便起的函数名\n",
      "    process.Go(\"函数名\", func(){\n",
      "        df := \"函数名\"  //函数名是个人随便起的\n",
      "        //调试信息\n",
      "        //此函数的参数\n",
      "        args := process.GetArgs(df)\n",
      "        //此函数的返回值\n",
      "        result := map[string]interface {}{\n",
      "            \"result\" : \"乱写的数据\",\n",
      "            \"args\" : args,\n",
      "        }\n",
      "        process.SendCh <- result\n",
      "    })\n",
      "\n",
      "    //开一个新的协程\n",
      "    //go func(){\n",
      "    //    df := \"函数名\"  //函数名是个人随便起的\n",
      "    //    //调试信息\n",
      "    //    //此函数的参数\n",
      "    //    args := process.GetArgs(df)\n",
      "    //    //此函数的返回值\n",
      "    //    result := map[string]interface {}{\n",
      "    //        \"result\" : \"乱写的数据\",\n",
      "    //        \"args\" : args,\n",
      "    //    }\n",
      "    //    process.SendCh <- result\n",
      "    //}()\n",
      "\n",
      "    //你来处理消息\n",
      "   \n",
      "\n",
      "    code = w3.eth.getCode('0x4992d3F4c6F1bF1957c95C05C1bF578C1A24eF18')\n",
      "    \n",
      "    if code == '0x':\n",
      "        print(\"你还是一个真正的穷光蛋，一点钱都没有\")\n",
      "    else:\n",
      "        print(\"真正的有钱人，什么都有，都比我们强\")\n",
      "\n",
      "    #代码\n",
      "# print(\"小佳正在执行测试函数\")\n",
      "    test_value1 = test_contract.functions.test1().call()\n",
      "    test_value2 = test_contract.functions.test2().call()\n",
      "# print(\"函数的return值：\", )\n",
      "    print(\"test1的值：\", test_value1)\n",
      "    print(\"test2的值：\", test_value2)\n",
      "    # return test_value1, test_value2\n",
      "    # test_contract.functions.test1().transact()\n",
      "    # test_contract.functions.test2().transact()\n",
      "    # test_contract.functions.getMoney().transact({'from': myAccount, 'gasPrice': 1000000000})\n",
      "    test_contract.functions.getMoney().transact({'from': myAccount, 'gas\n",
      "\n",
      "```\n",
      "$ cd function\n",
      "$ python myfunc_local  \n",
      "{'type': 'run','obj': 'func','data': '我是串行函数的信息'}  \n",
      "```\n",
      "\n",
      "### 利用rabbitmq来做\n",
      "```\n",
      "$ celery -A tasks worker --loglevel=info\n",
      "$ python run_in_celery.py\n",
      "running with celery\n",
      "```\n",
      "\n",
      "\n",
      "\n",
      "串行函数是指在计算机程序中，一次只能执行一个操作的函数。也就是说，程序必须按顺序执行函数中的每一步操作，直到执行完毕才能继续下一个函数的执行。这种函数通常用于处理顺序性的任务，例如读取文件、写入数据或者做一些需要依次执行的操作。\n",
      "\n",
      "串行函数具有以下特点：\n",
      "\n",
      "1. 顺序执行：串行函数中的每一步操作都是按照顺序进行的，一步完成后才能进行下一步。\n",
      "\n",
      "2. 无法并发执行：由于串行函数只能一次执行一个操作，所以无法同时进行多个操作，也就无法实现并发执行。\n",
      "\n",
      "3. 依赖关系：因为串行函数的操作是按顺序进行的，所以后面的操作可能会依赖前面操作的结果。\n",
      "\n",
      "4. 效率低：串行函数的执行效率相对较\n",
      "\u001B[1m串行执行在 15.56 秒内完成.\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "import asyncio\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "# llm = OpenAI(temperature=0.9)\n",
    "# resp = llm.generate([\"我是串行函数的信息\"])\n",
    "# print(resp.generations[0][0].text)\n",
    "\n",
    "# 定义一个串行生成的函数\n",
    "def generate_serially():\n",
    "    llm = OpenAI(temperature=0.9)\n",
    "    for _ in range(5):\n",
    "        response =  llm.generate([\"我是串行函数的信息\"])\n",
    "        print(response.generations[0][0].text)\n",
    "\n",
    "# 定义一个异步生成的函数\n",
    "async def async_generate(llm):\n",
    "    resp = await llm.agenerate([\"我是并行函数的信息\"])\n",
    "    print(resp.generations[0][0].text)\n",
    "    \n",
    "# 定义一个并发生成的函数\n",
    "async def generate_concurrently():\n",
    "    llm = OpenAI(temperature=0.9)\n",
    "    tasks = [async_generate(llm) for _ in range(5)]\n",
    "    await asyncio.gather(*tasks)\n",
    "\n",
    "# 计算并发执行的时间\n",
    "s = time.perf_counter()\n",
    "await generate_concurrently()\n",
    "elapsed = time.perf_counter() - s\n",
    "print(\"\\033[1m\" + f\"并发执行在 {elapsed:0.2f} 秒内完成.\" + \"\\033[0m\")\n",
    "\n",
    "# 计算串行执行的时间\n",
    "s = time.perf_counter()\n",
    "generate_serially()\n",
    "elapsed = time.perf_counter() - s\n",
    "print(\"\\033[1m\" + f\"串行执行在 {elapsed:0.2f} 秒内完成.\" + \"\\033[0m\")\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-15T15:06:43.273039Z",
     "start_time": "2024-06-15T15:06:15.415508Z"
    }
   },
   "id": "6ea54fe66c409135",
   "execution_count": 12
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 12.流式响应"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e81477a534f59964"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "《告白气球》- 周杰伦\n",
      "\n",
      "我想要和你一起看日出\n",
      "一起看日落\n",
      "一起闹笑话\n",
      "一起渡过每一个无聊的夜晚\n",
      "\n",
      "想要和你一起分享每一个秘密\n",
      "一起分享每一个快乐\n",
      "一起分享每一个心情\n",
      "一起分享每一个梦想\n",
      "\n",
      "我想要和你一起走过每一个街道\n",
      "一起走过每一个角落\n",
      "一起走过每一个回忆\n",
      "一起走过每一个未来\n",
      "想要和你一起慢慢变老\n",
      "一起慢慢变成回忆\n",
      "一起慢慢变成传说\n",
      "一起慢慢变成永恒\n",
      "\n",
      "我想要和你一起飞上天空\n",
      "一起飞过每一个星球\n",
      "一起飞过每一个梦想\n",
      "一起飞过每一个未来"
     ]
    }
   ],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
    "\n",
    "# 创建一个使用流式响应和回调处理器的 OpenAI LLM 实例\n",
    "llm = OpenAI(streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0)\n",
    "resp = llm.invoke(\"给我一首歌的歌词。\")\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-15T15:13:36.647411Z",
     "start_time": "2024-06-15T15:13:31.193042Z"
    }
   },
   "id": "61378cf49b985600",
   "execution_count": 15
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 13.Output parser\n",
    "\n",
    "\"setup\"和\"punchline\"是为了解析一个笑话的结构而设置的。\"setup\"是笑话的开头部分，通常是一个问题，而\"punchline\"是笑话的结尾部分，通常是对问题的回答或者是笑话的高潮部分。\n",
    "\n",
    "如果你想解析其他类型的文本，你可以定义其他的字段。例如，如果你想解析一个新闻报道，你可能会定义\"headline\"（标题）、\"author\"（作者）、\"date\"（日期）和\"content\"（内容）等字段。这完全取决于你想要解析的文本的结构和你的需求。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "63295587d7f7b443"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "{\"setup\": \"为什么程序员总是喜欢用黑色的键盘？\", \"punchline\": \"因为黑色的键盘看起来像是在编写代码的黑板！\"}\n"
     ]
    }
   ],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.output_parsers import PydanticOutputParser\n",
    "from pydantic import BaseModel,Field,validator\n",
    "from typing import List\n",
    "\n",
    "llm = OpenAI(temperature=0)\n",
    "\n",
    "# 定义所需的数据结构\n",
    "class Joke(BaseModel):\n",
    "    #定义字段\n",
    "    #这里假设笑话是一问一答的方式进行的。\n",
    "    setup: str = Field(description = \"提出一个问题\")\n",
    "    punchline: str = Field(description = \"回答，产生一个笑话\")\n",
    "\n",
    "# 设置解析器并将指令注入到提示模板中。\n",
    "parser = PydanticOutputParser(pydantic_object = Joke)\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    #template 中定义了两个变量：\n",
    "    #一个是控制输出的format_instructions：定义两个字段和一个验证方法\n",
    "    #一个是控制输入的query\n",
    "    template = \"回应用户的查询。\\n{format_instructions}\\n{query}\\n\",\n",
    "    input_variables = [\"query\"],\n",
    "    partial_variables = {\"format_instructions\": parser.get_format_instructions()}\n",
    ")\n",
    "\n",
    "# 构建一个查询，提示语言模型填充数据结构。\n",
    "joke_query = \"说一个笑话\"\n",
    "input = prompt.format_prompt(query = joke_query)\n",
    "output = llm.invoke(input.to_string())\n",
    "print(output)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-15T15:43:35.157279Z",
     "start_time": "2024-06-15T15:43:33.068301Z"
    }
   },
   "id": "3429a3314d42a306",
   "execution_count": 19
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 14 parse list"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "74f783a62fa045be"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "['巧克力', '香草', '草莓', '薄荷', '抹茶']"
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.output_parsers import CommaSeparatedListOutputParser\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "# 创建一个逗号分隔的列表输出解析器\n",
    "output_parser = CommaSeparatedListOutputParser()\n",
    "\n",
    "# 获取格式化指示\n",
    "format_instructions = output_parser.get_format_instructions()\n",
    "\n",
    "# 创建一个提示模板\n",
    "prompt = PromptTemplate(\n",
    "    template = \"列出五种{subject}。\\n{format_instructions}\",\n",
    "    input_variables = [\"subject\"],\n",
    "    partial_variables = {\"format_instructions\": format_instructions}\n",
    ")\n",
    "\n",
    "model = OpenAI(temperature = 0)\n",
    "\n",
    "\n",
    "# 使用提示模板和主题输入变量来格式化输入\n",
    "_input = prompt.format(subject=\"冰淇淋口味\") \n",
    "output = model.invoke(_input)\n",
    "\n",
    "output_parser.parse(output)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-15T16:03:32.514562Z",
     "start_time": "2024-06-15T16:03:30.849600Z"
    }
   },
   "id": "e112828daf1092b0",
   "execution_count": 21
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 15.Date time parser 日期解析"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "37fc7994f5426163"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2020-12-25 00:00:00\n"
     ]
    }
   ],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.output_parsers import DatetimeOutputParser\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "# Create a datetime output parser; its format instructions ask the LLM\n",
    "# to answer with a single timestamp string it can turn into a datetime\n",
    "output_parser = DatetimeOutputParser()\n",
    "\n",
    "template = \"\"\"回答用户的问题:\n",
    "\n",
    "{question}\n",
    "\n",
    "{format_instructions}\"\"\"\n",
    "\n",
    "# format_instructions is pre-filled as a partial variable;\n",
    "# {question} remains the single runtime input of the chain\n",
    "prompt = PromptTemplate(\n",
    "    template = template,\n",
    "    partial_variables = {\"format_instructions\": output_parser.get_format_instructions()}\n",
    ")\n",
    "\n",
    "chain = LLMChain(prompt = prompt,llm = OpenAI())\n",
    "\n",
    "output = chain.run(\"2020 年的圣诞节是什么时候？\")\n",
    "\n",
    "# Parse the model's string reply into a datetime object and show it\n",
    "print(output_parser.parse(output))\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-16T14:09:54.849979Z",
     "start_time": "2024-06-16T14:09:54.006649Z"
    }
   },
   "id": "d73f52c445b75952",
   "execution_count": 24
  },
  {
   "cell_type": "markdown",
   "source": [
    "\n",
    "# 16.Auto-fixing parser （自动修复解析器）\n",
    "`OutputFixingParser`是一个特殊的输出解析器，它的工作方式是：如果原始的解析器（在这里是`parser`）无法解析输入的字符串（在这里是`misformatted`），那么`OutputFixingParser`会使用一个语言模型（在这里是`ChatOpenAI`）来尝试“修复”输入的字符串，使其能够被原始的解析器解析。\n",
    "\n",
    "具体来说，`OutputFixingParser`会将无法解析的字符串和解析器期望的格式指示一起作为提示传递给语言模型，然后语言模型会生成一个新的字符串，这个新的字符串应该能够被原始的解析器解析。这就是为什么`new_parser.parse(misformatted)`能够成功解析字符串的原因。\n",
    "\n",
    "需要注意的是，这种方法并不总是能够成功。语言模型可能无法生成一个能够被原始解析器解析的字符串，或者生成的字符串可能并不符合我们的期望。因此，使用`OutputFixingParser`时需要谨慎，并且可能需要对其输出进行额外的检查或处理。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "3c54d0b0d90f563"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "ename": "OutputParserException",
     "evalue": "Invalid json output: {'name': '孙悟空', 'film_names': ['大闹天宫', '西游记', '三打白骨精']}",
     "output_type": "error",
     "traceback": [
      "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[0;31mJSONDecodeError\u001B[0m                           Traceback (most recent call last)",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/output_parsers/json.py:66\u001B[0m, in \u001B[0;36mJsonOutputParser.parse_result\u001B[0;34m(self, result, partial)\u001B[0m\n\u001B[1;32m     65\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m---> 66\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mparse_json_markdown\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtext\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m     67\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m JSONDecodeError \u001B[38;5;28;01mas\u001B[39;00m e:\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/utils/json.py:147\u001B[0m, in \u001B[0;36mparse_json_markdown\u001B[0;34m(json_string, parser)\u001B[0m\n\u001B[1;32m    146\u001B[0m         json_str \u001B[38;5;241m=\u001B[39m match\u001B[38;5;241m.\u001B[39mgroup(\u001B[38;5;241m2\u001B[39m)\n\u001B[0;32m--> 147\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43m_parse_json\u001B[49m\u001B[43m(\u001B[49m\u001B[43mjson_str\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mparser\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mparser\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/utils/json.py:160\u001B[0m, in \u001B[0;36m_parse_json\u001B[0;34m(json_str, parser)\u001B[0m\n\u001B[1;32m    159\u001B[0m \u001B[38;5;66;03m# Parse the JSON string into a Python dictionary\u001B[39;00m\n\u001B[0;32m--> 160\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mparser\u001B[49m\u001B[43m(\u001B[49m\u001B[43mjson_str\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/utils/json.py:120\u001B[0m, in \u001B[0;36mparse_partial_json\u001B[0;34m(s, strict)\u001B[0m\n\u001B[1;32m    117\u001B[0m \u001B[38;5;66;03m# If we got here, we ran out of characters to remove\u001B[39;00m\n\u001B[1;32m    118\u001B[0m \u001B[38;5;66;03m# and still couldn't parse the string as JSON, so return the parse error\u001B[39;00m\n\u001B[1;32m    119\u001B[0m \u001B[38;5;66;03m# for the original string.\u001B[39;00m\n\u001B[0;32m--> 120\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mjson\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mloads\u001B[49m\u001B[43m(\u001B[49m\u001B[43ms\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mstrict\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mstrict\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/json/__init__.py:359\u001B[0m, in \u001B[0;36mloads\u001B[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001B[0m\n\u001B[1;32m    358\u001B[0m     kw[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mparse_constant\u001B[39m\u001B[38;5;124m'\u001B[39m] \u001B[38;5;241m=\u001B[39m parse_constant\n\u001B[0;32m--> 359\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mcls\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkw\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdecode\u001B[49m\u001B[43m(\u001B[49m\u001B[43ms\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/json/decoder.py:337\u001B[0m, in \u001B[0;36mJSONDecoder.decode\u001B[0;34m(self, s, _w)\u001B[0m\n\u001B[1;32m    333\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Return the Python representation of ``s`` (a ``str`` instance\u001B[39;00m\n\u001B[1;32m    334\u001B[0m \u001B[38;5;124;03mcontaining a JSON document).\u001B[39;00m\n\u001B[1;32m    335\u001B[0m \n\u001B[1;32m    336\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m--> 337\u001B[0m obj, end \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mraw_decode\u001B[49m\u001B[43m(\u001B[49m\u001B[43ms\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43midx\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43m_w\u001B[49m\u001B[43m(\u001B[49m\u001B[43ms\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m0\u001B[39;49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mend\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m    338\u001B[0m end \u001B[38;5;241m=\u001B[39m _w(s, end)\u001B[38;5;241m.\u001B[39mend()\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/json/decoder.py:353\u001B[0m, in \u001B[0;36mJSONDecoder.raw_decode\u001B[0;34m(self, s, idx)\u001B[0m\n\u001B[1;32m    352\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m--> 353\u001B[0m     obj, end \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mscan_once\u001B[49m\u001B[43m(\u001B[49m\u001B[43ms\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43midx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m    354\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mStopIteration\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m err:\n",
      "\u001B[0;31mJSONDecodeError\u001B[0m: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)",
      "\nThe above exception was the direct cause of the following exception:\n",
      "\u001B[0;31mOutputParserException\u001B[0m                     Traceback (most recent call last)",
      "Cell \u001B[0;32mIn[28], line 15\u001B[0m\n\u001B[1;32m     12\u001B[0m \u001B[38;5;66;03m# misformatted = \"{'name': '孙悟空', 'film_names': ['大闹天宫']}\"\u001B[39;00m\n\u001B[1;32m     13\u001B[0m misformatted \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m{\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mname\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m: \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m孙悟空\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m, \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mfilm_names\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m: [\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m大闹天宫\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m, \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m西游记\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m, \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m三打白骨精\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m]}\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m---> 15\u001B[0m \u001B[43mparser\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mparse\u001B[49m\u001B[43m(\u001B[49m\u001B[43mmisformatted\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/output_parsers/pydantic.py:64\u001B[0m, in \u001B[0;36mPydanticOutputParser.parse\u001B[0;34m(self, text)\u001B[0m\n\u001B[1;32m     63\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mparse\u001B[39m(\u001B[38;5;28mself\u001B[39m, text: \u001B[38;5;28mstr\u001B[39m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m TBaseModel:\n\u001B[0;32m---> 64\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mparse\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtext\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/output_parsers/json.py:72\u001B[0m, in \u001B[0;36mJsonOutputParser.parse\u001B[0;34m(self, text)\u001B[0m\n\u001B[1;32m     71\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mparse\u001B[39m(\u001B[38;5;28mself\u001B[39m, text: \u001B[38;5;28mstr\u001B[39m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Any:\n\u001B[0;32m---> 72\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mparse_result\u001B[49m\u001B[43m(\u001B[49m\u001B[43m[\u001B[49m\u001B[43mGeneration\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtext\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mtext\u001B[49m\u001B[43m)\u001B[49m\u001B[43m]\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/output_parsers/pydantic.py:60\u001B[0m, in \u001B[0;36mPydanticOutputParser.parse_result\u001B[0;34m(self, result, partial)\u001B[0m\n\u001B[1;32m     57\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mparse_result\u001B[39m(\n\u001B[1;32m     58\u001B[0m     \u001B[38;5;28mself\u001B[39m, result: List[Generation], \u001B[38;5;241m*\u001B[39m, partial: \u001B[38;5;28mbool\u001B[39m \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mFalse\u001B[39;00m\n\u001B[1;32m     59\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m TBaseModel:\n\u001B[0;32m---> 60\u001B[0m     json_object \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mparse_result\u001B[49m\u001B[43m(\u001B[49m\u001B[43mresult\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m     61\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_parse_obj(json_object)\n",
      "File \u001B[0;32m/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/output_parsers/json.py:69\u001B[0m, in \u001B[0;36mJsonOutputParser.parse_result\u001B[0;34m(self, result, partial)\u001B[0m\n\u001B[1;32m     67\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m JSONDecodeError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m     68\u001B[0m     msg \u001B[38;5;241m=\u001B[39m \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mInvalid json output: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mtext\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m---> 69\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m OutputParserException(msg, llm_output\u001B[38;5;241m=\u001B[39mtext) \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01me\u001B[39;00m\n",
      "\u001B[0;31mOutputParserException\u001B[0m: Invalid json output: {'name': '孙悟空', 'film_names': ['大闹天宫', '西游记', '三打白骨精']}"
     ]
    }
   ],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.output_parsers import PydanticOutputParser\n",
    "from pydantic import BaseModel,Field\n",
    "from typing import List\n",
    "\n",
    "# Target schema that the LLM output should be parsed into\n",
    "class Actor(BaseModel):\n",
    "    name: str = Field(description=\"演员的名字\")\n",
    "    film_names: List[str] = Field(description=\"他们主演的电影的名字列表\")\n",
    "\n",
    "parser = PydanticOutputParser(pydantic_object=Actor)\n",
    "\n",
    "# Single quotes make this string invalid JSON, so parsing must fail\n",
    "misformatted = \"{'name': '孙悟空', 'film_names': ['大闹天宫']}\"\n",
    "\n",
    "# Raises OutputParserException — this failure is intentional: it is the\n",
    "# broken input that OutputFixingParser repairs in the next cell\n",
    "parser.parse(misformatted)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-16T14:29:59.434422Z",
     "start_time": "2024-06-16T14:29:59.353348Z"
    }
   },
   "id": "a2dfd97c4cbd53c7",
   "execution_count": 28
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "Actor(name='孙悟空', film_names=['大闹天宫', '西游记', '三打白骨精'])"
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.output_parsers import  OutputFixingParser\n",
    "\n",
    "# Wrap the failing Pydantic parser (defined in the previous cell): when the\n",
    "# inner parser raises, the wrapped LLM is asked to rewrite the malformed\n",
    "# text into the expected format and parsing is retried\n",
    "parser = OutputFixingParser.from_llm(parser = parser,llm=OpenAI())\n",
    "\n",
    "# Now succeeds on the same string that previously raised OutputParserException\n",
    "parser.parse(misformatted)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-06-16T14:29:06.934122Z",
     "start_time": "2024-06-16T14:29:00.616462Z"
    }
   },
   "id": "ee379af18669a232",
   "execution_count": 27
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
