{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 4. 提示词模板之ChatPromptTemplate\n",
    "4.1 实例消息(构造方法、from_messages)"
   ],
   "id": "f2fd63e8ee001338"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T01:23:50.961648Z",
     "start_time": "2025-10-23T01:23:50.939569Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 4.1使用构造方法构造ChatPromptTemplate\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate(\n",
    "    input_variables=[\"name\", \"age\"],\n",
    "    messages=[\n",
    "        (\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "        (\"human\", \"我的问题是{question}?\"), ]\n",
    ")\n",
    "# 使用invoke方法实例化\n",
    "response = chat_prompt_template.invoke(input={\n",
    "    \"name\": \"小智\",\n",
    "    \"age\": \"18\",\n",
    "    \"question\": \"你叫什么名字,1+2*3=??\"\n",
    "})\n",
    "print(response)\n",
    "print(type(response))  # <class 'langchain_core.prompt_values.ChatPromptValue'>\n",
    "print(response.messages)\n",
    "print(len(response.messages))  #2"
   ],
   "id": "dabac15cb61f076e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "messages=[SystemMessage(content='你好，我是小智，今年18岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=???', additional_kwargs={}, response_metadata={})]\n",
      "<class 'langchain_core.prompt_values.ChatPromptValue'>\n",
      "[SystemMessage(content='你好，我是小智，今年18岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=???', additional_kwargs={}, response_metadata={})]\n",
      "2\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T01:26:59.793214Z",
     "start_time": "2025-10-23T01:26:59.775551Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 4.1使用构造方法构造ChatPromptTemplate(压缩版)\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate(\n",
    "    [(\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "     (\"human\", \"我的问题是{question}?\"), ]\n",
    ")\n",
    "# 使用invoke方法实例化\n",
    "response = chat_prompt_template.invoke({\n",
    "    \"name\": \"小智\",\n",
    "    \"age\": \"18\",\n",
    "    \"question\": \"你叫什么名字,1+2*3=??\"\n",
    "})\n",
    "print(response)\n",
    "print(type(response))  # <class 'langchain_core.prompt_values.ChatPromptValue'>\n",
    "print(len(response.messages))  #2"
   ],
   "id": "e5b315fea5384c32",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "messages=[SystemMessage(content='你好，我是小智，今年18岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=???', additional_kwargs={}, response_metadata={})]\n",
      "<class 'langchain_core.prompt_values.ChatPromptValue'>\n",
      "2\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "4.2 使用from_messages方法构造ChatPromptTemplate",
   "id": "6113a68c081e690b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T01:38:11.807772Z",
     "start_time": "2025-10-23T01:38:11.787083Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 4.2 使用from_messages构造ChatPromptTemplate(压缩版)\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages(\n",
    "    [(\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "     (\"human\", \"我的问题是{question}?\"), ]\n",
    ")\n",
    "# 使用invoke方法实例化，返回值是ChatPromptValue\n",
    "response = chat_prompt_template.invoke({\n",
    "    \"name\": \"小智\",\n",
    "    \"age\": \"18\",\n",
    "    \"question\": \"你叫什么名字,1+2*3=??\"\n",
    "})\n",
    "print(response)\n",
    "print(type(response))  # <class 'langchain_core.prompt_values.ChatPromptValue'>\n",
    "print(len(response.messages))  #2\n",
    "resstr = response.to_string()\n",
    "print(resstr)  #System: 你好，我是小智，今年18岁了\n",
    "# Human: 我的问题是你叫什么名字,1+2*3=???\n",
    "print(type(resstr))  #<class 'str'>"
   ],
   "id": "5d7d79a0d2dec8ce",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "messages=[SystemMessage(content='你好，我是小智，今年18岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=???', additional_kwargs={}, response_metadata={})]\n",
      "<class 'langchain_core.prompt_values.ChatPromptValue'>\n",
      "2\n",
      "System: 你好，我是小智，今年18岁了\n",
      "Human: 我的问题是你叫什么名字,1+2*3=???\n",
      "<class 'str'>\n"
     ]
    }
   ],
   "execution_count": 37
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# 2调用提示词模板的几种方法\n",
    "# invoke：传入字典，返回ChatPromptValue\n",
    "# format：传入变量的值，返回str\n",
    "# format_prompt：传入变量的值，返回ChatPromptValue\n",
    "# format_messages：传入变量的值，返回消息列表list\n",
    "# 关于消息的转化：\n",
    "# ChatPromptValue-->to_string：返回str\n",
    "#                   to_messages：返回消息列表list"
   ],
   "id": "f258f7c6e9c9b7de"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 2.1使用format方法调用提示词模板",
   "id": "f049c9b4c42e7c77"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T01:40:35.611198Z",
     "start_time": "2025-10-23T01:40:35.599670Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 使用from_messages构造ChatPromptTemplate模板\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "    (\"human\", \"我的问题是{user_input}?\"),\n",
    "])\n",
    "# 使用format方法实例化,返回值为str\n",
    "response = chat_prompt_template.format(\n",
    "    name=\"小安\",\n",
    "    age=\"17\",\n",
    "    user_input=\"你叫什么名字,1+2*3=?\"\n",
    ")\n",
    "print(response)\n",
    "print(type(response))  # <class 'str'>"
   ],
   "id": "389b785ccee43c57",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "System: 你好，我是小安，今年17岁了\n",
      "Human: 我的问题是你叫什么名字,1+2*3=??\n",
      "<class 'str'>\n"
     ]
    }
   ],
   "execution_count": 38
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 2.2使用format_prompt方法调用提示词模板",
   "id": "67e2bdc229059ce7"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T01:59:35.837444Z",
     "start_time": "2025-10-23T01:59:35.824157Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 使用from_messages构造ChatPromptTemplate模板\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "    (\"human\", \"我的问题是{user_input}?\"),\n",
    "])\n",
    "# 使用format_prompt方法实例化,返回值为ChatPromptValue\n",
    "response = chat_prompt_template.format_prompt(\n",
    "    name=\"小安\",\n",
    "    age=\"17\",\n",
    "    user_input=\"你叫什么名字,1+2*3=?\"\n",
    ")\n",
    "print(response)\n",
    "print(type(response))  # ChatPromptValue"
   ],
   "id": "d0380163a41dcf58",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "messages=[SystemMessage(content='你好，我是小安，今年17岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=??', additional_kwargs={}, response_metadata={})]\n",
      "<class 'langchain_core.prompt_values.ChatPromptValue'>\n"
     ]
    }
   ],
   "execution_count": 39
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 2.3使用format_messages方法调用提示词模板",
   "id": "4b8b471e68b89f7c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T02:02:01.978217Z",
     "start_time": "2025-10-23T02:02:01.968575Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 使用from_messages构造ChatPromptTemplate模板\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "    (\"human\", \"我的问题是{user_input}?\"),\n",
    "])\n",
    "# 使用format_messages方法实例化,返回值为消息列表list\n",
    "response = chat_prompt_template.format_messages(\n",
    "    name=\"小子\",\n",
    "    age=\"18\",\n",
    "    user_input=\"你叫什么名字,1+2*3=?\"\n",
    ")\n",
    "print(response)\n",
    "print(type(response))  # <class 'list'>"
   ],
   "id": "ad9f6aa0ac2f8994",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[SystemMessage(content='你好，我是小子，今年18岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是你叫什么名字,1+2*3=??', additional_kwargs={}, response_metadata={})]\n",
      "<class 'list'>\n"
     ]
    }
   ],
   "execution_count": 40
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 消息之间的转化\n",
    "ChatPromptValue与str\\messages之间的转化"
   ],
   "id": "7b600c62052a3027"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T02:27:06.036211Z",
     "start_time": "2025-10-23T02:27:06.021497Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 使用from_messages构造ChatPromptTemplate提示词模板\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你好，我是{name}，今年{age}岁了\"),\n",
    "    (\"human\", \"我的问题是，{user_question}{user_input1}?\"),\n",
    "]).partial(user_input1=\" 你可太六了\")\n",
    "# 使用format_prompt方法调用实例化模版,返回值为ChatPromptValue\n",
    "response = chat_prompt_template.format_prompt(\n",
    "    name=\"硅谷AK\",\n",
    "    age=\"19\",\n",
    "    user_question=\"你叫什么名字？,1+2*3=?\"\n",
    ")\n",
    "print(response)\n",
    "print(type(response))  # format_prompt默认返回值为ChatPromptValue\n",
    "response_str = response.to_string()\n",
    "print(\"response_str:\", type(response_str))  # 转化为str\n",
    "print(f\"response_str:\", type(response_str))  # 转化为str\n",
    "print(f\"response_str:{type(response_str)}\")  # 转化为str\n",
    "response_messages = response.to_messages()\n",
    "print({\"response_messages\": type(response_messages)})  # 转化为list"
   ],
   "id": "ec1ba5dc39888f2d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "messages=[SystemMessage(content='你好，我是硅谷AK，今年19岁了', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是，你叫什么名字？,1+2*3=? 你可太六了?', additional_kwargs={}, response_metadata={})]\n",
      "<class 'langchain_core.prompt_values.ChatPromptValue'>\n",
      "response_str: <class 'str'>\n",
      "response_str: <class 'str'>\n",
      "response_str:<class 'str'>\n",
      "{'response_messages': <class 'list'>}\n"
     ]
    }
   ],
   "execution_count": 64
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 3.更丰富的实例化参数\n",
    "3.1 SystemMessagePromptTemplate 、 HumanMessagePromptTemplate 和\n",
    "AIMessagePromptTemplate是常用的，分别创建系统消息、人工消息和AI消息，\n",
    "它们是ChatMessagePromptTemplate的特定角色子类\n",
    "\n",
    "3.2 ChatMessagePromptTemplate用于构建聊天消息的模板。\n",
    "它允许你创建可重用的消息模板，可以动态地插入变量值来生成最终的聊天消息"
   ],
   "id": "6b45dc2607a07334"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T04:15:49.678392Z",
     "start_time": "2025-10-23T04:15:49.656762Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 举例1 chat_prompt\n",
    "# 导入聊天消息类模板\n",
    "from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n",
    "\n",
    "# 创建消息模板\n",
    "system_template = \"你是一个专业{role}\"\n",
    "system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)\n",
    "human_template = \"用浅显易懂的语言，给我解释{concept}\"\n",
    "human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)\n",
    "# 组合成聊天提示模板\n",
    "chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt,\n",
    "                                                human_message_prompt])\n",
    "# 格式化提示\n",
    "formatted_messages = chat_prompt.format_messages(\n",
    "    role=\"物理学家\",\n",
    "    concept=\"相对论\"\n",
    ")\n",
    "print(type(formatted_messages))  #ChatPromptTemplate返回值是消息列表list\n",
    "print(formatted_messages)"
   ],
   "id": "378ad135a6ae0a4b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'list'>\n",
      "[SystemMessage(content='你是一个专业物理学家', additional_kwargs={}, response_metadata={}), HumanMessage(content='用浅显易懂的语言，给我解释相对论', additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 73
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-23T04:28:38.699088Z",
     "start_time": "2025-10-23T04:28:38.678466Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 举例2：ChatMessagePromptTemplate的使用\n",
    "from langchain_core.prompts import ChatMessagePromptTemplate\n",
    "\n",
    "# 2.定义模版\n",
    "prompt = \"今天我们授课的内容是{subject}\"\n",
    "prompt2 = \"今天我们授课的内容是{subject1}\"\n",
    "prompt3 = \"今天我们学习的内容是{subject1}\"\n",
    "prompt5 = \"今天我们学习的内容是{subject1}\"\n",
    "# 3.创建自定义角色聊天消息提示词模版\n",
    "chat_message_prompt = ChatMessagePromptTemplate.from_template(\n",
    "    role=\"teacher\", template=prompt + ';' + prompt2\n",
    ")\n",
    "chat_message_prompt1 = ChatMessagePromptTemplate.from_template(\n",
    "    role=\"stu\", template=prompt3 + ';' + prompt5\n",
    ")\n",
    "resp = chat_message_prompt.format(subject=\"机器学习\", subject1=\"深度学习\")\n",
    "resp1 = chat_message_prompt1.format(subject=\"机器学习\", subject1=\"深度学习\")\n",
    "print(type(resp))  #ChatMessagePromptTemplate返回值是对话消息ChatMessage\n",
    "print(resp)\n",
    "print(resp1)"
   ],
   "id": "82042356aac56e75",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'langchain_core.messages.chat.ChatMessage'>\n",
      "content='今天我们授课的内容是机器学习;今天我们授课的内容是深度学习' additional_kwargs={} response_metadata={} role='teacher'\n",
      "content='今天我们学习的内容是深度学习;今天我们学习的内容是深度学习' additional_kwargs={} response_metadata={} role='stu'\n"
     ]
    }
   ],
   "execution_count": 82
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "ChatPromptTemplate的两种创建方式。我们看到不管使用构造方法，还是使用from_messages()，\n",
    "参数类型都是 列表类型 。列表元素有多种类型 str、字典、Message、ChatPromptTemplate、MessagePromptTemplate"
   ],
   "id": "442cc85c3cffb79c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T02:29:08.950223Z",
     "start_time": "2025-10-24T02:29:08.851949Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 列表参数格式是str类型（不推荐），因为默认角色都是human\n",
    "#1.导入相关依赖\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_core.messages import SystemMessage, HumanMessage, AIMessage\n",
    "\n",
    "# 2.定义str聊天提示词模版\n",
    "chat_template = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        \"Hello, {name}!\"\n",
    "        # 等价于 (\"human\", \"Hello, {name}!\")\n",
    "    ]\n",
    ")\n",
    "# 3.1格式化聊天提示词模版中的变量(自己提供的)\n",
    "messages = chat_template.format_messages(name=\"小智AI\")\n",
    "# 3.2 使用invoke执行\n",
    "# HumanMessage(content='Hello, 小智AI!',\n",
    "# 4.打印格式化后的聊天提示词模版内容\n",
    "print(messages)"
   ],
   "id": "e85afd5f59b8b58e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[HumanMessage(content='Hello, 小智AI!', additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 83
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T02:47:55.182912Z",
     "start_time": "2025-10-24T02:47:55.173921Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 示例2: 字典形式的dict消息，可以指定角色、消息\n",
    "prompt = ChatPromptTemplate.from_messages([\n",
    "    {\"role\": \"system\", \"content\": \"你是一个{role}.\"},\n",
    "    {\"role\": \"human\", \"content\": [\"{content}\", {\"type\": \"text\"}]},\n",
    "]).partial(content=\"今天我们授课的内容是\")\n",
    "print(prompt.format_messages(role=\"教师\"))"
   ],
   "id": "b5f89cbdd8dea371",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[SystemMessage(content='你是一个教师.', additional_kwargs={}, response_metadata={}), HumanMessage(content=[{'type': 'text', 'text': '今天我们授课的内容是'}, {'type': 'text'}], additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 116
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T02:48:44.787221Z",
     "start_time": "2025-10-24T02:48:44.768997Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 实例3 Message类型\n",
    "from langchain_core.messages import SystemMessage, HumanMessage\n",
    "\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([\n",
    "    SystemMessage(content=\"我是一个贴心的智能助手\"),\n",
    "    HumanMessage(content=\"我的问题是:人工智能英文怎么说？\")\n",
    "])\n",
    "messages = chat_prompt_template.format_messages()\n",
    "print(messages)\n",
    "print(type(messages))"
   ],
   "id": "e0606fd3932ed0e2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[SystemMessage(content='我是一个贴心的智能助手', additional_kwargs={}, response_metadata={}), HumanMessage(content='我的问题是:人工智能英文怎么说？', additional_kwargs={}, response_metadata={})]\n",
      "<class 'list'>\n"
     ]
    }
   ],
   "execution_count": 117
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 结合LLM",
   "id": "4851741819a0810b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T02:57:30.856224Z",
     "start_time": "2025-10-24T02:57:28.367694Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.prompts.chat import ChatPromptTemplate\n",
    "\n",
    "######1、提供提示词#########\n",
    "chat_prompt = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"你是一个数学家，你可以计算任何算式\"),\n",
    "    (\"human\", \"我的问题：{question}\"),\n",
    "])\n",
    "# 输入提示\n",
    "messages = chat_prompt.format_messages(\n",
    "    question=\"我今年26岁，我的舅舅今年39岁，我的爷爷今年79岁，我和舅舅一共多少岁了？\")\n",
    "#print(messages)\n",
    "######2、提供大模型#########\n",
    "import os\n",
    "import dotenv\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "dotenv.load_dotenv()\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv(\"DASHSCOPE_API_KEY\")\n",
    "os.environ['OPENAI_BASE_URL'] = os.getenv(\"DASHSCOPE_BASE_URL\")\n",
    "chat_model = ChatOpenAI(model=\"qwen-max\")\n",
    "######3、结合提示词，调用大模型#########\n",
    "# 得到模型的输出\n",
    "output = chat_model.invoke(messages)\n",
    "# 打印输出内容\n",
    "print(output.content)\n",
    "print(type(output)) #AIMessage"
   ],
   "id": "246f83f2dcef8509",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你今年26岁，你的舅舅今年39岁。你们两人的年龄加起来是：\n",
      "\n",
      "\\[26 + 39 = 65\\]\n",
      "\n",
      "所以，你和你的舅舅一共65岁了。\n",
      "<class 'langchain_core.messages.ai.AIMessage'>\n"
     ]
    }
   ],
   "execution_count": 119
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 4.6 插入消息列表：MessagesPlaceholder\n",
    "不确定消息提示模板使用什么角色，或者希望在格式化过程插入消息列表\n",
    "使用场景：多轮对话系统存储历史消息以及Agent的中间步骤处理"
   ],
   "id": "7ae5be88876529ce"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:07:04.390548Z",
     "start_time": "2025-10-24T03:07:04.368930Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Example 1: MessagesPlaceholder injects a list of messages at a fixed slot in the prompt\n",
    "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "# NOTE: SystemMessage is imported here so this cell runs on a fresh kernel\n",
    "# (previously it relied on an import leaked from an earlier cell)\n",
    "from langchain_core.messages import HumanMessage, SystemMessage\n",
    "\n",
    "prompt_template = ChatPromptTemplate.from_messages([\n",
    "    (\"system\", \"You are a helpful assistant\"),\n",
    "    MessagesPlaceholder(\"msgs\")\n",
    "])\n",
    "# prompt_template.invoke({\"msgs\": [HumanMessage(content=\"hi!\")]})\n",
    "prompt_messages1 = prompt_template.format_messages(msgs=[HumanMessage(content=\"hi!\")])\n",
    "print(prompt_messages1)\n",
    "prompt_messages2 = prompt_template.format_messages(msgs=[SystemMessage(content=\"hi，sys!\")])\n",
    "print(prompt_messages2)"
   ],
   "id": "4130f8a286aa310b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[SystemMessage(content='You are a helpful assistant', additional_kwargs={}, response_metadata={}), HumanMessage(content='hi!', additional_kwargs={}, response_metadata={})]\n",
      "[SystemMessage(content='You are a helpful assistant', additional_kwargs={}, response_metadata={}), SystemMessage(content='hi，sys!', additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 123
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:10:09.955208Z",
     "start_time": "2025-10-24T03:10:09.933088Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 举例2：存储历史对话 内容\n",
    "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "from langchain_core.messages import AIMessage\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", \"You are a helpful assistant.\"),\n",
    "        MessagesPlaceholder(\"history\"),\n",
    "        (\"human\", \"{question}\")\n",
    "    ]\n",
    ")\n",
    "prompt.format_messages(\n",
    "    history=[HumanMessage(content=\"1+2*3 = ?\"), AIMessage(content=\"1+2*3=7\")],\n",
    "    question=\"我刚才问题是什么？\")"
   ],
   "id": "f4f91792c616f58c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[SystemMessage(content='You are a helpful assistant.', additional_kwargs={}, response_metadata={}),\n",
       " HumanMessage(content='1+2*3 = ?', additional_kwargs={}, response_metadata={}),\n",
       " AIMessage(content='1+2*3=7', additional_kwargs={}, response_metadata={}),\n",
       " HumanMessage(content='我刚才问题是什么？', additional_kwargs={}, response_metadata={})]"
      ]
     },
     "execution_count": 124,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 124
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:12:27.521752Z",
     "start_time": "2025-10-24T03:12:27.508235Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 运行时填充中间步骤的结果\n",
    "# 2.定义消息模板\n",
    "prompt = ChatPromptTemplate.from_messages([\n",
    "    SystemMessagePromptTemplate.from_template(\"你是{role}\"),\n",
    "    MessagesPlaceholder(variable_name=\"intermediate_steps\"),\n",
    "    HumanMessagePromptTemplate.from_template(\"{query}\")\n",
    "])\n",
    "# 3.定义消息对象（运行时填充中间步骤的结果）\n",
    "intermediate = [\n",
    "    SystemMessage(name=\"search\", content=\"北京: 晴, 25℃\")\n",
    "]\n",
    "# 4.格式化聊天消息提示词模版\n",
    "prompt.format_messages(\n",
    "    role=\"天气预报员\",\n",
    "    intermediate_steps=intermediate,\n",
    "    query=\"北京天气怎么样？\"\n",
    ")"
   ],
   "id": "fcf1761452cd2012",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[SystemMessage(content='你是天气预报员', additional_kwargs={}, response_metadata={}),\n",
       " SystemMessage(content='北京: 晴, 25℃', additional_kwargs={}, response_metadata={}, name='search'),\n",
       " HumanMessage(content='北京天气怎么样？', additional_kwargs={}, response_metadata={})]"
      ]
     },
     "execution_count": 125,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 125
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 4.5少量样本示例的提示词模板\n",
   "id": "90a8b24f537d3646"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:15:54.440002Z",
     "start_time": "2025-10-24T03:15:51.457672Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 少量样本示例的提示词模板\n",
    "res = chat_model.invoke(\"2 🦜 9是多少?\")\n",
    "print(res.content)\n",
    "#没有少量样本数据做参考时，llm无法预测出结果，需要提供少量样本数据作为参考"
   ],
   "id": "d875606afc0ba961",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "看起来您可能是想表达一个数学问题，但格式有些不清楚。如果您是想问“2加9等于多少”，那么答案是11。如果您的意思是其他运算（比如乘法、减法等）或者有其他含义，请提供更多的上下文信息，这样我才能更准确地帮助您解答。\n"
     ]
    }
   ],
   "execution_count": 126
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:19:28.620974Z",
     "start_time": "2025-10-24T03:19:28.595387Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 4.5.2 FewShotPromptTemplate的使用\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.prompts.few_shot import FewShotPromptTemplate\n",
    "\n",
    "#1、创建示例集合\n",
    "examples = [\n",
    "    {\"input\": \"北京天气怎么样\", \"output\": \"北京市\"},\n",
    "    {\"input\": \"南京下雨吗\", \"output\": \"南京市\"},\n",
    "    {\"input\": \"武汉热吗\", \"output\": \"武汉市\"}\n",
    "]\n",
    "#2、创建PromptTemplate实例\n",
    "example_prompt = PromptTemplate.from_template(\n",
    "    template=\"Input: {input}\\nOutput: {output}\"\n",
    ")\n",
    "#3、创建FewShotPromptTemplate实例\n",
    "prompt = FewShotPromptTemplate(\n",
    "    examples=examples,\n",
    "    example_prompt=example_prompt,\n",
    "    suffix=\"Input: {input}\\nOutput:\",  # 要放在示例后面的提示模板字符串。\n",
    "    input_variables=[\"input\"]  # 传入的变量\n",
    ")\n",
    "#4、调用\n",
    "prompt = prompt.invoke({\"input\": \"长沙多少度\"})\n",
    "print(\"===Prompt===\")\n",
    "print(prompt)"
   ],
   "id": "5007ce8ce527b464",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===Prompt===\n",
      "text='Input: 北京天气怎么样\\nOutput: 北京市\\n\\nInput: 南京下雨吗\\nOutput: 南京市\\n\\nInput: 武汉热吗\\nOutput: 武汉市\\n\\nInput: 长沙多少度\\nOutput:'\n"
     ]
    }
   ],
   "execution_count": 127
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:21:05.773157Z",
     "start_time": "2025-10-24T03:20:59.070923Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 结合大模型调用：\n",
    "res = chat_model.invoke(prompt)\n",
    "print(\"===Prompt===\")\n",
    "print(res.content)"
   ],
   "id": "36874d94f5df7af5",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===Prompt===\n",
      "长沙市\n",
      "\n",
      "看起来您是在询问各个城市的天气情况。不过，我当前无法直接获取实时天气数据。为了得到准确的天气信息，建议您可以查看最新的天气预报或使用专门的天气查询网站及应用程序。如果您告诉我具体想了解哪个城市的哪方面天气信息（如温度、是否下雨等），我可以提供更具体的帮助和建议。对于“长沙多少度”的问题，正确的回答应该是：“请查阅最新天气预报以获得长沙市当前的具体气温。”\n"
     ]
    }
   ],
   "execution_count": 128
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:27:14.978509Z",
     "start_time": "2025-10-24T03:27:14.958648Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 5.3 FewShotChatMessagePromptTemplate的使用\n",
    "# 专门为 聊天对话场景设计的少样本（few-shot）提示模板，它继承自 FewShotPromptTemplate，针对聊天消息的格式进行了优化\n",
    "from langchain.prompts import (\n",
    "    FewShotChatMessagePromptTemplate,\n",
    "    ChatPromptTemplate\n",
    ")\n",
    "# 1.示例消息格式\n",
    "examples = [\n",
    "    {\"input\": \"1+1等于几？\", \"output\": \"1+1等于2\"},\n",
    "    {\"input\": \"法国的首都是？\", \"output\": \"巴黎\"}\n",
    "]\n",
    "# 2.定义示例的消息格式提示词模版\n",
    "msg_example_prompt = ChatPromptTemplate.from_messages([\n",
    "    (\"human\", \"{input}\"),\n",
    "    (\"ai\", \"{output}\"),\n",
    "])\n",
    "# 3.定义FewShotChatMessagePromptTemplate对象\n",
    "few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
    "    example_prompt=msg_example_prompt,\n",
    "    examples=examples\n",
    ")\n",
    "# 4.输出格式化后的消息\n",
    "print(few_shot_prompt.format())"
   ],
   "id": "64e4de15183e5562",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Human: 1+1等于几？\n",
      "AI: 1+1等于2\n",
      "Human: 法国的首都是？\n",
      "AI: 巴黎\n"
     ]
    }
   ],
   "execution_count": 129
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-24T03:35:00.981488Z",
     "start_time": "2025-10-24T03:34:54.948871Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 5.4 FewShotChatMessagePromptTemplate结合llm的使用\n",
    "# 1.导入相关包\n",
    "from langchain_core.prompts import (FewShotChatMessagePromptTemplate,\n",
    "                                    ChatPromptTemplate)\n",
    "\n",
    "# 2.定义示例组\n",
    "examples = [\n",
    "    {\"input\": \"2🦜2\", \"output\": \"4\"},\n",
    "    {\"input\": \"2🦜3\", \"output\": \"8\"},\n",
    "]\n",
    "# 3.定义示例的消息格式提示词模版\n",
    "example_prompt = ChatPromptTemplate.from_messages([\n",
    "    ('human', '{input}等于多少?'),\n",
    "    ('ai', '{output}')\n",
    "])\n",
    "# 4.定义FewShotChatMessagePromptTemplate对象\n",
    "few_shot_prompt = FewShotChatMessagePromptTemplate(\n",
    "    examples=examples,  # 示例组\n",
    "    example_prompt=example_prompt,  # 示例提示词词模版\n",
    ")\n",
    "# 5.输出完整提示词的消息模版\n",
    "final_prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        ('system', '你是一个数学奇才'),\n",
    "        few_shot_prompt,\n",
    "        ('human', '{input}'),\n",
    "    ]\n",
    ")\n",
    "#6.提供大模型\n",
    "chat_model = ChatOpenAI(model=\"qwen-max\",\n",
    "temperature=0.4)\n",
    "chat_model.invoke(final_prompt.invoke(input=\"2🦜4\")).content\n",
    "# 结果为16"
   ],
   "id": "74f66da16f305030",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'看起来您可能在使用一种特殊的符号“🦜”来表示某种运算。根据之前的例子，如果2🦜2=4 和 2🦜3=8，那么这个符号似乎代表了乘方操作。因此，2🦜4 就是 \\\\(2^4\\\\)，结果为16。如果有其他含义，请提供更多的上下文或规则说明。'"
      ]
     },
     "execution_count": 137,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 137
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 5.4 使用Example selectors(示例选择器)样本选择器\n",
    "从大量候选示例中选取最相关的示例子集。\n",
    "\n",
    "使用的好处：避免盲目传递所有示例，减少 token 消耗的同时，还可以提升输出效果。\n",
    "\n",
    "示例选择策略：语义相似选择、长度选择、最大边际相关示例选择等"
   ],
   "id": "5c208d4f6adb7275"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-25T05:53:06.057323Z",
     "start_time": "2025-10-25T05:53:05.771668Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Imports\n",
    "from langchain_community.vectorstores import Chroma\n",
    "from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
    "import os\n",
    "import dotenv\n",
    "from langchain_openai import OpenAIEmbeddings\n",
    "\n",
    "dotenv.load_dotenv()\n",
    "# 2. Define the embedding model.\n",
    "# Route OpenAI-compatible calls to DashScope; both env vars must exist in .env.\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv(\"DASHSCOPE_API_KEY\")\n",
    "os.environ['OPENAI_BASE_URL'] = os.getenv(\"DASHSCOPE_BASE_URL\")\n",
    "# FIX: DashScope does not serve `text-embedding-ada-002` (previous run failed with\n",
    "# 404 model_not_found). Use a DashScope embedding model instead, and disable\n",
    "# client-side tokenization: DashScope only accepts str / list[str] input, while\n",
    "# langchain-openai sends pre-tokenized int lists by default.\n",
    "embeddings_model = OpenAIEmbeddings(\n",
    "    model=\"text-embedding-v2\",\n",
    "    check_embedding_ctx_length=False,\n",
    ")\n",
    "# 3. Define the example pool\n",
    "examples = [\n",
    "    {\n",
    "        \"question\": \"谁活得更久，穆罕默德·阿里还是艾伦·图灵?\",\n",
    "        \"answer\": \"\"\"\n",
    "接下来还需要问什么问题吗？\n",
    "追问：穆罕默德·阿里去世时多大年纪？\n",
    "中间答案：穆罕默德·阿里去世时享年74岁。\n",
    "\"\"\",\n",
    "    },\n",
    "    {\n",
    "        \"question\": \"craigslist的创始人是什么时候出生的？\",\n",
    "        \"answer\": \"\"\"\n",
    "接下来还需要问什么问题吗？\n",
    "追问：谁是craigslist的创始人？\n",
    "中级答案：Craigslist是由克雷格·纽马克创立的。\n",
    "\"\"\",\n",
    "    },\n",
    "    {\n",
    "        \"question\": \"谁是乔治·华盛顿的外祖父？\",\n",
    "        \"answer\": \"\"\"\n",
    "接下来还需要问什么问题吗？\n",
    "追问：谁是乔治·华盛顿的母亲？\n",
    "中间答案：乔治·华盛顿的母亲是玛丽·鲍尔·华盛顿。\n",
    "\"\"\",\n",
    "    },\n",
    "    {\n",
    "        \"question\": \"《大白鲨》和《皇家赌场》的导演都来自同一个国家吗？\",\n",
    "        \"answer\": \"\"\"\n",
    "接下来还需要问什么问题吗？\n",
    "追问：《大白鲨》的导演是谁？\n",
    "中级答案：《大白鲨》的导演是史蒂文·斯皮尔伯格。\n",
    "\"\"\",\n",
    "    },\n",
    "]\n",
    "# 4. Build the example selector\n",
    "example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
    "    # the list of examples available for selection\n",
    "    examples,\n",
    "    # the embedding class used to measure semantic similarity\n",
    "    embeddings_model,\n",
    "    # the VectorStore class that stores the embeddings and runs similarity search\n",
    "    Chroma,\n",
    "    # the number of examples to return\n",
    "    k=1,\n",
    ")\n",
    "# Select the example most similar to the input question\n",
    "question = \"玛丽·鲍尔·华盛顿的父亲是谁?\"\n",
    "selected_examples = example_selector.select_examples({\"question\": question})\n",
    "print(f\"与输入最相似的示例：{selected_examples}\")"
   ],
   "id": "145e7b831c11f849",
   "outputs": [
    {
     "ename": "NotFoundError",
     "evalue": "Error code: 404 - {'error': {'message': 'The model `text-embedding-ada-002` does not exist or you do not have access to it.', 'type': 'invalid_request_error', 'param': None, 'code': 'model_not_found'}, 'request_id': '84a144a2-7362-41dc-a08a-16c8ac927ff9'}",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mNotFoundError\u001B[0m                             Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[175], line 51\u001B[0m\n\u001B[0;32m     16\u001B[0m examples \u001B[38;5;241m=\u001B[39m [\n\u001B[0;32m     17\u001B[0m     {\n\u001B[0;32m     18\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mquestion\u001B[39m\u001B[38;5;124m\"\u001B[39m: \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m谁活得更久，穆罕默德·阿里还是艾伦·图灵?\u001B[39m\u001B[38;5;124m\"\u001B[39m,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m     48\u001B[0m     },\n\u001B[0;32m     49\u001B[0m ]\n\u001B[0;32m     50\u001B[0m \u001B[38;5;66;03m# 4.定义示例选择器\u001B[39;00m\n\u001B[1;32m---> 51\u001B[0m example_selector \u001B[38;5;241m=\u001B[39m \u001B[43mSemanticSimilarityExampleSelector\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfrom_examples\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m     52\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;66;43;03m# 这是可供选择的示例列表\u001B[39;49;00m\n\u001B[0;32m     53\u001B[0m \u001B[43m    \u001B[49m\u001B[43mexamples\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     54\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;66;43;03m# 这是用于生成嵌入的嵌入类，用于衡量语义相似性\u001B[39;49;00m\n\u001B[0;32m     55\u001B[0m \u001B[43m    \u001B[49m\u001B[43membeddings_model\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     56\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;66;43;03m# 这是用于存储嵌入并进行相似性搜索的 VectorStore 类\u001B[39;49;00m\n\u001B[0;32m     57\u001B[0m \u001B[43m    \u001B[49m\u001B[43mChroma\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     58\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;66;43;03m# 这是要生成的示例数量\u001B[39;49;00m\n\u001B[0;32m     59\u001B[0m \u001B[43m    \u001B[49m\u001B[43mk\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;241;43m1\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[0;32m     60\u001B[0m \u001B[43m)\u001B[49m\n\u001B[0;32m     61\u001B[0m \u001B[38;5;66;03m# 选择与输入最相似的示例\u001B[39;00m\n\u001B[0;32m     62\u001B[0m question \u001B[38;5;241m=\u001B[39m 
\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m玛丽·鲍尔·华盛顿的父亲是谁?\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_core\\example_selectors\\semantic_similarity.py:171\u001B[0m, in \u001B[0;36mSemanticSimilarityExampleSelector.from_examples\u001B[1;34m(cls, examples, embeddings, vectorstore_cls, k, input_keys, example_keys, vectorstore_kwargs, **vectorstore_cls_kwargs)\u001B[0m\n\u001B[0;32m    151\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Create k-shot example selector using example list and embeddings.\u001B[39;00m\n\u001B[0;32m    152\u001B[0m \n\u001B[0;32m    153\u001B[0m \u001B[38;5;124;03mReshuffles examples dynamically based on query similarity.\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    168\u001B[0m \u001B[38;5;124;03m    The ExampleSelector instantiated, backed by a vector store.\u001B[39;00m\n\u001B[0;32m    169\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m    170\u001B[0m string_examples \u001B[38;5;241m=\u001B[39m [\u001B[38;5;28mcls\u001B[39m\u001B[38;5;241m.\u001B[39m_example_to_text(eg, input_keys) \u001B[38;5;28;01mfor\u001B[39;00m eg \u001B[38;5;129;01min\u001B[39;00m examples]\n\u001B[1;32m--> 171\u001B[0m vectorstore \u001B[38;5;241m=\u001B[39m vectorstore_cls\u001B[38;5;241m.\u001B[39mfrom_texts(\n\u001B[0;32m    172\u001B[0m     string_examples, embeddings, metadatas\u001B[38;5;241m=\u001B[39mexamples, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mvectorstore_cls_kwargs\n\u001B[0;32m    173\u001B[0m )\n\u001B[0;32m    174\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m(\n\u001B[0;32m    175\u001B[0m     vectorstore\u001B[38;5;241m=\u001B[39mvectorstore,\n\u001B[0;32m    176\u001B[0m     k\u001B[38;5;241m=\u001B[39mk,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    179\u001B[0m     vectorstore_kwargs\u001B[38;5;241m=\u001B[39mvectorstore_kwargs,\n\u001B[0;32m    180\u001B[0m )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_community\\vectorstores\\chroma.py:843\u001B[0m, in \u001B[0;36mChroma.from_texts\u001B[1;34m(cls, texts, embedding, metadatas, ids, collection_name, persist_directory, client_settings, client, collection_metadata, **kwargs)\u001B[0m\n\u001B[0;32m    835\u001B[0m     \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01mchromadb\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mutils\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mbatch_utils\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mimport\u001B[39;00m create_batches\n\u001B[0;32m    837\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m batch \u001B[38;5;129;01min\u001B[39;00m create_batches(\n\u001B[0;32m    838\u001B[0m         api\u001B[38;5;241m=\u001B[39mchroma_collection\u001B[38;5;241m.\u001B[39m_client,\n\u001B[0;32m    839\u001B[0m         ids\u001B[38;5;241m=\u001B[39mids,\n\u001B[0;32m    840\u001B[0m         metadatas\u001B[38;5;241m=\u001B[39mmetadatas,\n\u001B[0;32m    841\u001B[0m         documents\u001B[38;5;241m=\u001B[39mtexts,\n\u001B[0;32m    842\u001B[0m     ):\n\u001B[1;32m--> 843\u001B[0m         \u001B[43mchroma_collection\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43madd_texts\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    844\u001B[0m \u001B[43m            \u001B[49m\u001B[43mtexts\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbatch\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m3\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mbatch\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m3\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43m[\u001B[49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    845\u001B[0m \u001B[43m            
\u001B[49m\u001B[43mmetadatas\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbatch\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m2\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mbatch\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m2\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[0;32m    846\u001B[0m \u001B[43m            \u001B[49m\u001B[43mids\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbatch\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m0\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    847\u001B[0m \u001B[43m        \u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    848\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m    849\u001B[0m     chroma_collection\u001B[38;5;241m.\u001B[39madd_texts(texts\u001B[38;5;241m=\u001B[39mtexts, metadatas\u001B[38;5;241m=\u001B[39mmetadatas, ids\u001B[38;5;241m=\u001B[39mids)\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_community\\vectorstores\\chroma.py:277\u001B[0m, in \u001B[0;36mChroma.add_texts\u001B[1;34m(self, texts, metadatas, ids, **kwargs)\u001B[0m\n\u001B[0;32m    275\u001B[0m texts \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mlist\u001B[39m(texts)\n\u001B[0;32m    276\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_embedding_function \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[1;32m--> 277\u001B[0m     embeddings \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_embedding_function\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43membed_documents\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtexts\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    278\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m metadatas:\n\u001B[0;32m    279\u001B[0m     \u001B[38;5;66;03m# fill metadatas with empty dicts if somebody\u001B[39;00m\n\u001B[0;32m    280\u001B[0m     \u001B[38;5;66;03m# did not specify metadata for all texts\u001B[39;00m\n\u001B[0;32m    281\u001B[0m     length_diff \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mlen\u001B[39m(texts) \u001B[38;5;241m-\u001B[39m \u001B[38;5;28mlen\u001B[39m(metadatas)\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_openai\\embeddings\\base.py:590\u001B[0m, in \u001B[0;36mOpenAIEmbeddings.embed_documents\u001B[1;34m(self, texts, chunk_size, **kwargs)\u001B[0m\n\u001B[0;32m    587\u001B[0m \u001B[38;5;66;03m# NOTE: to keep things simple, we assume the list may contain texts longer\u001B[39;00m\n\u001B[0;32m    588\u001B[0m \u001B[38;5;66;03m#       than the maximum context and use length-safe embedding function.\u001B[39;00m\n\u001B[0;32m    589\u001B[0m engine \u001B[38;5;241m=\u001B[39m cast(\u001B[38;5;28mstr\u001B[39m, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdeployment)\n\u001B[1;32m--> 590\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_get_len_safe_embeddings(\n\u001B[0;32m    591\u001B[0m     texts, engine\u001B[38;5;241m=\u001B[39mengine, chunk_size\u001B[38;5;241m=\u001B[39mchunk_size, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs\n\u001B[0;32m    592\u001B[0m )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_openai\\embeddings\\base.py:478\u001B[0m, in \u001B[0;36mOpenAIEmbeddings._get_len_safe_embeddings\u001B[1;34m(self, texts, engine, chunk_size, **kwargs)\u001B[0m\n\u001B[0;32m    476\u001B[0m batched_embeddings: \u001B[38;5;28mlist\u001B[39m[\u001B[38;5;28mlist\u001B[39m[\u001B[38;5;28mfloat\u001B[39m]] \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m    477\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m _iter:\n\u001B[1;32m--> 478\u001B[0m     response \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mclient\u001B[38;5;241m.\u001B[39mcreate(\n\u001B[0;32m    479\u001B[0m         \u001B[38;5;28minput\u001B[39m\u001B[38;5;241m=\u001B[39mtokens[i : i \u001B[38;5;241m+\u001B[39m _chunk_size], \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mclient_kwargs\n\u001B[0;32m    480\u001B[0m     )\n\u001B[0;32m    481\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(response, \u001B[38;5;28mdict\u001B[39m):\n\u001B[0;32m    482\u001B[0m         response \u001B[38;5;241m=\u001B[39m response\u001B[38;5;241m.\u001B[39mmodel_dump()\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\resources\\embeddings.py:129\u001B[0m, in \u001B[0;36mEmbeddings.create\u001B[1;34m(self, input, model, dimensions, encoding_format, user, extra_headers, extra_query, extra_body, timeout)\u001B[0m\n\u001B[0;32m    123\u001B[0m             embedding\u001B[38;5;241m.\u001B[39membedding \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39mfrombuffer(  \u001B[38;5;66;03m# type: ignore[no-untyped-call]\u001B[39;00m\n\u001B[0;32m    124\u001B[0m                 base64\u001B[38;5;241m.\u001B[39mb64decode(data), dtype\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mfloat32\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    125\u001B[0m             )\u001B[38;5;241m.\u001B[39mtolist()\n\u001B[0;32m    127\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m obj\n\u001B[1;32m--> 129\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_post\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    130\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43m/embeddings\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[0;32m    131\u001B[0m \u001B[43m    \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmaybe_transform\u001B[49m\u001B[43m(\u001B[49m\u001B[43mparams\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43membedding_create_params\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mEmbeddingCreateParams\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    132\u001B[0m \u001B[43m    \u001B[49m\u001B[43moptions\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmake_request_options\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    133\u001B[0m \u001B[43m        
\u001B[49m\u001B[43mextra_headers\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_headers\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    134\u001B[0m \u001B[43m        \u001B[49m\u001B[43mextra_query\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_query\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    135\u001B[0m \u001B[43m        \u001B[49m\u001B[43mextra_body\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_body\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    136\u001B[0m \u001B[43m        \u001B[49m\u001B[43mtimeout\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mtimeout\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    137\u001B[0m \u001B[43m        \u001B[49m\u001B[43mpost_parser\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mparser\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    138\u001B[0m \u001B[43m    \u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    139\u001B[0m \u001B[43m    \u001B[49m\u001B[43mcast_to\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mCreateEmbeddingResponse\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    140\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\_base_client.py:1239\u001B[0m, in \u001B[0;36mSyncAPIClient.post\u001B[1;34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001B[0m\n\u001B[0;32m   1225\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21mpost\u001B[39m(\n\u001B[0;32m   1226\u001B[0m     \u001B[38;5;28mself\u001B[39m,\n\u001B[0;32m   1227\u001B[0m     path: \u001B[38;5;28mstr\u001B[39m,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1234\u001B[0m     stream_cls: \u001B[38;5;28mtype\u001B[39m[_StreamT] \u001B[38;5;241m|\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[0;32m   1235\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m ResponseT \u001B[38;5;241m|\u001B[39m _StreamT:\n\u001B[0;32m   1236\u001B[0m     opts \u001B[38;5;241m=\u001B[39m FinalRequestOptions\u001B[38;5;241m.\u001B[39mconstruct(\n\u001B[0;32m   1237\u001B[0m         method\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mpost\u001B[39m\u001B[38;5;124m\"\u001B[39m, url\u001B[38;5;241m=\u001B[39mpath, json_data\u001B[38;5;241m=\u001B[39mbody, files\u001B[38;5;241m=\u001B[39mto_httpx_files(files), \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39moptions\n\u001B[0;32m   1238\u001B[0m     )\n\u001B[1;32m-> 1239\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m cast(ResponseT, \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43mcast_to\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mopts\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mstream\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mstream\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mstream_cls\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mstream_cls\u001B[49m\u001B[43m)\u001B[49m)\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\_base_client.py:1034\u001B[0m, in \u001B[0;36mSyncAPIClient.request\u001B[1;34m(self, cast_to, options, stream, stream_cls)\u001B[0m\n\u001B[0;32m   1031\u001B[0m             err\u001B[38;5;241m.\u001B[39mresponse\u001B[38;5;241m.\u001B[39mread()\n\u001B[0;32m   1033\u001B[0m         log\u001B[38;5;241m.\u001B[39mdebug(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mRe-raising status error\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m-> 1034\u001B[0m         \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_make_status_error_from_response(err\u001B[38;5;241m.\u001B[39mresponse) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m   1036\u001B[0m     \u001B[38;5;28;01mbreak\u001B[39;00m\n\u001B[0;32m   1038\u001B[0m \u001B[38;5;28;01massert\u001B[39;00m response \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcould not resolve response (should never happen)\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
      "\u001B[1;31mNotFoundError\u001B[0m: Error code: 404 - {'error': {'message': 'The model `text-embedding-ada-002` does not exist or you do not have access to it.', 'type': 'invalid_request_error', 'param': None, 'code': 'model_not_found'}, 'request_id': '84a144a2-7362-41dc-a08a-16c8ac927ff9'}"
     ]
    }
   ],
   "execution_count": 175
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-25T05:53:28.575626Z",
     "start_time": "2025-10-25T05:53:28.231190Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Example 2: used together with FewShotPromptTemplate\n",
    "# 1. Imports\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
    "from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
    "from langchain_openai import OpenAIEmbeddings\n",
    "# 2. Prompt template that formats each selected example\n",
    "example_prompt = PromptTemplate.from_template(\n",
    "    template=\"Input: {input}\\nOutput: {output}\",\n",
    ")\n",
    "# 3. Example pool (word -> antonym pairs)\n",
    "examples = [\n",
    "    {\"input\": \"高兴\", \"output\": \"悲伤\"},\n",
    "    {\"input\": \"高\", \"output\": \"矮\"},\n",
    "    {\"input\": \"长\", \"output\": \"短\"},\n",
    "    {\"input\": \"精力充沛\", \"output\": \"无精打采\"},\n",
    "    {\"input\": \"阳光\", \"output\": \"阴暗\"},\n",
    "    {\"input\": \"粗糙\", \"output\": \"光滑\"},\n",
    "    {\"input\": \"干燥\", \"output\": \"潮湿\"},\n",
    "    {\"input\": \"富裕\", \"output\": \"贫穷\"},\n",
    "]\n",
    "# 4. Define the embedding model\n",
    "# DashScope's common embedding models include text-embedding-v1, text-embedding-v2, etc.\n",
    "# FIX: check_embedding_ctx_length=False makes langchain-openai send raw strings\n",
    "# instead of pre-tokenized int lists, which DashScope rejects with\n",
    "# \"contents is neither str nor list of str\" (the previous run's 400 error).\n",
    "embeddings = OpenAIEmbeddings(\n",
    "    model=\"text-embedding-v2\",\n",
    "    check_embedding_ctx_length=False,\n",
    ")\n",
    "# 5. Build the semantic-similarity example selector\n",
    "example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
    "    examples,\n",
    "    embeddings,\n",
    "    FAISS,\n",
    "    k=2,\n",
    ")\n",
    "# 6. Few-shot prompt template: prefix + the k selected examples + suffix\n",
    "similar_prompt = FewShotPromptTemplate(\n",
    "    example_selector=example_selector,\n",
    "    example_prompt=example_prompt,\n",
    "    prefix=\"给出每个词组的反义词\",\n",
    "    suffix=\"Input: {word}\\nOutput:\",\n",
    "    input_variables=[\"word\"],\n",
    ")\n",
    "response = similar_prompt.invoke({\"word\": \"忧郁\"})\n",
    "print(response.text)"
   ],
   "id": "f7e0fe4ab6bc575e",
   "outputs": [
    {
     "ename": "BadRequestError",
     "evalue": "Error code: 400 - {'error': {'code': 'InvalidParameter', 'param': None, 'message': '<400> InternalError.Algo.InvalidParameter: Value error, contents is neither str nor list of str.: input.contents', 'type': 'InvalidParameter'}, 'id': 'b53cc2ef-d962-4b21-8980-5413db8d35d2', 'request_id': 'b53cc2ef-d962-4b21-8980-5413db8d35d2'}",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mBadRequestError\u001B[0m                           Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[178], line 28\u001B[0m\n\u001B[0;32m     24\u001B[0m embeddings \u001B[38;5;241m=\u001B[39m OpenAIEmbeddings(\n\u001B[0;32m     25\u001B[0m     model\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mtext-embedding-v2\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m     26\u001B[0m )\n\u001B[0;32m     27\u001B[0m \u001B[38;5;66;03m# 5.创建语义相似性示例选择器\u001B[39;00m\n\u001B[1;32m---> 28\u001B[0m example_selector \u001B[38;5;241m=\u001B[39m \u001B[43mSemanticSimilarityExampleSelector\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfrom_examples\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m     29\u001B[0m \u001B[43m    \u001B[49m\u001B[43mexamples\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     30\u001B[0m \u001B[43m    \u001B[49m\u001B[43membeddings\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     31\u001B[0m \u001B[43m    \u001B[49m\u001B[43mFAISS\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m     32\u001B[0m \u001B[43m    \u001B[49m\u001B[43mk\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;241;43m2\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[0;32m     33\u001B[0m \u001B[43m)\u001B[49m\n\u001B[0;32m     34\u001B[0m \u001B[38;5;66;03m#或者\u001B[39;00m\n\u001B[0;32m     35\u001B[0m \u001B[38;5;66;03m#example_selector = SemanticSimilarityExampleSelector(\u001B[39;00m\n\u001B[0;32m     36\u001B[0m \u001B[38;5;66;03m# examples,\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m     40\u001B[0m \u001B[38;5;66;03m#)\u001B[39;00m\n\u001B[0;32m     41\u001B[0m \u001B[38;5;66;03m# 6.定义小样本提示词模版\u001B[39;00m\n\u001B[0;32m     42\u001B[0m similar_prompt \u001B[38;5;241m=\u001B[39m FewShotPromptTemplate(\n\u001B[0;32m     43\u001B[0m     example_selector\u001B[38;5;241m=\u001B[39mexample_selector,\n\u001B[0;32m     44\u001B[0m     example_prompt\u001B[38;5;241m=\u001B[39mexample_prompt,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m     47\u001B[0m     
input_variables\u001B[38;5;241m=\u001B[39m[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mword\u001B[39m\u001B[38;5;124m\"\u001B[39m],\n\u001B[0;32m     48\u001B[0m )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_core\\example_selectors\\semantic_similarity.py:171\u001B[0m, in \u001B[0;36mSemanticSimilarityExampleSelector.from_examples\u001B[1;34m(cls, examples, embeddings, vectorstore_cls, k, input_keys, example_keys, vectorstore_kwargs, **vectorstore_cls_kwargs)\u001B[0m\n\u001B[0;32m    151\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Create k-shot example selector using example list and embeddings.\u001B[39;00m\n\u001B[0;32m    152\u001B[0m \n\u001B[0;32m    153\u001B[0m \u001B[38;5;124;03mReshuffles examples dynamically based on query similarity.\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    168\u001B[0m \u001B[38;5;124;03m    The ExampleSelector instantiated, backed by a vector store.\u001B[39;00m\n\u001B[0;32m    169\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m    170\u001B[0m string_examples \u001B[38;5;241m=\u001B[39m [\u001B[38;5;28mcls\u001B[39m\u001B[38;5;241m.\u001B[39m_example_to_text(eg, input_keys) \u001B[38;5;28;01mfor\u001B[39;00m eg \u001B[38;5;129;01min\u001B[39;00m examples]\n\u001B[1;32m--> 171\u001B[0m vectorstore \u001B[38;5;241m=\u001B[39m vectorstore_cls\u001B[38;5;241m.\u001B[39mfrom_texts(\n\u001B[0;32m    172\u001B[0m     string_examples, embeddings, metadatas\u001B[38;5;241m=\u001B[39mexamples, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mvectorstore_cls_kwargs\n\u001B[0;32m    173\u001B[0m )\n\u001B[0;32m    174\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m(\n\u001B[0;32m    175\u001B[0m     vectorstore\u001B[38;5;241m=\u001B[39mvectorstore,\n\u001B[0;32m    176\u001B[0m     k\u001B[38;5;241m=\u001B[39mk,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    179\u001B[0m     vectorstore_kwargs\u001B[38;5;241m=\u001B[39mvectorstore_kwargs,\n\u001B[0;32m    180\u001B[0m )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_community\\vectorstores\\faiss.py:1043\u001B[0m, in \u001B[0;36mFAISS.from_texts\u001B[1;34m(cls, texts, embedding, metadatas, ids, **kwargs)\u001B[0m\n\u001B[0;32m   1016\u001B[0m \u001B[38;5;129m@classmethod\u001B[39m\n\u001B[0;32m   1017\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21mfrom_texts\u001B[39m(\n\u001B[0;32m   1018\u001B[0m     \u001B[38;5;28mcls\u001B[39m,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1023\u001B[0m     \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs: Any,\n\u001B[0;32m   1024\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m FAISS:\n\u001B[0;32m   1025\u001B[0m \u001B[38;5;250m    \u001B[39m\u001B[38;5;124;03m\"\"\"Construct FAISS wrapper from raw documents.\u001B[39;00m\n\u001B[0;32m   1026\u001B[0m \n\u001B[0;32m   1027\u001B[0m \u001B[38;5;124;03m    This is a user friendly interface that:\u001B[39;00m\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1041\u001B[0m \u001B[38;5;124;03m            faiss = FAISS.from_texts(texts, embeddings)\u001B[39;00m\n\u001B[0;32m   1042\u001B[0m \u001B[38;5;124;03m    \"\"\"\u001B[39;00m\n\u001B[1;32m-> 1043\u001B[0m     embeddings \u001B[38;5;241m=\u001B[39m \u001B[43membedding\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43membed_documents\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtexts\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1044\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m\u001B[38;5;241m.\u001B[39m__from(\n\u001B[0;32m   1045\u001B[0m         texts,\n\u001B[0;32m   1046\u001B[0m         embeddings,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1050\u001B[0m         \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[0;32m   1051\u001B[0m     )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_openai\\embeddings\\base.py:590\u001B[0m, in \u001B[0;36mOpenAIEmbeddings.embed_documents\u001B[1;34m(self, texts, chunk_size, **kwargs)\u001B[0m\n\u001B[0;32m    587\u001B[0m \u001B[38;5;66;03m# NOTE: to keep things simple, we assume the list may contain texts longer\u001B[39;00m\n\u001B[0;32m    588\u001B[0m \u001B[38;5;66;03m#       than the maximum context and use length-safe embedding function.\u001B[39;00m\n\u001B[0;32m    589\u001B[0m engine \u001B[38;5;241m=\u001B[39m cast(\u001B[38;5;28mstr\u001B[39m, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdeployment)\n\u001B[1;32m--> 590\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_get_len_safe_embeddings(\n\u001B[0;32m    591\u001B[0m     texts, engine\u001B[38;5;241m=\u001B[39mengine, chunk_size\u001B[38;5;241m=\u001B[39mchunk_size, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs\n\u001B[0;32m    592\u001B[0m )\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\langchain_openai\\embeddings\\base.py:478\u001B[0m, in \u001B[0;36mOpenAIEmbeddings._get_len_safe_embeddings\u001B[1;34m(self, texts, engine, chunk_size, **kwargs)\u001B[0m\n\u001B[0;32m    476\u001B[0m batched_embeddings: \u001B[38;5;28mlist\u001B[39m[\u001B[38;5;28mlist\u001B[39m[\u001B[38;5;28mfloat\u001B[39m]] \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m    477\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m _iter:\n\u001B[1;32m--> 478\u001B[0m     response \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mclient\u001B[38;5;241m.\u001B[39mcreate(\n\u001B[0;32m    479\u001B[0m         \u001B[38;5;28minput\u001B[39m\u001B[38;5;241m=\u001B[39mtokens[i : i \u001B[38;5;241m+\u001B[39m _chunk_size], \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mclient_kwargs\n\u001B[0;32m    480\u001B[0m     )\n\u001B[0;32m    481\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(response, \u001B[38;5;28mdict\u001B[39m):\n\u001B[0;32m    482\u001B[0m         response \u001B[38;5;241m=\u001B[39m response\u001B[38;5;241m.\u001B[39mmodel_dump()\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\resources\\embeddings.py:129\u001B[0m, in \u001B[0;36mEmbeddings.create\u001B[1;34m(self, input, model, dimensions, encoding_format, user, extra_headers, extra_query, extra_body, timeout)\u001B[0m\n\u001B[0;32m    123\u001B[0m             embedding\u001B[38;5;241m.\u001B[39membedding \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39mfrombuffer(  \u001B[38;5;66;03m# type: ignore[no-untyped-call]\u001B[39;00m\n\u001B[0;32m    124\u001B[0m                 base64\u001B[38;5;241m.\u001B[39mb64decode(data), dtype\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mfloat32\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m    125\u001B[0m             )\u001B[38;5;241m.\u001B[39mtolist()\n\u001B[0;32m    127\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m obj\n\u001B[1;32m--> 129\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_post\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    130\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43m/embeddings\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[0;32m    131\u001B[0m \u001B[43m    \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmaybe_transform\u001B[49m\u001B[43m(\u001B[49m\u001B[43mparams\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43membedding_create_params\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mEmbeddingCreateParams\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    132\u001B[0m \u001B[43m    \u001B[49m\u001B[43moptions\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmake_request_options\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    133\u001B[0m \u001B[43m        
\u001B[49m\u001B[43mextra_headers\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_headers\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    134\u001B[0m \u001B[43m        \u001B[49m\u001B[43mextra_query\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_query\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    135\u001B[0m \u001B[43m        \u001B[49m\u001B[43mextra_body\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mextra_body\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    136\u001B[0m \u001B[43m        \u001B[49m\u001B[43mtimeout\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mtimeout\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    137\u001B[0m \u001B[43m        \u001B[49m\u001B[43mpost_parser\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mparser\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    138\u001B[0m \u001B[43m    \u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    139\u001B[0m \u001B[43m    \u001B[49m\u001B[43mcast_to\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mCreateEmbeddingResponse\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    140\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\_base_client.py:1239\u001B[0m, in \u001B[0;36mSyncAPIClient.post\u001B[1;34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001B[0m\n\u001B[0;32m   1225\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21mpost\u001B[39m(\n\u001B[0;32m   1226\u001B[0m     \u001B[38;5;28mself\u001B[39m,\n\u001B[0;32m   1227\u001B[0m     path: \u001B[38;5;28mstr\u001B[39m,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   1234\u001B[0m     stream_cls: \u001B[38;5;28mtype\u001B[39m[_StreamT] \u001B[38;5;241m|\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[0;32m   1235\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m ResponseT \u001B[38;5;241m|\u001B[39m _StreamT:\n\u001B[0;32m   1236\u001B[0m     opts \u001B[38;5;241m=\u001B[39m FinalRequestOptions\u001B[38;5;241m.\u001B[39mconstruct(\n\u001B[0;32m   1237\u001B[0m         method\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mpost\u001B[39m\u001B[38;5;124m\"\u001B[39m, url\u001B[38;5;241m=\u001B[39mpath, json_data\u001B[38;5;241m=\u001B[39mbody, files\u001B[38;5;241m=\u001B[39mto_httpx_files(files), \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39moptions\n\u001B[0;32m   1238\u001B[0m     )\n\u001B[1;32m-> 1239\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m cast(ResponseT, \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43mcast_to\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mopts\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mstream\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mstream\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mstream_cls\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mstream_cls\u001B[49m\u001B[43m)\u001B[49m)\n",
      "File \u001B[1;32mD:\\ProgramData\\miniconda3\\envs\\pyth310\\lib\\site-packages\\openai\\_base_client.py:1034\u001B[0m, in \u001B[0;36mSyncAPIClient.request\u001B[1;34m(self, cast_to, options, stream, stream_cls)\u001B[0m\n\u001B[0;32m   1031\u001B[0m             err\u001B[38;5;241m.\u001B[39mresponse\u001B[38;5;241m.\u001B[39mread()\n\u001B[0;32m   1033\u001B[0m         log\u001B[38;5;241m.\u001B[39mdebug(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mRe-raising status error\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m-> 1034\u001B[0m         \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_make_status_error_from_response(err\u001B[38;5;241m.\u001B[39mresponse) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m   1036\u001B[0m     \u001B[38;5;28;01mbreak\u001B[39;00m\n\u001B[0;32m   1038\u001B[0m \u001B[38;5;28;01massert\u001B[39;00m response \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcould not resolve response (should never happen)\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
      "\u001B[1;31mBadRequestError\u001B[0m: Error code: 400 - {'error': {'code': 'InvalidParameter', 'param': None, 'message': '<400> InternalError.Algo.InvalidParameter: Value error, contents is neither str nor list of str.: input.contents', 'type': 'InvalidParameter'}, 'id': 'b53cc2ef-d962-4b21-8980-5413db8d35d2', 'request_id': 'b53cc2ef-d962-4b21-8980-5413db8d35d2'}"
     ]
    }
   ],
   "execution_count": 178
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-25T06:07:58.149717Z",
     "start_time": "2025-10-25T06:07:58.130024Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 4.6 Concrete usage: PipelinePromptTemplate\n",
    "# Chains several PromptTemplates: each stage's formatted text is substituted\n",
    "# into the next stage's input variable, ending at final_prompt (see the\n",
    "# nested output below, where stage 1's text appears inside stage 2's, which\n",
    "# appears inside the final answer).\n",
    "# NOTE(review): PipelinePromptTemplate is deprecated in recent langchain-core\n",
    "# releases -- confirm the installed version still ships this import path.\n",
    "from langchain_core.prompts.pipeline import PipelinePromptTemplate\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "\n",
    "# Stage 1: question analysis -- the only externally supplied variable is {question}.\n",
    "analysis_template = PromptTemplate.from_template(\"\"\"\n",
    "分析这个问题：{question}\n",
    "关键要素：\n",
    "\"\"\")\n",
    "# Stage 2: knowledge retrieval -- consumes stage 1's output via {analysis_result}.\n",
    "retrieval_template = PromptTemplate.from_template(\"\"\"\n",
    "基于以下要素搜索资料：\n",
    "{analysis_result}\n",
    "搜索关键词：\n",
    "\"\"\")\n",
    "# Stage 3: final answer -- consumes stage 2's output via {retrieval_result}.\n",
    "answer_template = PromptTemplate.from_template(\"\"\"\n",
    "综合以下信息回答问题：\n",
    "{retrieval_result}\n",
    "最终答案：\n",
    "\"\"\")\n",
    "# Build the pipeline: each (name, template) pair is formatted in list order\n",
    "# and its result is bound to `name` before final_prompt is formatted.\n",
    "pipeline = PipelinePromptTemplate(\n",
    "    final_prompt=answer_template,\n",
    "    pipeline_prompts=[\n",
    "        (\"analysis_result\", analysis_template),\n",
    "        (\"retrieval_result\", retrieval_template)\n",
    "    ]\n",
    ")\n",
    "# format() only needs the leaf variable {question}; the intermediate variables\n",
    "# (analysis_result, retrieval_result) are produced by the pipeline itself.\n",
    "print(pipeline.format(question=\"量子计算的优势是什么？\"))"
   ],
   "id": "9139e96281e47c14",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "综合以下信息回答问题：\n",
      "\n",
      "基于以下要素搜索资料：\n",
      "\n",
      "分析这个问题：量子计算的优势是什么？\n",
      "关键要素：\n",
      "\n",
      "搜索关键词：\n",
      "\n",
      "最终答案：\n",
      "\n"
     ]
    }
   ],
   "execution_count": 180
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
