{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    ""
   ]
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 1、ChatMessageHistory的使用\n",
    "\n",
    "场景1：记忆存储\n"
   ],
   "id": "373cb5b23914c53d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T11:51:45.123941Z",
     "start_time": "2025-09-02T11:51:44.386547Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from langchain.memory import ChatMessageHistory\n",
    "\n",
    "# 1、ChatMessageHistory的实例化\n",
    "\n",
    "history = ChatMessageHistory()\n",
    "\n",
    "# 2、添加相关的消息进行存储\n",
    "\n",
    "history.add_user_message(\"你好\")\n",
    "\n",
    "history.add_ai_message(\"很高兴认识你\")\n",
    "\n",
    "# 3、打印存储的消息\n",
    "print(history.messages)"
   ],
   "id": "6d93d1e701a48fcc",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[HumanMessage(content='你好', additional_kwargs={}, response_metadata={}), AIMessage(content='很高兴认识你', additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "场景2：对接LLM\n",
    "\n"
   ],
   "id": "bf411bdb0fe1d587"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T03:35:11.028401Z",
     "start_time": "2025-09-09T03:35:10.637220Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1、获取大模型\n",
    "import os\n",
    "import dotenv\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "dotenv.load_dotenv()\n",
    "\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv(\"OPENAI_API_KEY1\")\n",
    "os.environ['OPENAI_BASE_URL'] = os.getenv(\"OPENAI_BASE_URL\")\n",
    "\n",
    "# 创建大模型实例\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")"
   ],
   "id": "f3493823990de37d",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T11:54:04.262432Z",
     "start_time": "2025-09-02T11:53:41.310722Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.memory import ChatMessageHistory\n",
    "\n",
    "# 1、ChatMessageHistory的实例化\n",
    "\n",
    "history = ChatMessageHistory()\n",
    "\n",
    "# 2、添加相关的消息进行存储\n",
    "history.add_user_message(\"你好\")\n",
    "history.add_ai_message(\"很高兴认识你\")\n",
    "history.add_user_message(\"帮我计算1 + 2 * 3 = ？\")\n",
    "\n",
    "response = llm.invoke(history.messages)\n",
    "print(response.content)"
   ],
   "id": "853f7fa0b835bc8c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "根据数学运算的优先级，先进行乘法再进行加法。因此，计算过程如下：\n",
      "\n",
      "1 + 2 * 3 = 1 + 6 = 7\n",
      "\n",
      "所以，1 + 2 * 3 = 7。\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 2、ConversationBufferMemory的使用\n",
    "\n",
    "举例1：以字符串的方式返回存储的信息"
   ],
   "id": "174e1444d5400aa3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T12:52:59.780414Z",
     "start_time": "2025-09-02T12:52:59.764774Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.memory import ConversationBufferMemory\n",
    "\n",
    "# 1、ConversationBufferMemory的实例化\n",
    "memory = ConversationBufferMemory()\n",
    "\n",
    "# 2、存储相关的消息\n",
    "# inputs对应的就是用户消息，outputs对应的就是ai消息\n",
    "memory.save_context(inputs={\"human\": \"你好，我叫小明\"}, outputs={\"ai\": \"很高兴认识你\"})\n",
    "memory.save_context(inputs={\"input\": \"帮我回答一下1+2*3=?\"}, outputs={\"output\": \"7\"})\n",
    "\n",
    "# 3、获取存储的信息\n",
    "print(memory.load_memory_variables({}))\n",
    "\n",
    "#说明：返回的字典结构的key叫history."
   ],
   "id": "1e28a2c3000ea82c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'history': 'Human: 你好，我叫小明\\nAI: 很高兴认识你\\nHuman: 帮我回答一下1+2*3=?\\nAI: 7'}\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例2：以消息列表的方式返回存储的信息",
   "id": "c4bed31c75d6a846"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T03:44:31.638061Z",
     "start_time": "2025-09-09T03:44:31.617615Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.memory import ConversationBufferMemory\n",
    "\n",
    "# 1、ConversationBufferMemory的实例化\n",
    "memory = ConversationBufferMemory(return_messages=True)\n",
    "\n",
    "# 2、存储相关的消息\n",
    "# inputs对应的就是用户消息，outputs对应的就是ai消息\n",
    "memory.save_context(inputs={\"human\": \"你好，我叫小明\"}, outputs={\"ai\": \"很高兴认识你\"})\n",
    "memory.save_context(inputs={\"input\": \"帮我回答一下1+2*3=?\"}, outputs={\"output\": \"7\"})\n",
    "\n",
    "# 3、获取存储的信息\n",
    "#返回消息列表的方式1：\n",
    "print(memory.load_memory_variables({}))\n",
    "\n",
    "print(\"\\n\")\n",
    "\n",
    "#返回消息列表的方式2：\n",
    "print(memory.chat_memory.messages)\n",
    "\n",
    "#说明：返回的字典结构的key叫history."
   ],
   "id": "89c63d55fa82e54",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'history': [HumanMessage(content='你好，我叫小明', additional_kwargs={}, response_metadata={}), AIMessage(content='很高兴认识你', additional_kwargs={}, response_metadata={}), HumanMessage(content='帮我回答一下1+2*3=?', additional_kwargs={}, response_metadata={}), AIMessage(content='7', additional_kwargs={}, response_metadata={})]}\n",
      "\n",
      "\n",
      "[HumanMessage(content='你好，我叫小明', additional_kwargs={}, response_metadata={}), AIMessage(content='很高兴认识你', additional_kwargs={}, response_metadata={}), HumanMessage(content='帮我回答一下1+2*3=?', additional_kwargs={}, response_metadata={}), AIMessage(content='7', additional_kwargs={}, response_metadata={})]\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例3：结合大模型、提示词模板的使用（PromptTemplate）",
   "id": "ec45c4f5acddd8eb"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:02:55.204016Z",
     "start_time": "2025-09-02T13:02:53.805332Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from langchain.chains.llm import LLMChain\n",
     "from langchain.memory import ConversationBufferMemory\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "\n",
    "# 1、创建大模型实例\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 2、提供提示词模板\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    template=\"\"\"\n",
    "    你可以与人类对话。\n",
    "\n",
    "当前对话历史: {history}\n",
    "\n",
    "人类问题: {question}\n",
    "\n",
    "回复:\n",
    "\"\"\"\n",
    ")\n",
    "\n",
    "# 3、提供memory实例\n",
    "memory = ConversationBufferMemory()\n",
    "\n",
    "# 4、提供Chain\n",
    "chain = LLMChain(llm=llm, prompt=prompt_template, memory=memory)\n",
    "\n",
    "response = chain.invoke({\"question\": \"你好，我的名字叫小明\"})\n",
    "print(response)"
   ],
   "id": "cccac0e809de9553",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '你好，我的名字叫小明', 'history': '', 'text': '你好，小明！很高兴认识你。有什么我可以帮助你的吗？'}\n"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:03:15.181870Z",
     "start_time": "2025-09-02T13:03:13.975563Z"
    }
   },
   "cell_type": "code",
   "source": [
    "response = chain.invoke({\"question\": \"我叫什么名字呢？\"})\n",
    "print(response)"
   ],
   "id": "81a90d01bbc732c4",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '我叫什么名字呢？', 'history': 'Human: 你好，我的名字叫小明\\nAI: 你好，小明！很高兴认识你。有什么我可以帮助你的吗？', 'text': '你叫小明。很高兴再次和你交流！还有其他问题或者想聊的内容吗？'}\n"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {},
   "cell_type": "markdown",
    "source": "举例4：基于举例3，显式的设置memory的key的值\n",
   "id": "7f093aa4cafc8535"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T03:36:18.207351Z",
     "start_time": "2025-09-09T03:36:16.879092Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from langchain.chains.llm import LLMChain\n",
     "from langchain.memory import ConversationBufferMemory\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "\n",
    "# 1、创建大模型实例\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 2、提供提示词模板\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    template=\"\"\"\n",
    "    你可以与人类对话。\n",
    "\n",
    "当前对话历史: {chat_history}\n",
    "\n",
    "人类问题: {question}\n",
    "\n",
    "回复:\n",
    "\"\"\"\n",
    ")\n",
    "\n",
    "# 3、提供memory实例\n",
    "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
    "\n",
    "# 4、提供Chain\n",
    "chain = LLMChain(llm=llm, prompt=prompt_template, memory=memory)\n",
    "\n",
    "response = chain.invoke({\"question\": \"你好，我的名字叫小明\"})\n",
    "print(response)"
   ],
   "id": "971a73c022ec0593",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '你好，我的名字叫小明', 'chat_history': '', 'text': '你好，小明！很高兴认识你。有任何问题或者想聊的话题吗？'}\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:05:20.943928Z",
     "start_time": "2025-09-02T13:05:19.301566Z"
    }
   },
   "cell_type": "code",
   "source": [
    "response = chain.invoke({\"question\": \"我叫什么名字呢？\"})\n",
    "print(response)"
   ],
   "id": "907785cf972ec8a0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '我叫什么名字呢？', 'chat_history': 'Human: 你好，我的名字叫小明\\nAI: 你好，小明！很高兴认识你。有什么我可以帮助你的吗？', 'text': '你叫小明。很高兴再次见到你！有什么想聊的呢？'}\n"
     ]
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例5：结合大模型、提示词模板的使用（ChatPromptTemplate）",
   "id": "834c9f4d7dac8b2d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:07:51.408956Z",
     "start_time": "2025-09-02T13:07:50.004483Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1.导入相关包\n",
    "from langchain_core.messages import SystemMessage\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain_core.prompts import MessagesPlaceholder,ChatPromptTemplate,HumanMessagePromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "\n",
    "# 2.创建LLM\n",
    "llm = ChatOpenAI(model_name='gpt-4o-mini')\n",
    "\n",
    "# 3.创建Prompt\n",
    "prompt = ChatPromptTemplate.from_messages([\n",
    "    (\"system\",\"你是一个与人类对话的机器人。\"),\n",
    "    MessagesPlaceholder(variable_name='history'),\n",
    "    (\"human\",\"问题：{question}\")\n",
    "])\n",
    "\n",
    "# 4.创建Memory\n",
    "memory = ConversationBufferMemory(return_messages=True)\n",
    "# 5.创建LLMChain\n",
    "llm_chain = LLMChain(prompt=prompt,llm=llm, memory=memory)\n",
    "\n",
    "# 6.调用LLMChain\n",
    "res1 = llm_chain.invoke({\"question\": \"中国首都在哪里？\"})\n",
    "print(res1,end=\"\\n\\n\")\n",
    "\n"
   ],
   "id": "f4c4ded86ec86ad2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '中国首都在哪里？', 'history': [HumanMessage(content='中国首都在哪里？', additional_kwargs={}, response_metadata={}), AIMessage(content='中国的首都位于北京。', additional_kwargs={}, response_metadata={})], 'text': '中国的首都位于北京。'}\n",
      "\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:08:41.650127Z",
     "start_time": "2025-09-02T13:08:40.510273Z"
    }
   },
   "cell_type": "code",
   "source": [
    "res2 = llm_chain.invoke({\"question\": \"我刚刚问了什么\"})\n",
    "print(res2)\n"
   ],
   "id": "ac8e746f535d9cec",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '我刚刚问了什么', 'history': [HumanMessage(content='中国首都在哪里？', additional_kwargs={}, response_metadata={}), AIMessage(content='中国的首都位于北京。', additional_kwargs={}, response_metadata={}), HumanMessage(content='我刚刚问了什么', additional_kwargs={}, response_metadata={}), AIMessage(content='你刚刚问了中国的首都在哪里。', additional_kwargs={}, response_metadata={})], 'text': '你刚刚问了中国的首都在哪里。'}\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 3、ConversationChain的使用\n",
    "\n",
    "举例1：以PromptTemplate为例"
   ],
   "id": "5c78842e6be08b11"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:34:21.201911Z",
     "start_time": "2025-09-02T13:34:19.382541Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.chains.conversation.base import ConversationChain\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "\n",
    "# 1、创建大模型实例\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 2、提供提示词模板\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    template=\"\"\"\n",
    "    你可以与人类对话。\n",
    "\n",
    "当前对话历史: {history}\n",
    "\n",
    "人类问题: {input}\n",
    "\n",
    "回复:\n",
    "\"\"\"\n",
    ")\n",
    "\n",
    "# # 3、提供memory实例\n",
    "# memory = ConversationBufferMemory()\n",
    "#\n",
    "# # 4、提供Chain\n",
    "# chain = LLMChain(llm=llm, prompt=prompt_template, memory=memory)\n",
    "\n",
    "# 3、创建ConversationChain的实例\n",
    "chain = ConversationChain(llm = llm, prompt=prompt_template)\n",
    "\n",
    "response = chain.invoke({\"input\": \"你好，我的名字叫小明\"})\n",
    "print(response)"
   ],
   "id": "d065c6f1a8882c1",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input': '你好，我的名字叫小明', 'history': '', 'response': '你好，小明！很高兴认识你。你今天过得怎么样？'}\n"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:34:47.452374Z",
     "start_time": "2025-09-02T13:34:46.184452Z"
    }
   },
   "cell_type": "code",
   "source": [
    "response = chain.invoke({\"input\": \"我的名字叫什么？\"})\n",
    "print(response)"
   ],
   "id": "53262c4a130fde0b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input': '我的名字叫什么？', 'history': 'Human: 你好，我的名字叫小明\\nAI: 你好，小明！很高兴认识你。你今天过得怎么样？', 'response': '你的名字叫小明。'}\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "举例2：使用默认提供的提示词模板\n",
    "\n"
   ],
   "id": "c5f266c2a99b144d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:37:38.348975Z",
     "start_time": "2025-09-02T13:37:36.485414Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from langchain.chains.conversation.base import ConversationChain\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "\n",
    "# 1、创建大模型实例\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 2、提供提示词模板\n",
    "# prompt_template = PromptTemplate.from_template(\n",
    "#     template=\"\"\"\n",
    "#     你可以与人类对话。\n",
    "#\n",
    "# 当前对话历史: {history}\n",
    "#\n",
    "# 人类问题: {input}\n",
    "#\n",
    "# 回复:\n",
    "# \"\"\"\n",
    "# )\n",
    "\n",
    "# # 3、提供memory实例\n",
    "# memory = ConversationBufferMemory()\n",
    "#\n",
    "# # 4、提供Chain\n",
    "# chain = LLMChain(llm=llm, prompt=prompt_template, memory=memory)\n",
    "\n",
     "# 3、创建ConversationChain的实例（内部提供了默认的提示词模板。而此模板中的变量是{input}、{history}）\n",
    "chain = ConversationChain(llm = llm)\n",
    "\n",
    "response = chain.invoke({\"input\": \"你好，我的名字叫小明\"})\n",
    "print(response)"
   ],
   "id": "e934331b88c4fd46",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input': '你好，我的名字叫小明', 'history': '', 'response': '你好，小明！很高兴认识你！我是一种人工智能助手，随时准备帮助你。你今天怎么样？有什么特别想聊的吗？'}\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T13:37:53.053399Z",
     "start_time": "2025-09-02T13:37:51.763676Z"
    }
   },
   "cell_type": "code",
   "source": [
    "response = chain.invoke({\"input\": \"我的名字叫什么？\"})\n",
    "print(response)"
   ],
   "id": "71caf0ddcf27a5ad",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input': '我的名字叫什么？', 'history': 'Human: 你好，我的名字叫小明\\nAI: 你好，小明！很高兴认识你！我是一种人工智能助手，随时准备帮助你。你今天怎么样？有什么特别想聊的吗？', 'response': '你的名字叫小明！我记得你刚刚告诉我这个信息。你想聊些什么，或者有什么问题需要我帮助的吗？'}\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 4、ConversationBufferWindowMemory的使用\n",
    "\n",
    "举例1：\n"
   ],
   "id": "b4a2858cc12ff573"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T14:21:54.670263Z",
     "start_time": "2025-09-02T14:21:54.654487Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1.导入相关包\n",
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "\n",
    "# 2.实例化ConversationBufferWindowMemory对象，设定窗口阈值\n",
    "memory = ConversationBufferWindowMemory(k=1)\n",
    "# 3.保存消息\n",
    "memory.save_context({\"input\": \"你好\"}, {\"output\": \"怎么了\"})\n",
    "memory.save_context({\"input\": \"你是谁\"}, {\"output\": \"我是AI助手\"})\n",
    "memory.save_context({\"input\": \"你的生日是哪天？\"}, {\"output\": \"我不清楚\"})\n",
    "# 4.读取内存中消息（返回消息内容的纯文本）\n",
    "print(memory.load_memory_variables({}))"
   ],
   "id": "a0ab558e4e9c6985",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'history': 'Human: 你的生日是哪天？\\nAI: 我不清楚'}\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例2：返回消息构成的上下文记忆\n",
   "id": "aae51c134882140c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T14:22:29.486921Z",
     "start_time": "2025-09-02T14:22:29.476307Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1.导入相关包\n",
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "\n",
    "# 2.实例化ConversationBufferWindowMemory对象，设定窗口阈值\n",
    "memory = ConversationBufferWindowMemory(k=2, return_messages=True)\n",
    "# 3.保存消息\n",
    "memory.save_context({\"input\": \"你好\"}, {\"output\": \"怎么了\"})\n",
    "memory.save_context({\"input\": \"你是谁\"}, {\"output\": \"我是AI助手小智\"})\n",
    "memory.save_context({\"input\": \"初次对话，你能介绍一下你自己吗？\"}, {\"output\": \"当然可以了。我是一个无所不能的小智。\"})\n",
    "# 4.读取内存中消息（返回消息内容的纯文本）\n",
    "print(memory.load_memory_variables({}))"
   ],
   "id": "d67be8cf660be5f2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'history': [HumanMessage(content='你是谁', additional_kwargs={}, response_metadata={}), AIMessage(content='我是AI助手小智', additional_kwargs={}, response_metadata={}), HumanMessage(content='初次对话，你能介绍一下你自己吗？', additional_kwargs={}, response_metadata={}), AIMessage(content='当然可以了。我是一个无所不能的小智。', additional_kwargs={}, response_metadata={})]}\n"
     ]
    }
   ],
   "execution_count": 26
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例3：结合llm、chain的使用\n",
   "id": "a3b4bb1d83b783f8"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T14:25:55.675772Z",
     "start_time": "2025-09-02T14:25:50.275370Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from langchain.memory import ConversationBufferWindowMemory\n",
     "from langchain_openai import ChatOpenAI\n",
    "# 1.导入相关包\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "from langchain.chains.llm import LLMChain\n",
    "\n",
    "# 2.定义模版\n",
    "template = \"\"\"以下是人类与AI之间的友好对话描述。AI表现得很健谈，并提供了大量来自其上下文的具体细节。如果AI不知道问题的答案，它会表示不知道。\n",
    "\n",
    "当前对话：\n",
    "{history}\n",
    "Human: {question}\n",
    "AI:\"\"\"\n",
    "\n",
    "# 3.定义提示词模版\n",
    "prompt_template = PromptTemplate.from_template(template)\n",
    "\n",
    "# 4.创建大模型\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 5.实例化ConversationBufferWindowMemory对象，设定窗口阈值\n",
    "memory = ConversationBufferWindowMemory(k=1)\n",
    "\n",
    "# 6.定义LLMChain\n",
    "conversation_with_summary = LLMChain(\n",
    "    llm=llm,\n",
    "    prompt=prompt_template,\n",
    "    memory=memory,\n",
    "    #verbose=True,\n",
    ")\n",
    "\n",
    "# 7.执行链（第一次提问）\n",
    "respon1 = conversation_with_summary.invoke({\"question\":\"你好，我是孙小空\"})\n",
    "print(respon1)\n",
    "# 8.执行链（第二次提问）\n",
    "respon2 =conversation_with_summary.invoke({\"question\":\"我还有两个师弟，一个是猪小戒，一个是沙小僧\"})\n",
    "print(respon2)\n",
    "# 9.执行链（第三次提问）\n",
    "respon3 =conversation_with_summary.invoke({\"question\":\"我今年高考，竟然考上了1本\"})\n",
    "print(respon3)\n",
    "# 10.执行链（第四次提问）\n",
    "respon4 =conversation_with_summary.invoke({\"question\":\"我叫什么名字？\"})\n",
    "print(respon4)"
   ],
   "id": "5b63eb5ba15487c1",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '你好，我是孙小空', 'history': '', 'text': '你好，孙小空！很高兴见到你！你今天过得怎么样？有什么我可以帮助你的吗？'}\n",
      "{'question': '我还有两个师弟，一个是猪小戒，一个是沙小僧', 'history': 'Human: 你好，我是孙小空\\nAI: 你好，孙小空！很高兴见到你！你今天过得怎么样？有什么我可以帮助你的吗？', 'text': '哇，猪小戒和沙小僧听起来很有趣！他们是不是你们团队里的搞笑角色呢？你们平时都一起做些什么呢？'}\n",
      "{'question': '我今年高考，竟然考上了1本', 'history': 'Human: 我还有两个师弟，一个是猪小戒，一个是沙小僧\\nAI: 哇，猪小戒和沙小僧听起来很有趣！他们是不是你们团队里的搞笑角色呢？你们平时都一起做些什么呢？', 'text': '太厉害了！恭喜你考上了一本！这是一个很大的成就，你一定付出了很多努力。你想要学什么专业呢？或者有没有什么学校是你特别想去的？'}\n",
      "{'question': '我叫什么名字？', 'history': 'Human: 我今年高考，竟然考上了1本\\nAI: 太厉害了！恭喜你考上了一本！这是一个很大的成就，你一定付出了很多努力。你想要学什么专业呢？或者有没有什么学校是你特别想去的？', 'text': '抱歉，我不知道你的名字。你愿意告诉我吗？或者我们可以聊聊其他你感兴趣的事情！'}\n"
     ]
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "举例4：修改举例3中的参数k\n",
   "id": "87ae15156522c468"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-02T14:26:55.927165Z",
     "start_time": "2025-09-02T14:26:51.282379Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from langchain.memory import ConversationBufferWindowMemory\n",
     "from langchain_openai import ChatOpenAI\n",
    "# 1.导入相关包\n",
    "from langchain_core.prompts.prompt import PromptTemplate\n",
    "from langchain.chains.llm import LLMChain\n",
    "\n",
    "# 2.定义模版\n",
    "template = \"\"\"以下是人类与AI之间的友好对话描述。AI表现得很健谈，并提供了大量来自其上下文的具体细节。如果AI不知道问题的答案，它会表示不知道。\n",
    "\n",
    "当前对话：\n",
    "{history}\n",
    "Human: {question}\n",
    "AI:\"\"\"\n",
    "\n",
    "# 3.定义提示词模版\n",
    "prompt_template = PromptTemplate.from_template(template)\n",
    "\n",
    "# 4.创建大模型\n",
    "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
    "\n",
    "# 5.实例化ConversationBufferWindowMemory对象，设定窗口阈值\n",
    "memory = ConversationBufferWindowMemory(k=3)\n",
    "\n",
    "# 6.定义LLMChain\n",
    "conversation_with_summary = LLMChain(\n",
    "    llm=llm,\n",
    "    prompt=prompt_template,\n",
    "    memory=memory,\n",
    "    #verbose=True,\n",
    ")\n",
    "\n",
    "# 7.执行链（第一次提问）\n",
    "respon1 = conversation_with_summary.invoke({\"question\":\"你好，我是孙小空\"})\n",
    "# print(respon1)\n",
    "# 8.执行链（第二次提问）\n",
    "respon2 =conversation_with_summary.invoke({\"question\":\"我还有两个师弟，一个是猪小戒，一个是沙小僧\"})\n",
    "# print(respon2)\n",
    "# 9.执行链（第三次提问）\n",
    "respon3 =conversation_with_summary.invoke({\"question\":\"我今年高考，竟然考上了1本\"})\n",
    "# print(respon3)\n",
    "# 10.执行链（第四次提问）\n",
    "respon4 =conversation_with_summary.invoke({\"question\":\"我叫什么名字？\"})\n",
    "print(respon4)"
   ],
   "id": "7a56f7f735639034",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'question': '我叫什么名字？', 'history': 'Human: 你好，我是孙小空\\nAI: 你好，孙小空！很高兴认识你。你今天过得怎么样？有什么想聊的内容吗？\\nHuman: 我还有两个师弟，一个是猪小戒，一个是沙小僧\\nAI: 很高兴认识你的师弟们！猪小戒和沙小僧的名字听起来很有趣，似乎有些文艺或是神话背景。你们是不是一起学习或者修炼什么特别的技能呢？如果有趣的故事或者共同经历，欢迎分享！\\nHuman: 我今年高考，竟然考上了1本\\nAI: 太棒了，孙小空！恭喜你考上了一本大学！这是一个很大的成就，你一定为自己感到骄傲。你打算学什么专业呢？或者你对未来有什么计划吗？', 'text': '你叫孙小空！如果你有其他问题或者想聊的话题，请随时告诉我！'}\n"
     ]
    }
   ],
   "execution_count": 31
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3"
   }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
