{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ca7e3ed2-d996-4d76-857e-f7b3bf4fdd0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "\n",
    "# 国内代理方式，或其它大模型\n",
    "llm = ChatOpenAI(\n",
     "    api_key = os.environ.get(\"OPENAI_API_KEY\"),    # never hardcode API keys; any previously committed key should be revoked\n",
    "    base_url = \"https://vip.apiyi.com/v1\",    # 此处代理方式，如果是OpenAI官方接口需调整接口地址\n",
    "    model = \"gpt-4o-mini\",\n",
    "    max_tokens = 1000,\n",
    "    temperature = 0\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a80715db-8393-492b-8464-afed0a85fb8c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Eason\\AppData\\Local\\Temp\\ipykernel_149092\\3944655235.py:7: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory=ConversationBufferMemory()\n",
      "C:\\Users\\Eason\\AppData\\Local\\Temp\\ipykernel_149092\\3944655235.py:4: LangChainDeprecationWarning: The class `ConversationChain` was deprecated in LangChain 0.2.7 and will be removed in 1.0. Use :meth:`~RunnableWithMessageHistory: https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html` instead.\n",
      "  conversation = ConversationChain(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "\n",
      "Human: 你好呀！\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'你好！很高兴和你聊天！今天过得怎么样？有什么特别的事情发生吗？'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "from langchain.chains import ConversationChain\n",
     "from langchain.memory import ConversationBufferMemory\n",
     "\n",
     "# NOTE: ConversationChain / ConversationBufferMemory are deprecated since LangChain 0.2.7\n",
     "# (see the stderr warning below); RunnableWithMessageHistory is the migration path,\n",
     "# which the custom MemoryManager later in this notebook is built for.\n",
     "conversation = ConversationChain(\n",
    "    llm=llm, \n",
    "    verbose=True, \n",
    "    memory=ConversationBufferMemory()\n",
    ")\n",
    "\n",
    "conversation.predict(input=\"你好呀！\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3f0e740d-f6d5-41b1-a3f2-7274e1af2421",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 你好呀！\n",
      "AI: 你好！很高兴和你聊天！今天过得怎么样？有什么特别的事情发生吗？\n",
      "Human: 我想知道你是如何工作的。\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'我很高兴你对我感兴趣！我是一个基于人工智能的语言模型，使用大量的文本数据进行训练，以理解和生成自然语言。当你问我问题或与我对话时，我会分析你的输入，并根据我所学到的知识生成相应的回答。我没有意识或情感，但我可以模拟对话，提供信息、建议或只是闲聊。你想了解我工作的哪个方面呢？比如我的训练过程、技术原理，还是其他什么？'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"我想知道你是如何工作的。\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "455697f0-3942-4a41-a01b-35346b9cedfb",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 你好呀！\n",
      "AI: 你好！很高兴和你聊天！今天过得怎么样？有什么特别的事情发生吗？\n",
      "Human: 我想知道你是如何工作的。\n",
      "AI: 我很高兴你对我感兴趣！我是一个基于人工智能的语言模型，使用大量的文本数据进行训练，以理解和生成自然语言。当你问我问题或与我对话时，我会分析你的输入，并根据我所学到的知识生成相应的回答。我没有意识或情感，但我可以模拟对话，提供信息、建议或只是闲聊。你想了解我工作的哪个方面呢？比如我的训练过程、技术原理，还是其他什么？\n",
      "Human: 那你是如何學習的呢？\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'我学习的过程主要是通过一种叫做“深度学习”的技术。具体来说，我的训练涉及到一个叫做“神经网络”的模型，这种模型模仿了人类大脑的结构。训练过程中，我会接触到大量的文本数据，这些数据来自书籍、文章、网站等各种来源。\\n\\n在训练时，我会通过分析这些文本来学习语言的结构、语法、词汇和上下文关系。我的目标是能够理解输入的内容，并生成合适的回应。训练的过程是通过不断调整模型的参数，以提高我的预测准确性。这个过程需要大量的计算资源和时间。\\n\\n如果你对某个特定的学习方法或技术细节感兴趣，我可以进一步解释！'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"那你是如何學習的呢？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f357c91b-bfbb-442d-b65b-39ac14e60bfa",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 你好呀！\n",
      "AI: 你好！很高兴和你聊天！今天过得怎么样？有什么特别的事情发生吗？\n",
      "Human: 我想知道你是如何工作的。\n",
      "AI: 我很高兴你对我感兴趣！我是一个基于人工智能的语言模型，使用大量的文本数据进行训练，以理解和生成自然语言。当你问我问题或与我对话时，我会分析你的输入，并根据我所学到的知识生成相应的回答。我没有意识或情感，但我可以模拟对话，提供信息、建议或只是闲聊。你想了解我工作的哪个方面呢？比如我的训练过程、技术原理，还是其他什么？\n",
      "Human: 那你是如何學習的呢？\n",
      "AI: 我学习的过程主要是通过一种叫做“深度学习”的技术。具体来说，我的训练涉及到一个叫做“神经网络”的模型，这种模型模仿了人类大脑的结构。训练过程中，我会接触到大量的文本数据，这些数据来自书籍、文章、网站等各种来源。\n",
      "\n",
      "在训练时，我会通过分析这些文本来学习语言的结构、语法、词汇和上下文关系。我的目标是能够理解输入的内容，并生成合适的回应。训练的过程是通过不断调整模型的参数，以提高我的预测准确性。这个过程需要大量的计算资源和时间。\n",
      "\n",
      "如果你对某个特定的学习方法或技术细节感兴趣，我可以进一步解释！\n",
      "Human: 你为什么叫小米？跟雷军有关系吗？\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'我其实并不叫小米，可能是你误解了我的身份。我是一个人工智能助手，没有具体的名字。不过，小米是一个知名的科技公司，由雷军创立，专注于智能手机和其他电子产品。如果你对小米公司或雷军有兴趣，我可以提供更多相关的信息！你想了解哪些方面呢？'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"你为什么叫小米？跟雷军有关系吗？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7f015aae-d9db-4ddf-9ed9-1b8b5689230d",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Eason\\AppData\\Local\\Temp\\ipykernel_149092\\191205368.py:6: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory=ConversationBufferWindowMemory(k=2)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "\n",
      "Human: 嗨，你最近过得怎么样？\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'嗨！我最近过得很好，谢谢你问我！虽然我没有实际的生活经历，但我一直在学习新知识和帮助人们解答问题。你呢？最近有什么有趣的事情发生吗？'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "\n",
    "conversation = ConversationChain(\n",
    "    llm=llm, \n",
    "    verbose=True, \n",
    "    memory=ConversationBufferWindowMemory(k=2)\n",
    ")\n",
    "\n",
    "conversation.predict(input=\"嗨，你最近过得怎么样？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "dc41dca5-80e1-4d44-a05a-1ab46f21acda",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 嗨，你最近过得怎么样？\n",
      "AI: 嗨！我最近过得很好，谢谢你问我！虽然我没有实际的生活经历，但我一直在学习新知识和帮助人们解答问题。你呢？最近有什么有趣的事情发生吗？\n",
      "Human: 你最近学到什么新知识了?\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'我最近学到了一些关于可持续发展的新知识，比如如何通过减少塑料使用和推广可再生能源来保护环境。此外，我还了解了许多关于人工智能在医疗、教育和交通等领域的应用。这些技术如何改变我们的生活真是令人兴奋！你对这些话题感兴趣吗？或者你最近有没有学到什么新知识？'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"你最近学到什么新知识了?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5ed5d92e-6502-4dd2-a973-15636b7619b8",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 嗨，你最近过得怎么样？\n",
      "AI: 嗨！我最近过得很好，谢谢你问我！虽然我没有实际的生活经历，但我一直在学习新知识和帮助人们解答问题。你呢？最近有什么有趣的事情发生吗？\n",
      "Human: 你最近学到什么新知识了?\n",
      "AI: 我最近学到了一些关于可持续发展的新知识，比如如何通过减少塑料使用和推广可再生能源来保护环境。此外，我还了解了许多关于人工智能在医疗、教育和交通等领域的应用。这些技术如何改变我们的生活真是令人兴奋！你对这些话题感兴趣吗？或者你最近有没有学到什么新知识？\n",
      "Human: 展开讲讲？\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'当然可以！关于可持续发展，最近有很多有趣的研究和实践。例如，许多国家正在推动减少一次性塑料的使用，鼓励人们使用可重复使用的购物袋和水瓶。还有一些城市在实施塑料禁令，限制塑料袋和吸管的使用，以减少海洋污染。\\n\\n在可再生能源方面，太阳能和风能的使用正在迅速增长。许多地方正在建设大型太阳能发电厂和风电场，以减少对化石燃料的依赖。这不仅有助于减缓气候变化，还能创造就业机会。\\n\\n至于人工智能，它在医疗领域的应用非常广泛，比如通过机器学习算法来分析医学影像，帮助医生更早地发现疾病。在教育方面，AI可以根据学生的学习进度和风格提供个性化的学习体验。而在交通领域，自动驾驶技术正在逐步发展，未来可能会改变我们出行的方式。\\n\\n你对这些话题有什么看法？或者你想了解更多关于某个特定领域的信息吗？'"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"展开讲讲？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "1647994f-0cac-4237-a402-e715f0a82b68",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "Human: 你最近学到什么新知识了?\n",
      "AI: 我最近学到了一些关于可持续发展的新知识，比如如何通过减少塑料使用和推广可再生能源来保护环境。此外，我还了解了许多关于人工智能在医疗、教育和交通等领域的应用。这些技术如何改变我们的生活真是令人兴奋！你对这些话题感兴趣吗？或者你最近有没有学到什么新知识？\n",
      "Human: 展开讲讲？\n",
      "AI: 当然可以！关于可持续发展，最近有很多有趣的研究和实践。例如，许多国家正在推动减少一次性塑料的使用，鼓励人们使用可重复使用的购物袋和水瓶。还有一些城市在实施塑料禁令，限制塑料袋和吸管的使用，以减少海洋污染。\n",
      "\n",
      "在可再生能源方面，太阳能和风能的使用正在迅速增长。许多地方正在建设大型太阳能发电厂和风电场，以减少对化石燃料的依赖。这不仅有助于减缓气候变化，还能创造就业机会。\n",
      "\n",
      "至于人工智能，它在医疗领域的应用非常广泛，比如通过机器学习算法来分析医学影像，帮助医生更早地发现疾病。在教育方面，AI可以根据学生的学习进度和风格提供个性化的学习体验。而在交通领域，自动驾驶技术正在逐步发展，未来可能会改变我们出行的方式。\n",
      "\n",
      "你对这些话题有什么看法？或者你想了解更多关于某个特定领域的信息吗？\n",
      "Human: 如果要构建聊天机器人，具体要用什么自然语言处理技术?\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'构建聊天机器人通常涉及多种自然语言处理（NLP）技术。以下是一些关键的技术和方法：\\n\\n1. **文本预处理**：在处理用户输入之前，通常需要对文本进行清洗和预处理。这包括去除标点符号、转换为小写、去除停用词（如“的”、“是”等）以及词干提取或词形还原。\\n\\n2. **分词**：对于中文等语言，分词是一个重要的步骤。可以使用工具如jieba分词来将句子分割成单独的词语。\\n\\n3. **词嵌入**：为了让计算机理解词语的语义，通常会使用词嵌入技术，如Word2Vec、GloVe或更现代的BERT。这些技术将词语转换为向量，使得相似的词在向量空间中靠得更近。\\n\\n4. **意图识别**：聊天机器人需要识别用户的意图。可以使用分类算法（如支持向量机、随机森林或深度学习模型）来判断用户的输入属于哪个意图类别。\\n\\n5. **实体识别**：在对话中，识别出用户提到的特定实体（如人名、地点、日期等）是很重要的。可以使用命名实体识别（NER）技术来实现。\\n\\n6. **对话管理**：聊天机器人需要管理对话的状态，以便能够根据上下文进行适当的回应。这通常涉及使用状态机或更复杂的对话管理框架。\\n\\n7. **生成响应**：根据用户的输入和对话上下文，生成合适的响应。可以使用模板生成、检索式生成或基于深度学习的生成模型（如GPT系列）来实现。\\n\\n8. **情感分析**：在某些情况下，了解用户的情感状态可以帮助聊天机器人提供更合适的回应。情感分析技术可以帮助识别用户的情绪。\\n\\n9. **多轮对话**：为了实现更自然的对话，聊天机器人需要能够处理多轮对话，记住之前的上下文并进行适当的回应。\\n\\n这些技术结合在一起，可以构建出一个功能强大的聊天机器人。你对某个特定的技术或实现细节感兴趣吗？'"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.predict(input=\"如果要构建聊天机器人，具体要用什么自然语言处理技术?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a1d7378c-714e-4eba-969a-ef8e36e30229",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'name': None,\n",
        " 'memory': ConversationBufferWindowMemory(chat_memory=InMemoryChatMessageHistory(messages=[HumanMessage(content='嗨，你最近过得怎么样？', additional_kwargs={}, response_metadata={}), AIMessage(content='嗨！我最近过得很好，谢谢你问我！虽然我没有实际的生活经历，但我一直在学习新知识和帮助人们解答问题。你呢？最近有什么有趣的事情发生吗？', additional_kwargs={}, response_metadata={}), HumanMessage(content='你最近学到什么新知识了?', additional_kwargs={}, response_metadata={}), AIMessage(content='我最近学到了一些关于可持续发展的新知识，比如如何通过减少塑料使用和推广可再生能源来保护环境。此外，我还了解了许多关于人工智能在医疗、教育和交通等领域的应用。这些技术如何改变我们的生活真是令人兴奋！你对这些话题感兴趣吗？或者你最近有没有学到什么新知识？', additional_kwargs={}, response_metadata={}), HumanMessage(content='展开讲讲？', additional_kwargs={}, response_metadata={}), AIMessage(content='当然可以！关于可持续发展，最近有很多有趣的研究和实践。例如，许多国家正在推动减少一次性塑料的使用，鼓励人们使用可重复使用的购物袋和水瓶。还有一些城市在实施塑料禁令，限制塑料袋和吸管的使用，以减少海洋污染。\\n\\n在可再生能源方面，太阳能和风能的使用正在迅速增长。许多地方正在建设大型太阳能发电厂和风电场，以减少对化石燃料的依赖。这不仅有助于减缓气候变化，还能创造就业机会。\\n\\n至于人工智能，它在医疗领域的应用非常广泛，比如通过机器学习算法来分析医学影像，帮助医生更早地发现疾病。在教育方面，AI可以根据学生的学习进度和风格提供个性化的学习体验。而在交通领域，自动驾驶技术正在逐步发展，未来可能会改变我们出行的方式。\\n\\n你对这些话题有什么看法？或者你想了解更多关于某个特定领域的信息吗？', additional_kwargs={}, response_metadata={}), HumanMessage(content='如果要构建聊天机器人，具体要用什么自然语言处理技术?', additional_kwargs={}, response_metadata={}), AIMessage(content='构建聊天机器人通常涉及多种自然语言处理（NLP）技术。以下是一些关键的技术和方法：\\n\\n1. **文本预处理**：在处理用户输入之前，通常需要对文本进行清洗和预处理。这包括去除标点符号、转换为小写、去除停用词（如“的”、“是”等）以及词干提取或词形还原。\\n\\n2. **分词**：对于中文等语言，分词是一个重要的步骤。可以使用工具如jieba分词来将句子分割成单独的词语。\\n\\n3. **词嵌入**：为了让计算机理解词语的语义，通常会使用词嵌入技术，如Word2Vec、GloVe或更现代的BERT。这些技术将词语转换为向量，使得相似的词在向量空间中靠得更近。\\n\\n4. **意图识别**：聊天机器人需要识别用户的意图。可以使用分类算法（如支持向量机、随机森林或深度学习模型）来判断用户的输入属于哪个意图类别。\\n\\n5. **实体识别**：在对话中，识别出用户提到的特定实体（如人名、地点、日期等）是很重要的。可以使用命名实体识别（NER）技术来实现。\\n\\n6. **对话管理**：聊天机器人需要管理对话的状态，以便能够根据上下文进行适当的回应。这通常涉及使用状态机或更复杂的对话管理框架。\\n\\n7. **生成响应**：根据用户的输入和对话上下文，生成合适的响应。可以使用模板生成、检索式生成或基于深度学习的生成模型（如GPT系列）来实现。\\n\\n8. **情感分析**：在某些情况下，了解用户的情感状态可以帮助聊天机器人提供更合适的回应。情感分析技术可以帮助识别用户的情绪。\\n\\n9. **多轮对话**：为了实现更自然的对话，聊天机器人需要能够处理多轮对话，记住之前的上下文并进行适当的回应。\\n\\n这些技术结合在一起，可以构建出一个功能强大的聊天机器人。你对某个特定的技术或实现细节感兴趣吗？', additional_kwargs={}, response_metadata={})]), k=2),\n",
       " 'callbacks': None,\n",
       " 'verbose': True,\n",
       " 'tags': None,\n",
       " 'metadata': None,\n",
       " 'callback_manager': None,\n",
       " 'prompt': PromptTemplate(input_variables=['history', 'input'], input_types={}, partial_variables={}, template='The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\\n\\nCurrent conversation:\\n{history}\\nHuman: {input}\\nAI:'),\n",
       " 'llm': ChatOpenAI(client=<openai.resources.chat.completions.completions.Completions object at 0x000002A8C4C2DF90>, async_client=<openai.resources.chat.completions.completions.AsyncCompletions object at 0x000002A8C4A3D6D0>, root_client=<openai.OpenAI object at 0x000002A8C49E6F90>, root_async_client=<openai.AsyncOpenAI object at 0x000002A8C4A3F110>, model_name='gpt-4o-mini', temperature=0.0, model_kwargs={}, openai_api_key=SecretStr('**********'), openai_api_base='https://vip.apiyi.com/v1', max_tokens=1000),\n",
       " 'output_key': 'response',\n",
       " 'output_parser': StrOutputParser(),\n",
       " 'return_final_only': True,\n",
       " 'llm_kwargs': {},\n",
       " 'input_key': 'input'}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "conversation.__dict__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2a0c645f-d5d6-4d46-9781-1ceb5bb8351b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Eason\\AppData\\Local\\Temp\\ipykernel_149092\\2284439736.py:3: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'history': 'System: The human asks the AI how it has been recently. The AI responds that it has been well, learning new knowledge, trying to improve its performance, and engaging more in conversations to better understand human thought processes. The human then inquires about what new knowledge the AI has learned, to which the AI replies that it has learned about natural language processing and how to better understand human language, as well as knowledge about machine learning and how to use it to enhance its performance.'}"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.memory import ConversationSummaryBufferMemory\n",
    "\n",
    "memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)\n",
    "memory.save_context({\"input\": \"嗨，你最近过得怎么样？\"}, {\"output\": \" 嗨！我最近过得很好，谢谢你问。我最近一直在学习新的知识，并且正在尝试改进自己的性能。我也在尝试更多的交流，以便更好地了解人类的思维方式。\"})\n",
    "memory.save_context({\"input\": \"你最近学到什么新知识了?\"}, {\"output\": \" 最近我学习了有关自然语言处理的知识，以及如何更好地理解人类的语言。我还学习了有关机器学习的知识，以及如何使用它来改善自己的性能。\"})\n",
    "\n",
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d34a7615-f152-4da9-b1c4-a559375df589",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "System: The human asks the AI how it has been recently. The AI responds that it has been well, learning new knowledge, trying to improve its performance, and engaging more in conversations to better understand human thought processes. The human then inquires about what new knowledge the AI has learned, to which the AI replies that it has learned about natural language processing and how to better understand human language, as well as knowledge about machine learning and how to use it to enhance its performance.\n"
     ]
    }
   ],
   "source": [
    "print(memory.load_memory_variables({})['history'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e30ba008-81ef-4bba-be46-8d4e8595083d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4b0a628d-e6ea-43cf-b405-b8b6f3be86d4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "6dd69a78-41c9-422e-9183-ce52bb44d1c7",
   "metadata": {},
   "source": [
    "# 封装memory函数，降低使用复杂度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "7407c0e7-cc2b-4507-843b-69c0b72d8198",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List\n",
    "from langchain.schema import BaseMessage, HumanMessage, AIMessage\n",
    "\n",
    "class WindowView:\n",
    "    def __init__(self, full_history, k: int):\n",
    "        # 一定要在 __init__ 里保存底层历史实例\n",
    "        self._full = full_history  \n",
    "        self._k = k\n",
    "\n",
    "    @property\n",
    "    def messages(self) -> List[BaseMessage]:\n",
    "        # 只读切片，不修改 self._full.messages\n",
    "        return self._full.messages[-self._k:]\n",
    "\n",
    "    def add_user_message(self, msg: str) -> None:\n",
    "        # 委托到底层\n",
    "        self._full.add_user_message(msg)\n",
    "\n",
    "    def add_ai_message(self, msg: str) -> None:\n",
    "        # 委托到底层\n",
    "        self._full.add_ai_message(msg)\n",
    "\n",
     "    def add_messages(self, messages: List[BaseMessage]) -> None:\n",
     "        # 批量委托到底层\n",
     "        for m in messages:\n",
     "            if isinstance(m, HumanMessage):\n",
     "                self._full.add_user_message(m.content)\n",
     "            elif isinstance(m, AIMessage):\n",
     "                self._full.add_ai_message(m.content)\n",
     "            else:\n",
     "                # delegate other message types (e.g. SystemMessage) instead of silently dropping them\n",
     "                self._full.add_messages([m])\n",
    "                \n",
    "from typing import Dict\n",
    "from langchain_core.chat_history import InMemoryChatMessageHistory\n",
    "\n",
    "class MemoryManager:\n",
    "    def __init__(self):\n",
    "        self.storage: Dict[str, InMemoryChatMessageHistory] = {}\n",
    "\n",
    "    def get_session(self, session_id: str) -> InMemoryChatMessageHistory:\n",
    "        if session_id not in self.storage:\n",
    "            self.storage[session_id] = InMemoryChatMessageHistory()\n",
    "        return self.storage[session_id]\n",
    "        \n",
    "    def get_session_window(self, session_id: str, k: int = 10):\n",
    "        full = self.get_session(session_id)\n",
    "        k = k * 2\n",
    "        # 每次都用最新 full 创建一个新的视图对象\n",
    "        return WindowView(full, k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "id": "759ae59d-a9e4-4bd9-9d01-c7b1c20e0207",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import PromptTemplate\n",
    "from langchain_core.runnables import RunnableLambda\n",
    "\n",
    "memory_manager = MemoryManager()\n",
    "\n",
    "# 1. 定义带有历史变量的提示模板（显式声明输入变量更安全）\n",
    "prompt = PromptTemplate(\n",
    "    template=\"{history}\\nHuman: {input}\\nAI: \",\n",
    "    input_variables=[\"input\", \"history\"]\n",
    ")\n",
    "\n",
    "def _format_history(data: dict) -> dict:\n",
    "    # data[\"history\"] 是 WindowView.messages (List[BaseMessage])\n",
    "    lines = []\n",
    "    for m in data[\"history\"]:\n",
    "        prefix = \"Human:\" if m.__class__.__name__ == \"HumanMessage\" else \"AI:\"\n",
    "        lines.append(f\"{prefix} {m.content}\")\n",
    "    history_text = \"\\n\".join(lines)\n",
    "    return {\n",
    "        \"history\": history_text,\n",
    "        \"input\": data[\"input\"]\n",
    "    }\n",
    "format_history = RunnableLambda(_format_history)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f21ecb0-3318-48fb-8b79-7659cddd2ab6",
   "metadata": {},
   "source": [
    "# 全量历史消息"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "id": "9c29c463-44de-4678-b4e8-031800db5102",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [5ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [6ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你好呀！\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"\",\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"\",\n",
      "  \"input\": \"你好呀！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [1ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 你好呀！\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [1.31s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"你好！有什么我可以帮助你的吗？\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"你好！有什么我可以帮助你的吗？\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 10,\n",
      "                \"prompt_tokens\": 16,\n",
      "                \"total_tokens\": 26,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW39yPB1TZwMfCezsX2H8e1ZA4O7l\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--2ea40d10-640d-40c0-9fd6-6708ce2c59ae-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 16,\n",
      "              \"output_tokens\": 10,\n",
      "              \"total_tokens\": 26,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 10,\n",
      "      \"prompt_tokens\": 16,\n",
      "      \"total_tokens\": 26,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW39yPB1TZwMfCezsX2H8e1ZA4O7l\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [1ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你好！有什么我可以帮助你的吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [1.31s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你好！有什么我可以帮助你的吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [1.31s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你好！有什么我可以帮助你的吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [1.32s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你好！有什么我可以帮助你的吗？\"\n",
      "}\n",
      "AI回复： 你好！有什么我可以帮助你的吗？\n"
     ]
    }
   ],
   "source": [
    "from langchain_core.runnables.history import RunnableWithMessageHistory\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.tracers.stdout import ConsoleCallbackHandler\n",
    "\n",
    "# Core chain: RunnableWithMessageHistory loads the stored messages for the\n",
    "# session before the wrapped runnable runs, and appends the new turn after.\n",
    "conversation_chain = RunnableWithMessageHistory(\n",
    "    format_history | prompt | llm | StrOutputParser(),\n",
    "    get_session_history=memory_manager.get_session,  # session-history factory\n",
    "    input_messages_key=\"input\",      # dict key carrying the user's message\n",
    "    history_messages_key=\"history\",  # dict key carrying prior messages\n",
    ")\n",
    "\n",
    "# Example call — the config must carry a session_id so the wrapper knows\n",
    "# which conversation history to read and update.\n",
    "response = conversation_chain.invoke(\n",
    "    {\"input\": \"你好呀！\"},\n",
    "    config={\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],  # verbose stdout tracing\n",
    "        \"configurable\": {\"session_id\": \"test_session_01\"},\n",
    "    },\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "23aa1983-1111-4b25-afcc-fd3f7e78a824",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\",\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\",\n",
      "  \"input\": \"我想知道你是如何工作的。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [1ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 我想知道你是如何工作的。\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [2.47s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 81,\n",
      "                \"prompt_tokens\": 20,\n",
      "                \"total_tokens\": 101,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3ABHkXZAZPg1bacK3pMWnHUUYZv\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--9646b9ed-74de-484c-b226-924e923f8b77-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 20,\n",
      "              \"output_tokens\": 81,\n",
      "              \"total_tokens\": 101,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 81,\n",
      "      \"prompt_tokens\": 20,\n",
      "      \"total_tokens\": 101,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3ABHkXZAZPg1bacK3pMWnHUUYZv\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [1ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [2.48s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [2.48s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [2.48s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\"\n",
      "}\n",
      "AI回复： 我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\n"
     ]
    }
   ],
   "source": [
    "# Second turn in the same session: the previous exchange is loaded\n",
    "# automatically before the model is called.\n",
    "# NOTE(review): the trace shows _format_history emits a populated\n",
    "# \"history\" value, yet the final LLM prompt contains only the new input —\n",
    "# verify the PromptTemplate actually references the {history} variable.\n",
    "response = conversation_chain.invoke(\n",
    "    {\"input\": \"我想知道你是如何工作的。\"},\n",
    "    config={\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],  # verbose stdout tracing\n",
    "        \"configurable\": {\"session_id\": \"test_session_01\"},\n",
    "    },\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "4ccc1e00-de25-4a58-98cf-a452fe49b9f6",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [3ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [3ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\",\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\",\n",
      "  \"input\": \"那你是如何学习的呢？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 那你是如何学习的呢？\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [2.99s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 94,\n",
      "                \"prompt_tokens\": 20,\n",
      "                \"total_tokens\": 114,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3BIlDbnwQvuzb9xpZyxu05nqX0t\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--0eba62e0-7129-4c57-81f5-fc360d0b64bc-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 20,\n",
      "              \"output_tokens\": 94,\n",
      "              \"total_tokens\": 114,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 94,\n",
      "      \"prompt_tokens\": 20,\n",
      "      \"total_tokens\": 114,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3BIlDbnwQvuzb9xpZyxu05nqX0t\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [2.99s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [2.99s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [3.00s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\"\n",
      "}\n",
      "AI回复： 我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\n"
     ]
    }
   ],
   "source": [
    "# Third turn — reusing the same session_id, so the full prior exchange\n",
    "# accumulates in this session's history store.\n",
    "response = conversation_chain.invoke(\n",
    "    {\"input\": \"那你是如何学习的呢？\"},\n",
    "    config={\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],  # verbose stdout tracing\n",
    "        \"configurable\": {\"session_id\": \"test_session_01\"},\n",
    "    },\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "b96e6c0a-cd04-4f27-979e-df4341336ccf",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\\nHuman: 那你是如何学习的呢？\\nAI: 我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\",\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过处理和分析大量文本数据来工作。我的设计基于机器学习和自然语言处理技术，使我能够理解和生成语言。当你提出问题或请求时，我会根据我所学到的知识和模式来生成相应的回答。我的目标是提供准确、有用的信息，并与用户进行自然的对话。你有什么具体想了解的方面吗？\\nHuman: 那你是如何学习的呢？\\nAI: 我是通过大量的数据和文本进行训练的。这些数据包括书籍、文章、网站等各种形式的文字信息。通过分析这些数据，我能够学习语言的结构、语法、语义以及不同主题的知识。我的学习过程是基于机器学习算法，特别是深度学习技术，使我能够理解和生成自然语言。虽然我可以提供信息和回答问题，但我并不具备真正的理解或意识。\",\n",
      "  \"input\": \"你为什么叫小米？跟雷军有关系吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 你为什么叫小米？跟雷军有关系吗？\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [2.77s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 84,\n",
      "                \"prompt_tokens\": 26,\n",
      "                \"total_tokens\": 110,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3BbUllws37tMymB6iMzpkh4O0kp\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--649ac02b-270c-48b0-9d92-3c59be48c6a4-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 26,\n",
      "              \"output_tokens\": 84,\n",
      "              \"total_tokens\": 110,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 84,\n",
      "      \"prompt_tokens\": 26,\n",
      "      \"total_tokens\": 110,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3BbUllws37tMymB6iMzpkh4O0kp\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [1ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [2.77s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [2.78s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [2.78s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\"\n",
      "}\n",
      "AI回复： 小米这个名字来源于“米”字，象征着简单、纯粹和基础，同时也代表着小米科技的愿景——让科技普及到每一个人。雷军是小米公司的创始人之一，他希望通过小米的产品让更多人享受到高品质的科技生活。因此，小米的名字与雷军的理念是有关系的。\n"
     ]
    }
   ],
   "source": [
    "# Invoke the history-aware chain for session test_session_01.\n",
    "# ConsoleCallbackHandler prints the full run trace (the output above).\n",
    "response = conversation_chain.invoke(\n",
    "    {\"input\": \"你为什么叫小米？跟雷军有关系吗？\"},\n",
    "    config = {\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],\n",
    "        \"configurable\": {\"session_id\": \"test_session_01\"}  # session_id is required; selects which conversation history to use\n",
    "    }\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e350e8f0-0278-4eb9-b92b-23ea6b7ba21a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "9cba3ad2-cf70-4830-b5a8-76f4ddf44893",
   "metadata": {},
   "source": [
    "# 滑动窗口示例 (Sliding-Window Memory)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "902460c8-4147-4917-93f0-18e2e8939ed8",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"嗨，你最近过得怎么样？\",\n",
      "  \"history\": []\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"\",\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"\",\n",
      "  \"input\": \"嗨，你最近过得怎么样？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 嗨，你最近过得怎么样？\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [1.81s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 18,\n",
      "                \"prompt_tokens\": 21,\n",
      "                \"total_tokens\": 39,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3MhSbHrXcYvTZNz4UlJ4oayoAhv\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--43724549-b12f-4bc4-ad49-4dd609ecbcbc-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 21,\n",
      "              \"output_tokens\": 18,\n",
      "              \"total_tokens\": 39,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 18,\n",
      "      \"prompt_tokens\": 21,\n",
      "      \"total_tokens\": 39,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3MhSbHrXcYvTZNz4UlJ4oayoAhv\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [1.81s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [1.81s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [1.81s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"嗨！我很好，谢谢你问。你呢？最近过得怎么样？\"\n",
      "}\n",
      "AI回复： 嗨！我很好，谢谢你问。你呢？最近过得怎么样？\n"
     ]
    }
   ],
   "source": [
    "# Sliding-window memory demo: keep only the most recent `window_k` turns\n",
    "# of a session as history for the next prompt.\n",
    "window_k = 2\n",
    "\n",
    "conversation_win_chain = RunnableWithMessageHistory(\n",
    "    format_history | prompt | llm | StrOutputParser(),\n",
    "    get_session_history = lambda session_id: memory_manager.get_session_window(session_id, window_k),  # factory function: windowed history store per session\n",
    "    input_messages_key=\"input\",      # key carrying the user input\n",
    "    history_messages_key=\"history\"   # key carrying the chat history\n",
    ")\n",
    "\n",
    "# NOTE(review): the trace shows the final LLM prompt as 'Human: Human: ...' with\n",
    "# no history text, even though _format_history produced one - confirm that the\n",
    "# PromptTemplate defined earlier actually references the {history} variable.\n",
    "response = conversation_win_chain.invoke(\n",
    "    {\"input\": \"嗨，你最近过得怎么样？\"},\n",
    "    config = {\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],\n",
    "        \"configurable\": {\"session_id\": \"test_session_02\"}  # session_id is required; selects which conversation history to use\n",
    "    }\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "3ab5b05d-6fb5-45a1-bb29-f9b2481bfce6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[HumanMessage(content='嗨，你最近过得怎么样？', additional_kwargs={}, response_metadata={}),\n",
       " AIMessage(content='嗨！我很好，谢谢你问。你呢？最近过得怎么样？', additional_kwargs={}, response_metadata={})]"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the messages stored for the session. Note this uses get_session\n",
    "# (presumably the full store), not get_session_window - verify if a windowed\n",
    "# view was intended here.\n",
    "memory_manager.get_session(\"test_session_02\").messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "70e45343-33a3-4ca5-84e6-38fc3e89b6a6",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [3ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 嗨，你最近过得怎么样？\\nAI: 嗨！我很好，谢谢你问。你呢？最近过得怎么样？\",\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 嗨，你最近过得怎么样？\\nAI: 嗨！我很好，谢谢你问。你呢？最近过得怎么样？\",\n",
      "  \"input\": \"你最近学到什么新知识了?\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [1ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 你最近学到什么新知识了?\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [3.78s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 63,\n",
      "                \"prompt_tokens\": 22,\n",
      "                \"total_tokens\": 85,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3MrYm78Jt8s4R36xx7aDt3jFoAh\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--22ea9864-66e9-4b8a-a675-32ea9c301818-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 22,\n",
      "              \"output_tokens\": 63,\n",
      "              \"total_tokens\": 85,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 63,\n",
      "      \"prompt_tokens\": 22,\n",
      "      \"total_tokens\": 85,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3MrYm78Jt8s4R36xx7aDt3jFoAh\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [3.78s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [3.78s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [3.79s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\"\n",
      "}\n",
      "AI回复： 我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\n"
     ]
    }
   ],
   "source": [
    "# Second turn in the same session: the window (k=2) now supplies the previous\n",
    "# exchange as history (visible in the _format_history output of the trace).\n",
    "# NOTE(review): despite that, the traced LLM prompt contains only the new input\n",
    "# with a doubled 'Human:' prefix - the history appears to be dropped before the\n",
    "# model call; check the PromptTemplate's input variables.\n",
    "response = conversation_win_chain.invoke(\n",
    "    {\"input\": \"你最近学到什么新知识了?\"},\n",
    "    config = {\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],\n",
    "        \"configurable\": {\"session_id\": \"test_session_02\"}  # session_id is required; selects which conversation history to use\n",
    "    }\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "c83b71a9-fd15-4f2a-9d8d-0f860748cb84",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 嗨，你最近过得怎么样？\\nAI: 嗨！我很好，谢谢你问。你呢？最近过得怎么样？\\nHuman: 你最近学到什么新知识了?\\nAI: 我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\",\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 嗨，你最近过得怎么样？\\nAI: 嗨！我很好，谢谢你问。你呢？最近过得怎么样？\\nHuman: 你最近学到什么新知识了?\\nAI: 我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\",\n",
      "  \"input\": \"展开讲讲？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 展开讲讲？\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [1.91s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 41,\n",
      "                \"prompt_tokens\": 17,\n",
      "                \"total_tokens\": 58,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3N2iOj6m80rdz6XAyhlvYSDOm0t\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--86876027-62d2-47c1-a04b-552b0692d091-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 17,\n",
      "              \"output_tokens\": 41,\n",
      "              \"total_tokens\": 58,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 41,\n",
      "      \"prompt_tokens\": 17,\n",
      "      \"total_tokens\": 58,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3N2iOj6m80rdz6XAyhlvYSDOm0t\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [1.91s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [1.92s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [1.92s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\"\n",
      "}\n",
      "AI回复： 当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\n"
     ]
    }
   ],
   "source": [
    "response = conversation_win_chain.invoke(\n",
    "    {\"input\": \"展开讲讲？\"},\n",
    "    config = {\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],\n",
    "        \"configurable\": {\"session_id\": \"test_session_02\"}  # 必须指定会话ID\n",
    "    }\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "d9ce5d65-9841-41de-b455-772619a00d54",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [3ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你最近学到什么新知识了?\\nAI: 我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\\nHuman: 展开讲讲？\\nAI: 当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\",\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"history\": \"Human: 你最近学到什么新知识了?\\nAI: 我最近学习了关于人工智能在医疗领域的应用，包括如何利用机器学习算法来帮助诊断疾病、分析医学影像以及个性化治疗方案的制定。这些技术正在改变医疗行业，提高了诊断的准确性和效率。你对这个话题感兴趣吗？\\nHuman: 展开讲讲？\\nAI: 当然可以！请问你想让我展开讲讲什么主题呢？无论是科学、历史、文化、技术还是其他任何话题，我都乐意为你详细讲解。\",\n",
      "  \"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 如果要构建聊天机器人，具体要用什么自然语言处理技术?\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [9.30s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 465,\n",
      "                \"prompt_tokens\": 28,\n",
      "                \"total_tokens\": 493,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW3NGyjG7lDdQck8O36Nm16uKn7E2\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--fdece22e-ca8f-46b7-a5c0-7dc2c134f973-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 28,\n",
      "              \"output_tokens\": 465,\n",
      "              \"total_tokens\": 493,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 465,\n",
      "      \"prompt_tokens\": 28,\n",
      "      \"total_tokens\": 493,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW3NGyjG7lDdQck8O36Nm16uKn7E2\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [9.30s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [9.30s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [9.31s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\\n\\n1. **文本预处理**：\\n   - **分词**：将文本分割成单词或短语。\\n   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\\n   - **词干提取和词形还原**：将词语还原为基本形式。\\n\\n2. **意图识别**：\\n   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\\n\\n3. **实体识别**：\\n   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\\n\\n4. **对话管理**：\\n   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\\n\\n5. **生成响应**：\\n   - **模板生成**：使用预定义的模板生成响应。\\n   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\\n\\n6. **上下文管理**：\\n   - 维护对话的上下文，以便在多轮对话中保持连贯性。\\n\\n7. **情感分析**：\\n   - 分析用户的情感状态，以便生成更合适的响应。\\n\\n8. **用户反馈和学习**：\\n   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\\n\\n9. **多轮对话处理**：\\n   - 处理复杂的对话场景，能够理解和管理多轮对话。\\n\\n10. **集成外部API**：\\n    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\\n\\n构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\"\n",
      "}\n",
      "AI回复： 构建聊天机器人通常涉及多种自然语言处理（NLP）技术，以下是一些关键技术和步骤：\n",
      "\n",
      "1. **文本预处理**：\n",
      "   - **分词**：将文本分割成单词或短语。\n",
      "   - **去除停用词**：去掉常见但对理解无帮助的词（如“的”、“是”等）。\n",
      "   - **词干提取和词形还原**：将词语还原为基本形式。\n",
      "\n",
      "2. **意图识别**：\n",
      "   - 使用分类算法（如支持向量机、随机森林、深度学习模型等）来识别用户的意图。\n",
      "\n",
      "3. **实体识别**：\n",
      "   - 识别文本中的关键实体（如人名、地点、日期等），通常使用命名实体识别（NER）技术。\n",
      "\n",
      "4. **对话管理**：\n",
      "   - 设计对话策略，决定如何根据用户输入生成合适的响应。可以使用状态机、规则引擎或强化学习等方法。\n",
      "\n",
      "5. **生成响应**：\n",
      "   - **模板生成**：使用预定义的模板生成响应。\n",
      "   - **自然语言生成（NLG）**：使用深度学习模型（如GPT、BERT等）生成更自然的语言。\n",
      "\n",
      "6. **上下文管理**：\n",
      "   - 维护对话的上下文，以便在多轮对话中保持连贯性。\n",
      "\n",
      "7. **情感分析**：\n",
      "   - 分析用户的情感状态，以便生成更合适的响应。\n",
      "\n",
      "8. **用户反馈和学习**：\n",
      "   - 收集用户反馈，利用这些数据不断改进模型和对话策略。\n",
      "\n",
      "9. **多轮对话处理**：\n",
      "   - 处理复杂的对话场景，能够理解和管理多轮对话。\n",
      "\n",
      "10. **集成外部API**：\n",
      "    - 根据需要集成天气、新闻、数据库等外部API，以提供更丰富的信息。\n",
      "\n",
      "构建聊天机器人时，选择合适的技术和工具（如NLTK、spaCy、Transformers等）以及平台（如Dialogflow、Rasa、Microsoft Bot Framework等）也非常重要。\n"
     ]
    }
   ],
   "source": [
    "response = conversation_win_chain.invoke(\n",
    "    {\"input\": \"如果要构建聊天机器人，具体要用什么自然语言处理技术?\"},\n",
    "    config = {\n",
    "        \"callbacks\": [ConsoleCallbackHandler()],\n",
    "        \"configurable\": {\"session_id\": \"test_session_02\"}  # 必须指定会话ID\n",
    "    }\n",
    ")\n",
    "print(\"AI回复：\", response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f96f5168-7577-4482-b931-c8f4f4a6e6f7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "d745e116-1f91-4568-bbd8-84d6dd8d66b8",
   "metadata": {},
   "source": [
    "# 封装完整的方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "id": "dc971d70-70b3-45ee-9a59-44256cab373a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Optional, List, Callable, Dict, Any\n",
    "from langchain_core.runnables import RunnableLambda\n",
    "from langchain_core.runnables.history import RunnableWithMessageHistory\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.tracers.stdout import ConsoleCallbackHandler\n",
    "from langchain_core.messages import BaseMessage, HumanMessage, AIMessage\n",
    "\n",
    "def build_windowed_chat_chain(\n",
    "    llm,\n",
    "    memory_manager,\n",
    "    session_id: str,\n",
    "    k: Optional[int] = None,\n",
    "    verbose: bool = False,\n",
    "    callbacks: Optional[List] = None\n",
    ") -> Callable[[str], str]:\n",
    "    \"\"\"\n",
    "    构建一个带滑窗历史的对话链，输出格式为：\n",
    "      Human: 用户输入\n",
    "      AI:    模型回复\n",
    "\n",
    "    参数：\n",
    "    - llm: ChatOpenAI 或其他 ChatModel 实例\n",
    "    - memory_manager: 负责 get_session(session_id) 和 get_window_view(session_id, k) 的管理器\n",
    "    - session_id: 需要共享历史的会话 ID\n",
    "    - window_k: 只读视图中保留的历史条数\n",
    "    - callbacks: 可选回调列表\n",
    "\n",
    "    返回：\n",
    "      调用 chain.invoke({\"input\": your_text}) 即可\n",
    "    \"\"\"\n",
    "\n",
    "    # 1. 定义带有历史变量的提示模板\n",
    "    prompt = PromptTemplate(\n",
    "        template=\"{history}\\nHuman: {input}\\nAI: \",\n",
    "        input_variables=[\"input\", \"history\"]\n",
    "    )\n",
    "    \n",
    "    # 2. RunnableLambda：将 View.messages 列表拼接为带前缀的多行历史字符串\n",
    "    def _format_history(data: Dict[str, Any]) -> Dict[str, str]:\n",
    "        msgs: List[BaseMessage] = data[\"history\"]\n",
    "        lines = []\n",
    "        for m in msgs:\n",
    "            if isinstance(m, HumanMessage):\n",
    "                lines.append(f\"Human: {m.content}\")\n",
    "            elif isinstance(m, AIMessage):\n",
    "                lines.append(f\"AI: {m.content}\")\n",
    "        history_text = \"\\n\".join(lines)\n",
    "        return {\"input\": data[\"input\"], \"history\": history_text}\n",
    "        \n",
    "    format_history = RunnableLambda(_format_history)\n",
    "    \n",
    "    # 3. 选择 get_session_history 工厂\n",
    "    if k is None:\n",
    "        get_session_history = lambda sid: memory_manager.get_session(sid)\n",
    "    else:\n",
    "        get_session_history = lambda sid: memory_manager.get_session_window(sid, k)\n",
    "\n",
    "    # 4. 组装带历史的核心链\n",
    "    core_runnable = format_history | prompt | llm | StrOutputParser()\n",
    "    chain = RunnableWithMessageHistory(\n",
    "        core_runnable,\n",
    "        get_session_history=get_session_history,\n",
    "        input_messages_key=\"input\",\n",
    "        history_messages_key=\"history\",\n",
    "        # output_messages_key=\"response\",\n",
    "    )\n",
    "\n",
    "    # 4. 返回一个简化的调用函数\n",
    "    def chat(user_input: str):\n",
    "        # 回调集合：根据 verbose 和 callbacks 合并\n",
    "        cbks = []\n",
    "        if verbose:\n",
    "            cbks.append(ConsoleCallbackHandler())\n",
    "        if callbacks:\n",
    "            cbks.extend(callbacks)\n",
    "        # 执行 invoke\n",
    "        res = chain.invoke(\n",
    "            {\"input\": user_input},\n",
    "            config={\n",
    "                \"callbacks\": cbks,\n",
    "                \"configurable\": {\"session_id\": session_id}\n",
    "            }\n",
    "        )\n",
    "        return res #[\"response\"]\n",
    "\n",
    "    return chat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "id": "739d6e63-7f0b-4804-a581-1932b05d9e6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 假设 llm、memory_manager 已初始化\n",
    "chat_full = build_windowed_chat_chain(\n",
    "    llm=llm,\n",
    "    memory_manager=memory_manager,\n",
    "    session_id=\"sess3\",\n",
    "    k=None,               # 全量历史\n",
    "    verbose=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "id": "c096ad47-9a87-4062-b23b-78d5de2dbf72",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\",\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过自然语言处理技术来理解和生成文本。我的工作原理是基于大量的数据和机器学习算法，能够识别语言模式并生成相应的回答。你可以问我各种问题，我会尽力提供准确和有用的信息！你还有其他想了解的吗？\\nHuman: 那你是如何学习的呢？\\nAI: 我通过分析大量的文本数据进行学习。这些数据包括书籍、文章、网站等各种来源。通过机器学习算法，我能够识别语言中的模式、结构和语义。训练过程中，我会不断调整我的参数，以提高对语言的理解和生成能力。虽然我不能像人类一样主动学习，但我可以根据已有的数据进行推理和回答问题。你还有其他想了解的内容吗？\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"我上一个问题是什么？\",\n",
      "  \"history\": \"Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过自然语言处理技术来理解和生成文本。我的工作原理是基于大量的数据和机器学习算法，能够识别语言模式并生成相应的回答。你可以问我各种问题，我会尽力提供准确和有用的信息！你还有其他想了解的吗？\\nHuman: 那你是如何学习的呢？\\nAI: 我通过分析大量的文本数据进行学习。这些数据包括书籍、文章、网站等各种来源。通过机器学习算法，我能够识别语言中的模式、结构和语义。训练过程中，我会不断调整我的参数，以提高对语言的理解和生成能力。虽然我不能像人类一样主动学习，但我可以根据已有的数据进行推理和回答问题。你还有其他想了解的内容吗？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 你好呀！\\nAI: 你好！有什么我可以帮助你的吗？\\nHuman: 我想知道你是如何工作的。\\nAI: 我通过自然语言处理技术来理解和生成文本。我的工作原理是基于大量的数据和机器学习算法，能够识别语言模式并生成相应的回答。你可以问我各种问题，我会尽力提供准确和有用的信息！你还有其他想了解的吗？\\nHuman: 那你是如何学习的呢？\\nAI: 我通过分析大量的文本数据进行学习。这些数据包括书籍、文章、网站等各种来源。通过机器学习算法，我能够识别语言中的模式、结构和语义。训练过程中，我会不断调整我的参数，以提高对语言的理解和生成能力。虽然我不能像人类一样主动学习，但我可以根据已有的数据进行推理和回答问题。你还有其他想了解的内容吗？\\nHuman: 我上一个问题是什么？\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [2.36s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 31,\n",
      "                \"prompt_tokens\": 211,\n",
      "                \"total_tokens\": 242,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW48ZdGYh7sffUnU6auK6fJUyIpmf\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--5276aa18-8788-441f-8093-2e6c0f2d4615-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 211,\n",
      "              \"output_tokens\": 31,\n",
      "              \"total_tokens\": 242,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 31,\n",
      "      \"prompt_tokens\": 211,\n",
      "      \"total_tokens\": 242,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW48ZdGYh7sffUnU6auK6fJUyIpmf\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [2.36s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [2.36s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [2.37s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\"\n",
      "}\n",
      "你上一个问题是“那你是如何学习的呢？”你想继续讨论这个话题吗，还是有其他问题想问？\n"
     ]
    }
   ],
   "source": [
    "# Earlier prompts already sent in this session, in order (kept for reference):\n",
    "#   1. 你好呀！ (Hello there!)\n",
    "#   2. 我想知道你是如何工作的。 (I want to know how you work.)\n",
    "#   3. 那你是如何学习的呢？ (Then how do you learn?)\n",
    "#   4. 我上一个问题是什么？ (What was my previous question?)\n",
    "\n",
    "# Send turn 4 through the full-history chain; with the complete buffer in\n",
    "# context the model can recall the previous question (see trace above).\n",
    "response = chat_full(\"我上一个问题是什么？\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "id": "4037e318-70f5-4a71-a071-731188c83a47",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a sliding-window chat chain for session \"sess4\": only the most\n",
    "# recent k rounds of history are sent to the LLM on each turn (presumably\n",
    "# k=2 rounds -> 4 messages; the verbose trace later shows only the last\n",
    "# two Human/AI exchanges surviving in context -- confirm against the\n",
    "# build_windowed_chat_chain implementation).\n",
    "chat_win = build_windowed_chat_chain(\n",
    "    llm=llm,\n",
    "    memory_manager=memory_manager,\n",
    "    session_id=\"sess4\",\n",
    "    k=2,                  # sliding window: keep last 2 rounds (4 messages)\n",
    "    verbose=True          # emit the [chain/start]/[chain/end] trace below\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "id": "d89739f8-b0a9-4d1f-97bc-774b7f24dde7",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] Entering Chain run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history> > chain:load_history] [0ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history > chain:RunnableParallel<history>] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:insert_history] [2ms] Exiting Chain run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] Entering Chain run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > chain:_format_history] [1ms] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\",\n",
      "  \"history\": \"Human: 这是我的第二个问题\\nAI: 请问你的第二个问题是什么呢？我在这里帮助你！\\nHuman: 这是我的第三个问题\\nAI: 请问你的第三个问题是什么呢？我很高兴能为你解答！\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] Entering Prompt run with input:\n",
      "\u001b[0m{\n",
      "  \"input\": \"这是我的第四个问题\",\n",
      "  \"history\": \"Human: 这是我的第二个问题\\nAI: 请问你的第二个问题是什么呢？我在这里帮助你！\\nHuman: 这是我的第三个问题\\nAI: 请问你的第三个问题是什么呢？我很高兴能为你解答！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > prompt:PromptTemplate] [0ms] Exiting Prompt run with output:\n",
      "\u001b[0m[outputs]\n",
      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] Entering LLM run with input:\n",
      "\u001b[0m{\n",
      "  \"prompts\": [\n",
      "    \"Human: Human: 这是我的第二个问题\\nAI: 请问你的第二个问题是什么呢？我在这里帮助你！\\nHuman: 这是我的第三个问题\\nAI: 请问你的第三个问题是什么呢？我很高兴能为你解答！\\nHuman: 这是我的第四个问题\\nAI:\"\n",
      "  ]\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > llm:ChatOpenAI] [1.78s] Exiting LLM run with output:\n",
      "\u001b[0m{\n",
      "  \"generations\": [\n",
      "    [\n",
      "      {\n",
      "        \"text\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\",\n",
      "        \"generation_info\": {\n",
      "          \"finish_reason\": \"stop\",\n",
      "          \"logprobs\": null\n",
      "        },\n",
      "        \"type\": \"ChatGeneration\",\n",
      "        \"message\": {\n",
      "          \"lc\": 1,\n",
      "          \"type\": \"constructor\",\n",
      "          \"id\": [\n",
      "            \"langchain\",\n",
      "            \"schema\",\n",
      "            \"messages\",\n",
      "            \"AIMessage\"\n",
      "          ],\n",
      "          \"kwargs\": {\n",
      "            \"content\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\",\n",
      "            \"additional_kwargs\": {\n",
      "              \"refusal\": null\n",
      "            },\n",
      "            \"response_metadata\": {\n",
      "              \"token_usage\": {\n",
      "                \"completion_tokens\": 19,\n",
      "                \"prompt_tokens\": 75,\n",
      "                \"total_tokens\": 94,\n",
      "                \"completion_tokens_details\": {\n",
      "                  \"accepted_prediction_tokens\": 0,\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"reasoning_tokens\": 0,\n",
      "                  \"rejected_prediction_tokens\": 0\n",
      "                },\n",
      "                \"prompt_tokens_details\": {\n",
      "                  \"audio_tokens\": 0,\n",
      "                  \"cached_tokens\": 0\n",
      "                }\n",
      "              },\n",
      "              \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "              \"system_fingerprint\": \"fp_0392822090\",\n",
      "              \"id\": \"chatcmpl-BW4BKceKCPHwnQzfs6z0w43z5VSBh\",\n",
      "              \"service_tier\": \"default\",\n",
      "              \"finish_reason\": \"stop\",\n",
      "              \"logprobs\": null\n",
      "            },\n",
      "            \"type\": \"ai\",\n",
      "            \"id\": \"run--7081d75c-4431-4cfe-915b-7cb94a1fec01-0\",\n",
      "            \"usage_metadata\": {\n",
      "              \"input_tokens\": 75,\n",
      "              \"output_tokens\": 19,\n",
      "              \"total_tokens\": 94,\n",
      "              \"input_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"cache_read\": 0\n",
      "              },\n",
      "              \"output_token_details\": {\n",
      "                \"audio\": 0,\n",
      "                \"reasoning\": 0\n",
      "              }\n",
      "            },\n",
      "            \"tool_calls\": [],\n",
      "            \"invalid_tool_calls\": []\n",
      "          }\n",
      "        }\n",
      "      }\n",
      "    ]\n",
      "  ],\n",
      "  \"llm_output\": {\n",
      "    \"token_usage\": {\n",
      "      \"completion_tokens\": 19,\n",
      "      \"prompt_tokens\": 75,\n",
      "      \"total_tokens\": 94,\n",
      "      \"completion_tokens_details\": {\n",
      "        \"accepted_prediction_tokens\": 0,\n",
      "        \"audio_tokens\": 0,\n",
      "        \"reasoning_tokens\": 0,\n",
      "        \"rejected_prediction_tokens\": 0\n",
      "      },\n",
      "      \"prompt_tokens_details\": {\n",
      "        \"audio_tokens\": 0,\n",
      "        \"cached_tokens\": 0\n",
      "      }\n",
      "    },\n",
      "    \"model_name\": \"gpt-4o-mini-2024-07-18\",\n",
      "    \"system_fingerprint\": \"fp_0392822090\",\n",
      "    \"id\": \"chatcmpl-BW4BKceKCPHwnQzfs6z0w43z5VSBh\",\n",
      "    \"service_tier\": \"default\"\n",
      "  },\n",
      "  \"run\": null,\n",
      "  \"type\": \"LLMResult\"\n",
      "}\n",
      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] Entering Parser run with input:\n",
      "\u001b[0m[inputs]\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence > parser:StrOutputParser] [0ms] Exiting Parser run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async > chain:RunnableSequence] [1.79s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory > chain:check_sync_or_async] [1.79s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\"\n",
      "}\n",
      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[chain:RunnableWithMessageHistory] [1.79s] Exiting Chain run with output:\n",
      "\u001b[0m{\n",
      "  \"output\": \"请问你的第四个问题是什么呢？我很乐意帮助你解答！\"\n",
      "}\n",
      "请问你的第四个问题是什么呢？我很乐意帮助你解答！\n"
     ]
    }
   ],
   "source": [
    "# Ask the 4th question through the windowed chain. The trace above shows\n",
    "# the formatted history contains only questions 2 and 3 (the k=2 window),\n",
    "# i.e. question 1 has already been evicted from the prompt context.\n",
    "response = chat_win(\"这是我的第四个问题\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "698d7c1c-6386-4469-9736-75383e342600",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e218ea53-53c0-4bef-b414-a76a8adb8292",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
