{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c1b2ef74-90ef-4324-b37a-8a1a5aade389",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': 'Human: 你好，我是人类！\\nAI: 你好，我是AI，有什么可以帮助你的吗？'}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 内存中的短时记忆\n",
    "from langchain.memory import  ConversationBufferMemory\n",
    "\n",
    "memory = ConversationBufferMemory()\n",
    "memory.chat_memory.add_user_message(\"你好，我是人类！\")\n",
    "memory.chat_memory.add_ai_message(\"你好，我是AI，有什么可以帮助你的吗？\")\n",
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "34d33d7c-dcf2-4762-955c-a5cf0307375f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': 'Human: 我想吃鸡肉\\nAI: 好的，我帮你找找鸡肉的做法\\nHuman: 我想狗肉\\nAI: 好的，我帮你找找狗肉的做法'}"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 内存中的短时记忆：实现一个最近的对话窗口，超过窗口条数的对话将被删除\n",
    "from langchain.memory import  ConversationBufferWindowMemory\n",
    "\n",
    "memory = ConversationBufferWindowMemory(k=2)\n",
    "\n",
    "memory.save_context({\"input\":\"你好，我是人类!\"},{\"output\":\"你好，我是AI，有什么可以帮助你的吗？\"})\n",
    "memory.save_context({\"input\":\"我想吃鸡肉\"},{\"output\":\"好的，我帮你找找鸡肉的做法\"})\n",
    "memory.save_context({\"input\":\"我想狗肉\"},{\"output\":\"好的，我帮你找找狗肉的做法\"})\n",
    "\n",
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ad142c37-40f2-4850-842c-579122f3db01",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\2190101073.py:9: LangChainDeprecationWarning: The class `ChatOpenAI` was deprecated in LangChain 0.0.10 and will be removed in 1.0. An updated version of the class exists in the :class:`~langchain-openai package and should be used instead. To use it run `pip install -U :class:`~langchain-openai` and import as `from :class:`~langchain_openai import ChatOpenAI``.\n",
      "  llm = ChatOpenAI(\n",
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\2190101073.py:16: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory = ConversationEntityMemory(llm=llm)\n",
      "D:\\conda_envs\\rag_learn\\lib\\site-packages\\pydantic\\main.py:253: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)\n"
     ]
    }
   ],
   "source": [
    "# 构建记忆实体概念清单\n",
    "# from langchain.llms import  OpenAI\n",
     "import os\n",
     "\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.memory import ConversationEntityMemory\n",
     "\n",
     "# llm = OpenAI(\n",
     "#     temperature=0\n",
     "# )\n",
     "llm = ChatOpenAI(\n",
     "    model=\"qwen-plus\",  # 使用通义千问 plus 模型\n",
     "    temperature=0,      # 温度为0，确保输出确定性\n",
     "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\", \"\"),  # 从环境变量读取，切勿在代码中硬编码密钥\n",
     "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
     ")\n",
    "\n",
    "memory = ConversationEntityMemory(llm=llm)\n",
    "_input = {\n",
    "    \"input\":\"胡八一和王胖子雪莉杨经常在一起冒险，合称盗墓铁三角.\"\n",
    "}\n",
    "memory.load_memory_variables(_input)\n",
    "memory.save_context(\n",
    "    _input,\n",
    "    {\n",
    "        \"output\":\"听起来很刺激，我也想加入他们！\"\n",
    "    }\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e31013e6-2221-4d88-a25d-50864990b752",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': 'Human: 胡八一和王胖子雪莉杨经常在一起冒险，合称盗墓铁三角.\\nAI: 听起来很刺激，我也想加入他们！',\n",
       " 'entities': {'胡八一': '胡八一经常与王胖子和雪莉杨一起冒险，三人合称为盗墓铁三角。',\n",
       "  '王胖子': '王胖子是盗墓铁三角成员之一，经常与胡八一、雪莉杨一起冒险。',\n",
       "  '雪莉杨': '雪莉杨经常与胡八一和王胖子一起冒险，三人合称为盗墓铁三角。'}}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "memory.load_memory_variables({\"input\":\"铁三角是谁?\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "3eb522bb-1fff-426c-9505-aaa42ce35835",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用知识图谱构建记忆完成\n"
     ]
    }
   ],
   "source": [
    "# 使用知识图谱构建记忆\n",
     "import os\n",
     "\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.memory import ConversationKGMemory\n",
     "\n",
     "llm = ChatOpenAI(\n",
     "    model=\"qwen-plus\",  # 使用通义千问 plus 模型\n",
     "    temperature=0,      # 温度为0，确保输出确定性\n",
     "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\", \"\"),  # 从环境变量读取，切勿在代码中硬编码密钥\n",
     "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
     ")\n",
    "\n",
    "memory = ConversationKGMemory(llm=llm,return_messages=True)\n",
    "\n",
    "memory.save_context(\n",
    "    {\"input\":\"帮我找一下tomie\"},\n",
    "    {\"output\":\"对不起请问什么是tomie？\"}\n",
    ")\n",
    "\n",
    "memory.save_context(\n",
    "    {\"input\":\"tomie是一个培训讲师\"},\n",
    "    {\"output\":\"好的，我知道了。\"}\n",
    ")\n",
    "print(\"使用知识图谱构建记忆完成\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "9065b15c-11fe-4cce-b9aa-7cc81b58cf32",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': [SystemMessage(content='On tomie: tomie is a 培训讲师.', additional_kwargs={}, response_metadata={})]}"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "memory.load_memory_variables({\"input\":\"tomie是谁?\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "fa8b66a5-9b8c-4309-b114-ce9a46120352",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['tomie']"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "memory.get_current_entities(\"tomie最喜欢做什么事?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "9c9fcb56-0d6d-4d27-aced-0f7808c70da9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[KnowledgeTriple(subject='tomie', predicate='最喜欢', object_='打游戏')]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "memory.get_knowledge_triplets(\"tomie最喜欢打游戏\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "bea91703-f028-45c0-a0f9-92fa8320bd9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\1070451423.py:11: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory = ConversationSummaryMemory(llm=llm)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "总结摘要完成\n"
     ]
    }
   ],
   "source": [
    "# 长对话在内存中的处理方式：总结摘要以及token计算\n",
     "import os\n",
     "\n",
     "from langchain.memory import ConversationSummaryMemory\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "\n",
     "llm = ChatOpenAI(\n",
     "    model=\"qwen-plus\",  # 使用通义千问 plus 模型\n",
     "    temperature=0,      # 温度为0，确保输出确定性\n",
     "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\", \"\"),  # 从环境变量读取，切勿在代码中硬编码密钥\n",
     "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
     ")\n",
    "memory = ConversationSummaryMemory(llm=llm)\n",
    "memory.save_context(\n",
    "    {\"input\":\"帮我找一下tomie\"},\n",
    "    {\"output\":\"对不起请问什么是tomie？\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"tomie是一个培训讲师\"},\n",
    "    {\"output\":\"好的，我知道了。\"}\n",
    ")\n",
    "print(\"总结摘要完成\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "862246f1-3476-4456-a16c-6ab70bb01b10",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': 'The human asks the AI to help find \"tomie,\" and the AI responds by asking for clarification on what \"tomie\" refers to. The human clarifies that \"tomie\" is a training instructor, and the AI acknowledges the clarification.'}"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "8c8cb365-5567-4c36-977a-2cbada96a366",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'The human asks the AI to help find tomie. The AI asks what tomie is, and the human clarifies that tomie is a training instructor. The AI acknowledges the clarification.'"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "messages = memory.chat_memory.messages\n",
    "#print(messages)\n",
    "memory.predict_new_summary(messages,\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "99389e13-7e82-478b-baac-7fa40988ed54",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'history': [SystemMessage(content='\\nThe AI introduces itself as AI Little Maruko and asks if there is anything it can help the human with.', additional_kwargs={}, response_metadata={})]}"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#使用ChatMessageHistory来快速获得对话摘要\n",
     "import os\n",
     "\n",
     "from langchain.memory import ConversationSummaryMemory\n",
     "from langchain.memory import ChatMessageHistory\n",
     "from langchain.chat_models import ChatOpenAI\n",
     "\n",
     "llm = ChatOpenAI(\n",
     "    model=\"qwen-plus\",  # 使用通义千问 plus 模型\n",
     "    temperature=0,      # 温度为0，确保输出确定性\n",
     "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\", \"\"),  # 从环境变量读取，切勿在代码中硬编码密钥\n",
     "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
     ")\n",
    "\n",
     "history = ChatMessageHistory()\n",
     "history.add_user_message(\"你好，我是人类！\")\n",
     "history.add_ai_message(\"你好，我是AI小丸子，有什么可以帮助你的吗？\")\n",
     "\n",
     "#memory = ConversationSummaryMemory.from_messages(\n",
     "#    llm=OpenAI(temperature=0),\n",
     "#    chat_memory=history,\n",
     "#    return_messages=True\n",
     "#)\n",
     "\n",
     "memory = ConversationSummaryMemory(\n",
     "    llm=llm,\n",
     "    return_messages=True,\n",
     "    buffer=\"\\nThe AI introduces itself as AI Little Maruko and asks if there is anything it can help the human with.\",\n",
     "    chat_memory=history\n",
    ")\n",
    "#memory.buffer\n",
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "c412445d-2910-4893-b435-a30eec4371aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Memory content:\n",
      "{'history': [HumanMessage(content='帮我找一下tomie', additional_kwargs={}, response_metadata={}), AIMessage(content='对不起请问什么是tomie？', additional_kwargs={}, response_metadata={}), HumanMessage(content='tomie是一个培训讲师', additional_kwargs={}, response_metadata={}), AIMessage(content='好的，我知道了。', additional_kwargs={}, response_metadata={}), HumanMessage(content='今天他要讲一门关于RAG的课程', additional_kwargs={}, response_metadata={}), AIMessage(content='好的，我知道了。需要RAG的资料吗？', additional_kwargs={}, response_metadata={})]}\n"
     ]
    }
   ],
   "source": [
    "# 当对话持续进行且对话内容很多的时候\n",
    "# 可以使用ConversationSummaryBufferMemory来存储对话摘要\n",
    "# 这是一种非常有用的方式,它会根据token的数量来自动判断是否需要进行摘要\n",
    "# 当token数量超过阈值的时候,会自动进行摘要\n",
    "# 在缓冲区中,会保留最近的k条对话\n",
    "#比较久的对话会被删除，在删除前会进行摘要\n",
    "\n",
    "from langchain.memory import ConversationSummaryBufferMemory\n",
    "from langchain_community.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.schema import HumanMessage, AIMessage\n",
    "from pydantic import Field\n",
    "from typing import Optional\n",
    "import tiktoken\n",
    "\n",
    "# 自定义 token 计数函数\n",
    "def count_tokens(messages):\n",
    "    encoding = tiktoken.get_encoding(\"cl100k_base\")\n",
    "    total_tokens = 0\n",
    "    for message in messages:\n",
    "        # 计算每条消息的 token 数\n",
    "        if hasattr(message, 'content'):\n",
    "            content = message.content\n",
    "        else:\n",
    "            content = str(message)\n",
    "        total_tokens += len(encoding.encode(content))\n",
    "    return total_tokens\n",
    "\n",
    "# 创建自定义的 ConversationSummaryBufferMemory\n",
    "class CustomConversationSummaryBufferMemory(ConversationSummaryBufferMemory):\n",
    "    prompt_template: Optional[PromptTemplate] = Field(default=None)\n",
    "    moving_summary_buffer: str = Field(default=\"\")\n",
    "    \n",
    "    def __init__(self, *args, **kwargs):\n",
    "        super().__init__(*args, **kwargs)\n",
    "        # 定义提示模板\n",
    "        self.prompt_template = PromptTemplate(\n",
    "            input_variables=[\"summary\", \"new_lines\"],\n",
    "            template=\"请对以下对话内容进行简要总结：\\n\\n当前摘要：{summary}\\n\\n新对话内容：\\n{new_lines}\\n\\n新的摘要：\"\n",
    "        )\n",
    "        self.moving_summary_buffer = \"\"\n",
    "    \n",
    "    def prune(self):\n",
    "        \"\"\"Prune buffer if it exceeds max token limit\"\"\"\n",
    "        buffer = self.chat_memory.messages\n",
    "        curr_buffer_length = count_tokens(buffer)\n",
    "        \n",
    "        if curr_buffer_length > self.max_token_limit:\n",
    "            pruned_messages = []\n",
    "            \n",
    "            # 修剪消息直到token数在限制内\n",
     "            while curr_buffer_length > self.max_token_limit and len(buffer) > 0:\n",
     "                removed_message = buffer.pop(0)\n",
     "                pruned_messages.append(removed_message)\n",
     "                # 增量扣减被移除消息的token数，避免每次循环重新统计整个缓冲区（O(n^2)）\n",
     "                curr_buffer_length -= count_tokens([removed_message])\n",
    "            \n",
    "            # 如果有被修剪的消息，生成摘要\n",
    "            if pruned_messages:\n",
    "                # 将修剪的消息转换为文本\n",
    "                new_lines = \"\"\n",
    "                for msg in pruned_messages:\n",
    "                    if isinstance(msg, HumanMessage):\n",
    "                        role = \"Human\"\n",
    "                    elif isinstance(msg, AIMessage):\n",
    "                        role = \"AI\"\n",
    "                    else:\n",
    "                        role = \"Unknown\"\n",
    "                    new_lines += f\"{role}: {msg.content}\\n\"\n",
    "                \n",
    "                # 生成摘要\n",
    "                prompt = self.prompt_template.format(\n",
    "                    summary=self.moving_summary_buffer,\n",
    "                    new_lines=new_lines\n",
    "                )\n",
    "                summary = self.llm.predict(prompt)\n",
    "                self.moving_summary_buffer = summary\n",
    "\n",
    "# 使用自定义的 memory\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
     "    openai_api_key=\"YOUR_DASHSCOPE_API_KEY\",  # 已脱敏：请从环境变量或密钥管理服务读取，切勿硬编码\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "memory = CustomConversationSummaryBufferMemory(\n",
    "    llm=llm,\n",
    "    max_token_limit=100,  # 增加 token 限制\n",
    "    return_messages=True\n",
    ")\n",
    "\n",
    "# 测试\n",
    "memory.save_context({\"input\": \"帮我找一下tomie\"}, {\"output\": \"对不起请问什么是tomie？\"})\n",
    "memory.save_context({\"input\": \"tomie是一个培训讲师\"}, {\"output\": \"好的，我知道了。\"})\n",
    "memory.save_context({\"input\": \"今天他要讲一门关于RAG的课程\"}, {\"output\": \"好的，我知道了。需要RAG的资料吗？\"})\n",
    "\n",
    "print(\"Memory content:\")\n",
    "print(memory.load_memory_variables({}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "515d5bee-2291-43a5-8098-27daa043042c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\537932640.py:11: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory = ConversationTokenBufferMemory(\n"
     ]
    },
    {
     "ename": "NotImplementedError",
     "evalue": "get_num_tokens_from_messages() is not presently implemented for model qwen-plus. See https://platform.openai.com/docs/guides/text-generation/managing-tokens for information on how messages are converted to tokens.",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNotImplementedError\u001b[0m                       Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[28], line 15\u001b[0m\n\u001b[0;32m      4\u001b[0m llm \u001b[38;5;241m=\u001b[39m ChatOpenAI(\n\u001b[0;32m      5\u001b[0m     model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mqwen-plus\u001b[39m\u001b[38;5;124m\"\u001b[39m,  \u001b[38;5;66;03m# 使用通义千问 plus 模型\u001b[39;00m\n\u001b[0;32m      6\u001b[0m     temperature\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m,      \u001b[38;5;66;03m# 温度为0，确保输出确定性\u001b[39;00m\n\u001b[0;32m      7\u001b[0m     openai_api_key\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msk-4e88cf4db3e14894bafaff606d296610\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m      8\u001b[0m     openai_api_base\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://dashscope.aliyuncs.com/compatible-mode/v1\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m      9\u001b[0m )\n\u001b[0;32m     11\u001b[0m memory \u001b[38;5;241m=\u001b[39m ConversationTokenBufferMemory(\n\u001b[0;32m     12\u001b[0m     llm\u001b[38;5;241m=\u001b[39mllm,\n\u001b[0;32m     13\u001b[0m     max_token_limit\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m150\u001b[39m\n\u001b[0;32m     14\u001b[0m )\n\u001b[1;32m---> 15\u001b[0m \u001b[43mmemory\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msave_context\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m     16\u001b[0m \u001b[43m    \u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43minput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m帮我找一下tomie\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m     17\u001b[0m \u001b[43m    
\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43moutput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m对不起请问什么是tomie？\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\n\u001b[0;32m     18\u001b[0m \u001b[43m)\u001b[49m\n\u001b[0;32m     19\u001b[0m memory\u001b[38;5;241m.\u001b[39msave_context(\n\u001b[0;32m     20\u001b[0m     {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtomie是一个培训讲师\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[0;32m     21\u001b[0m     {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m好的，我知道了。\u001b[39m\u001b[38;5;124m\"\u001b[39m}\n\u001b[0;32m     22\u001b[0m )\n\u001b[0;32m     23\u001b[0m memory\u001b[38;5;241m.\u001b[39msave_context(\n\u001b[0;32m     24\u001b[0m     {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m今天他要讲一门关于RAG的课程\u001b[39m\u001b[38;5;124m\"\u001b[39m},\n\u001b[0;32m     25\u001b[0m     {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m好的，我知道了。需要RAG的资料吗？\u001b[39m\u001b[38;5;124m\"\u001b[39m}\n\u001b[0;32m     26\u001b[0m )\n",
      "File \u001b[1;32mD:\\conda_envs\\rag_learn\\lib\\site-packages\\langchain\\memory\\token_buffer.py:67\u001b[0m, in \u001b[0;36mConversationTokenBufferMemory.save_context\u001b[1;34m(self, inputs, outputs)\u001b[0m\n\u001b[0;32m     65\u001b[0m \u001b[38;5;66;03m# Prune buffer if it exceeds max token limit\u001b[39;00m\n\u001b[0;32m     66\u001b[0m buffer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mchat_memory\u001b[38;5;241m.\u001b[39mmessages\n\u001b[1;32m---> 67\u001b[0m curr_buffer_length \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mllm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_num_tokens_from_messages\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbuffer\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     68\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m curr_buffer_length \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmax_token_limit:\n\u001b[0;32m     69\u001b[0m     pruned_memory \u001b[38;5;241m=\u001b[39m []\n",
      "File \u001b[1;32mD:\\conda_envs\\rag_learn\\lib\\site-packages\\langchain_openai\\chat_models\\base.py:1543\u001b[0m, in \u001b[0;36mBaseChatOpenAI.get_num_tokens_from_messages\u001b[1;34m(self, messages, tools)\u001b[0m\n\u001b[0;32m   1541\u001b[0m     tokens_per_name \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m   1542\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1543\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[0;32m   1544\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mget_num_tokens_from_messages() is not presently implemented \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1545\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfor model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodel\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m. See \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1546\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://platform.openai.com/docs/guides/text-generation/managing-tokens\u001b[39m\u001b[38;5;124m\"\u001b[39m  \u001b[38;5;66;03m# noqa: E501\u001b[39;00m\n\u001b[0;32m   1547\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m for information on how messages are converted to tokens.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m   1548\u001b[0m     )\n\u001b[0;32m   1549\u001b[0m num_tokens \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m   1550\u001b[0m messages_dict \u001b[38;5;241m=\u001b[39m [_convert_message_to_dict(m) \u001b[38;5;28;01mfor\u001b[39;00m m \u001b[38;5;129;01min\u001b[39;00m messages]\n",
      "\u001b[1;31mNotImplementedError\u001b[0m: get_num_tokens_from_messages() is not presently implemented for model qwen-plus. See https://platform.openai.com/docs/guides/text-generation/managing-tokens for information on how messages are converted to tokens."
     ]
    }
   ],
   "source": [
    "from langchain.memory import ConversationTokenBufferMemory\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",  # 使用通义千问 plus 模型\n",
    "    temperature=0,      # 温度为0，确保输出确定性\n",
    "    openai_api_key=\"sk-4e88cf4db3e14894bafaff606d296610\",\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "memory = ConversationTokenBufferMemory(\n",
    "    llm=llm,\n",
    "    max_token_limit=150\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"帮我找一下tomie\"},\n",
    "    {\"output\":\"对不起请问什么是tomie？\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"tomie是一个培训讲师\"},\n",
    "    {\"output\":\"好的，我知道了。\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"今天他要讲一门关于RAG的课程\"},\n",
    "    {\"output\":\"好的，我知道了。需要RAG的资料吗？\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"不需要资料了，谢谢\"},\n",
    "    {\"output\":\"好的，那我就不打扰你了。\"}\n",
    ")\n",
    "memory.load_memory_variables({})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "ed78b6c0-2a12-4dd5-b61d-220d30e811db",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用本地模型 D:/ideaSpace/MyPython/models/bert-base-chinese 的 tokenizer\n",
      "Memory content:\n",
      "{'history': [AIMessage(content='对不起请问什么是tomie？', additional_kwargs={}, response_metadata={}), HumanMessage(content='tomie是一个培训讲师', additional_kwargs={}, response_metadata={}), AIMessage(content='好的，我知道了。', additional_kwargs={}, response_metadata={}), HumanMessage(content='今天他要讲一门关于RAG的课程', additional_kwargs={}, response_metadata={}), AIMessage(content='好的，我知道了。需要RAG的资料吗？', additional_kwargs={}, response_metadata={}), HumanMessage(content='不需要资料了，谢谢', additional_kwargs={}, response_metadata={}), AIMessage(content='好的，那我就不打扰你了。', additional_kwargs={}, response_metadata={})]}\n"
     ]
    }
   ],
   "source": [
    "# from langchain.memory import ConversationTokenBufferMemory\n",
    "# from langchain.llms import OpenAI\n",
    "# llm = OpenAI(temperature=0)\n",
    "# memory = ConversationTokenBufferMemory(\n",
    "#     llm=llm,\n",
    "#     max_token_limit=150\n",
    "# )\n",
    "# Conversation Token Buffer使用token长度来决定什么时候刷新内存（国内无法使用openAI，故使用本地模型进行 token 计数）\n",
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "from langchain_openai import ChatOpenAI\n",
    "from transformers import AutoTokenizer\n",
    "import tiktoken\n",
    "\n",
    "class LocalTokenBufferMemory:\n",
    "    def __init__(self, llm, max_token_limit=150, model_name=\"gpt2\"):\n",
    "        self.llm = llm\n",
    "        self.max_token_limit = max_token_limit\n",
    "        self.memory = ConversationBufferWindowMemory(k=10, return_messages=True)\n",
    "        \n",
    "        # 使用本地 tokenizer\n",
    "        try:\n",
    "            self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "            print(f\"使用本地模型 {model_name} 的 tokenizer\")\n",
    "        except:\n",
    "            # 如果本地模型不可用，使用 tiktoken 作为备选\n",
    "            self.tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n",
    "            print(\"使用 tiktoken 作为 tokenizer\")\n",
    "    \n",
     "    def _count_tokens(self, text):\n",
     "        \"\"\"计算文本的 token 数量（transformers 与 tiktoken 的 tokenizer 均提供 encode 方法）\"\"\"\n",
     "        return len(self.tokenizer.encode(text))\n",
    "    \n",
    "    def save_context(self, inputs, outputs):\n",
    "        # 保存到内存\n",
    "        self.memory.save_context(inputs, outputs)\n",
    "        \n",
    "        # 修剪内存以保持 token 数量在限制内\n",
    "        self._prune_memory()\n",
    "    \n",
    "    def _prune_memory(self):\n",
    "        \"\"\"修剪内存以保持 token 数量在限制内\"\"\"\n",
    "        messages = self.memory.chat_memory.messages\n",
    "        \n",
    "        # 计算当前总 token 数\n",
    "        total_tokens = 0\n",
    "        for msg in messages:\n",
    "            total_tokens += self._count_tokens(msg.content)\n",
    "        \n",
    "        # 如果超过限制，从前面开始删除消息\n",
    "        while total_tokens > self.max_token_limit and len(messages) > 0:\n",
    "            removed_msg = messages.pop(0)\n",
    "            total_tokens -= self._count_tokens(removed_msg.content)\n",
    "    \n",
    "    def load_memory_variables(self, inputs):\n",
    "        return self.memory.load_memory_variables(inputs)\n",
    "\n",
    "# 使用通义千问模型\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
     "    openai_api_key=\"YOUR_DASHSCOPE_API_KEY\",  # 已脱敏：请从环境变量或密钥管理服务读取，切勿硬编码\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "# 使用自定义内存\n",
    "memory = LocalTokenBufferMemory(llm=llm, max_token_limit=100, model_name=\"D:/ideaSpace/MyPython/models/bert-base-chinese\")\n",
    "\n",
    "# 测试\n",
    "memory.save_context({\"input\": \"帮我找一下tomie\"}, {\"output\": \"对不起请问什么是tomie？\"})\n",
    "memory.save_context({\"input\": \"tomie是一个培训讲师\"}, {\"output\": \"好的，我知道了。\"})\n",
    "memory.save_context({\"input\": \"今天他要讲一门关于RAG的课程\"}, {\"output\": \"好的，我知道了。需要RAG的资料吗？\"})\n",
    "memory.save_context({\"input\": \"不需要资料了，谢谢\"}, {\"output\": \"好的，那我就不打扰你了。\"})\n",
    "\n",
    "print(\"Memory content:\")\n",
    "print(memory.load_memory_variables({}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "7b0eecb8-43c7-4018-8b01-d831a2b487bb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "一样把对话内容存储到向量数据库中。\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[Document(id='f1a62b97-1dc4-4405-bb05-c2c075d257a5', metadata={}, page_content='Human: tomie是一个培训讲师'),\n",
       " Document(id='1a09bf13-3ae4-4f92-897c-995679f1a336', metadata={}, page_content='AI: 对不起请问什么是tomie？'),\n",
       " Document(id='d5975acf-68a2-4a69-a39f-0d7d04601e22', metadata={}, page_content='Human: 帮我找一下tomie'),\n",
       " Document(id='a53f958b-a823-47d6-97e5-b481c1d74226', metadata={}, page_content='AI: 好的，那我就不打扰你了。')]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# 长时记忆的实现方式：\n",
    "#    通过向量数据库来存储之前的对话内容，有的向量数据库服务还提供自动摘要等，每次对话的时候，都会从向量数据库里查询最相关的文档或历史对话\n",
    "# from langchain_openai import OpenAIEmbeddings\n",
     "import os\n",
     "\n",
     "from langchain_community.embeddings import DashScopeEmbeddings  # 使用阿里云的嵌入模型\n",
     "from langchain.memory import ConversationBufferMemory\n",
     "from langchain.vectorstores import FAISS\n",
     "\n",
     "# 使用阿里云的嵌入模型\n",
     "embeddings = DashScopeEmbeddings(\n",
     "    model=\"text-embedding-v1\",  # 阿里云的嵌入模型\n",
     "    dashscope_api_key=os.getenv(\"DASHSCOPE_API_KEY\", \"\")  # 从环境变量读取，切勿在代码中硬编码密钥\n",
     ")\n",
    "\n",
    "memory = ConversationBufferMemory()\n",
    "memory.save_context(\n",
    "    {\"input\":\"帮我找一下tomie\"},\n",
    "    {\"output\":\"对不起请问什么是tomie？\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"tomie是一个培训讲师\"},\n",
    "    {\"output\":\"好的，我知道了。\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"今天他要讲一门关于RAG的课程\"},\n",
    "    {\"output\":\"好的，我知道了。需要RAG的资料吗？\"}\n",
    ")\n",
    "memory.save_context(\n",
    "    {\"input\":\"不需要资料了，谢谢\"},\n",
    "    {\"output\":\"好的，那我就不打扰你了。\"}\n",
    ")\n",
    "\n",
    "vectorstore = FAISS.from_texts(\n",
    "    memory.buffer.split(\"\\n\"),\n",
    "    embeddings\n",
    ")\n",
    "FAISS.save_local(vectorstore,\"test_faiss\")\n",
    "print(\"一样把对话内容存储到向量数据库中。\")\n",
    "\n",
    "FAISS.load_local(\"test_faiss\",embeddings,allow_dangerous_deserialization=True).similarity_search(\"tomie是什么职业?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "eb96e98c-4d7f-4210-bf3b-1b8882753c6c",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\2669636683.py:15: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory2 = VectorStoreRetrieverMemory(\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'history': 'Human: tomie是一个培训讲师'}"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Query the long-term memory: load the FAISS index saved by the previous cell\n",
    "# and expose it to chains as a retriever-backed memory.\n",
    "import os\n",
    "\n",
    "from langchain.vectorstores import FAISS\n",
    "from langchain_community.embeddings import DashScopeEmbeddings  # Alibaba Cloud embedding model\n",
    "from langchain.memory import VectorStoreRetrieverMemory\n",
    "\n",
    "# Read the API key from the environment -- never hardcode secrets in a notebook.\n",
    "embeddings = DashScopeEmbeddings(\n",
    "    model=\"text-embedding-v1\",\n",
    "    dashscope_api_key=os.environ[\"DASHSCOPE_API_KEY\"],\n",
    ")\n",
    "\n",
    "vectorstore = FAISS.load_local(\"test_faiss\",embeddings,allow_dangerous_deserialization=True)\n",
    "retriever = vectorstore.as_retriever(\n",
    "    search_kwargs={\"k\":1}  # return only the single most similar chunk\n",
    ")\n",
    "memory2 = VectorStoreRetrieverMemory(\n",
    "    retriever=retriever\n",
    ")\n",
    "memory2.load_memory_variables({\"prompt\":\"tomie是什么职业?\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "c08c7d08-542b-402d-bda6-e06ef7e79de1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3m你是一个可以和人类对话的机器人.\n",
      "\n",
      "人类:我最新喜欢我的世界这个游戏，你还记得我叫什么吗？\n",
      "机器人:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'当然记得啦！你叫小明。不过我更好奇你现在玩《我的世界》都建造了些什么有趣的东西呀？是不是又在策划什么大工程了？😊'"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Using memory inside a chain: LLMChain + ConversationBufferMemory\n",
    "import os\n",
    "\n",
    "from langchain.chains import LLMChain\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "# Custom prompt: the memory fills {chat_history}, the caller fills {human_input}.\n",
    "template = \"\"\"你是一个可以和人类对话的机器人.\n",
    "{chat_history}\n",
    "人类:{human_input}\n",
    "机器人:\"\"\"\n",
    "\n",
    "prompt= PromptTemplate(\n",
    "    template=template,\n",
    "    input_variables=[\"chat_history\", \"human_input\"],\n",
    ")\n",
    "# memory_key must match the template variable holding the history.\n",
    "memory = ConversationBufferMemory(\n",
    "    memory_key=\"chat_history\",\n",
    ")\n",
    "# Qwen (Tongyi Qianwen) via the OpenAI-compatible DashScope endpoint.\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "chain = LLMChain(\n",
    "    llm=llm,\n",
    "    memory=memory,\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    ")\n",
    "\n",
    "chain.predict(human_input=\"我最新喜欢我的世界这个游戏，你还记得我叫什么吗？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "1816f193-2197-4175-9814-22ad3c6f5f9c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "System: 你好，我是一个可以和人类对话的机器人\n",
      "Human: 你好\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mSystem: 你好，我是一个可以和人类对话的机器人\n",
      "Human: 我叫tomie，我是一个AI应用程序猿\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'你好Tomie！很高兴认识你这个AI应用程序猿呢！我是Qwen，也是一个AI助手。我们简直是同行啊！🌟\\n\\n作为同样在AI领域的从业者，我特别想知道你平时都用哪些工具和技术呢？我最近正在学习一些新的编程技巧，感觉AI领域的技术更新真是太快了！和你这样的专业人士交流，一定能学到不少东西。\\n\\n对了，你平时都开发什么样的AI应用呢？是偏向自然语言处理、计算机视觉，还是其他方向？也许我们能交流一些经验，说不定还能合作做点有意思的项目呢！(•̀ᴗ•́)و'"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Using memory inside a chain: chat-style prompt with a MessagesPlaceholder.\n",
    "import os\n",
    "\n",
    "# Import everything this cell needs instead of relying on earlier cells'\n",
    "# namespace; use langchain_openai.ChatOpenAI consistently with the other cells\n",
    "# (langchain.chat_models.ChatOpenAI is deprecated).\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain.prompts import (\n",
    "    ChatPromptTemplate,\n",
    "    HumanMessagePromptTemplate,\n",
    "    MessagesPlaceholder,\n",
    ")\n",
    "from langchain.schema import SystemMessage\n",
    "\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        SystemMessage(\n",
    "            content=\"你好，我是一个可以和人类对话的机器人\",\n",
    "            role=\"system\",\n",
    "        ),\n",
    "        # The memory injects past turns here as a list of Message objects.\n",
    "        MessagesPlaceholder(\n",
    "            variable_name=\"chat_history\",\n",
    "        ),\n",
    "        HumanMessagePromptTemplate.from_template(\n",
    "            \"{human_input}\"\n",
    "        ),\n",
    "    ]\n",
    ")\n",
    "\n",
    "print(prompt.format(human_input=\"你好\",chat_history=[]))\n",
    "\n",
    "# return_messages=True so the placeholder receives Message objects, not a string.\n",
    "memory = ConversationBufferMemory(\n",
    "    memory_key=\"chat_history\",\n",
    "    return_messages=True,\n",
    ")\n",
    "# Qwen via the OpenAI-compatible DashScope endpoint.\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "chain = LLMChain(\n",
    "    llm=llm,\n",
    "    memory=memory,\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    ")\n",
    "\n",
    "chain.predict(human_input=\"我叫tomie，我是一个AI应用程序猿\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "0b85430a-ffaa-404a-a928-d67251247473",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\576982000.py:17: LangChainDeprecationWarning: The class `ConversationChain` was deprecated in LangChain 0.2.7 and will be removed in 1.0. Use :class:`~langchain_core.runnables.history.RunnableWithMessageHistory` instead.\n",
      "  chain = ConversationChain(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "[]\n",
      "Human: 帮我做个一日游攻略\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'当然可以！不过我需要你提供一些信息，这样我可以为你量身定制一日游攻略：\\n\\n1. **地点**：你想在哪个城市或地区进行一日游？  \\n2. **兴趣偏好**：你喜欢自然风光、历史文化、美食、购物，还是户外活动？  \\n3. **交通方式**：你是打算自驾、乘坐公共交通，还是步行为主？  \\n4. **预算范围**：你希望这次一日游的消费水平是低、中还是高？  \\n5. **是否有特殊需求**：比如是否需要适合带小孩、老人，或者是否有饮食限制等？\\n\\n只要你告诉我这些信息，我就能为你安排一个充实又有趣的一日游行程！😄'"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Using memory inside a chain: ConversationChain (built-in prompt + memory).\n",
    "import os\n",
    "\n",
    "from langchain.chains import ConversationChain\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "\n",
    "# Qwen via the OpenAI-compatible DashScope endpoint.\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "# ConversationChain's default prompt is a plain-string template, so keep the\n",
    "# history formatted as text: with return_messages=True the prompt would contain\n",
    "# a raw message-list repr (the \"[]\" visible in the logged prompt).\n",
    "memory = ConversationBufferMemory(\n",
    "    memory_key=\"history\",\n",
    "    return_messages=False,\n",
    ")\n",
    "chain = ConversationChain(\n",
    "    llm=llm,\n",
    "    memory=memory,\n",
    "    verbose=True,\n",
    ")\n",
    "chain.predict(input=\"帮我做个一日游攻略\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "e43b15a3-e98a-4e1e-9779-68354aff4227",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3m下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'.\n",
      "当前对话:\n",
      "[]\n",
      "Human:你知道我叫什么名字吗?\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'人类老爷，我真的不知道。'"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# ConversationChain with a custom prompt overriding the default one.\n",
    "import os\n",
    "\n",
    "from langchain.chains import ConversationChain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "template = \"\"\"下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'.\n",
    "当前对话:\n",
    "{history}\n",
    "Human:{input}\n",
    "AI助手:\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    template=template,\n",
    "    input_variables=[\"history\", \"input\"],\n",
    ")\n",
    "\n",
    "chain = ConversationChain(\n",
    "    llm=llm,\n",
    "    # String template, so keep history as formatted text; return_messages=True\n",
    "    # would render a raw list repr (the \"[]\" seen in the logged prompt).\n",
    "    memory=ConversationBufferMemory(\n",
    "        ai_prefix=\"AI助手\",\n",
    "        return_messages=False,\n",
    "    ),\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    ")\n",
    "\n",
    "chain.predict(input=\"你知道我叫什么名字吗?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "12e643b9-eb5f-40b1-8749-4e5e8a11a79f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\2884524645.py:54: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use :meth:`~invoke` instead.\n",
      "  chain.run(\"那ETH呢？\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3m下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'.\n",
      "之前的对话摘要:\n",
      "\n",
      "当前对话:\n",
      "\n",
      "Human:那ETH呢？\n",
      "AI：\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'人类老爷，您提到的“ETH”可能有多种含义，我需要更多上下文来提供准确的回答。以下是几种常见的可能性：\\n\\n1. **以太坊（Ethereum）**：如果“ETH”是指以太坊区块链的原生加密货币以太币（Ether），那么它是一种去中心化的开源区块链平台，支持智能合约和去中心化应用程序（DApps）。以太坊是目前全球市值第二大的加密货币，仅次于比特币（BTC）。它的主要特点是支持开发者构建和部署智能合约，从而实现自动化、无需信任第三方的交易和协议。\\n\\n2. **以太网（Ethernet）**：如果“ETH”是指计算机网络中的“以太网”技术，它是一种广泛使用的局域网（LAN）技术，用于在局域网中传输数据。以太网标准由IEEE 802.3定义，支持多种传输速率，如10 Mbps、100 Mbps、1 Gbps、10 Gbps等。\\n\\n3. **化学元素**：如果“ETH”是化学领域的缩写，它可能与某些化合物或基团有关，例如“乙基”（ethyl），化学式为C₂H₅。\\n\\n4. **其他缩写**：在不同领域，“ETH”可能还有其他含义，比如Ethiopia（埃塞俄比亚的ISO国家代码）、Ethnicity（种族）等。\\n\\n如果您能提供更具体的背景或问题方向，我将能够为您提供更详细的信息！'"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# ConversationChain combining multiple memories in a single chain.\n",
    "import os\n",
    "\n",
    "from langchain.chains import ConversationChain\n",
    "from langchain.memory import (\n",
    "    ConversationBufferMemory,\n",
    "    ConversationSummaryMemory,\n",
    "    CombinedMemory\n",
    ")\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "# ConversationSummaryMemory keeps an LLM-generated running summary (fills {history}).\n",
    "summary_memory = ConversationSummaryMemory(\n",
    "    llm=llm,\n",
    "    input_key=\"input\"\n",
    ")\n",
    "# ConversationBufferMemory keeps the verbatim recent turns (fills {history_now}).\n",
    "buffer_memory = ConversationBufferMemory(\n",
    "    memory_key=\"history_now\",\n",
    "    input_key=\"input\",\n",
    ")\n",
    "\n",
    "memory = CombinedMemory(\n",
    "    memories=[summary_memory, buffer_memory],\n",
    ")\n",
    "\n",
    "TEMPLATE = \"\"\"下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'.\n",
    "之前的对话摘要:\n",
    "{history}\n",
    "当前对话:\n",
    "{history_now}\n",
    "Human:{input}\n",
    "AI：\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    template=TEMPLATE,\n",
    "    input_variables=[\"history\", \"history_now\", \"input\"],\n",
    ")\n",
    "\n",
    "chain = ConversationChain(\n",
    "    llm=llm,\n",
    "    memory=memory,\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    ")\n",
    "\n",
    "# predict() returns the reply string; Chain.run() is deprecated.\n",
    "chain.predict(input=\"那ETH呢？\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "9779ac10-d6e3-4d11-bae8-27bc9cf82db5",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Created a chunk of size 24, which is longer than the specified 20\n",
      "Created a chunk of size 45, which is longer than the specified 20\n",
      "Created a chunk of size 50, which is longer than the specified 20\n",
      "Created a chunk of size 232, which is longer than the specified 20\n",
      "Created a chunk of size 589, which is longer than the specified 20\n",
      "Created a chunk of size 610, which is longer than the specified 20\n",
      "Created a chunk of size 561, which is longer than the specified 20\n",
      "Created a chunk of size 563, which is longer than the specified 20\n",
      "Created a chunk of size 259, which is longer than the specified 20\n",
      "Created a chunk of size 87, which is longer than the specified 20\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Document(metadata={}, page_content='Marketing Initiatives and Campaigns\\nOur marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.'), Document(metadata={}, page_content='Marketing Initiatives and Campaigns\\nOur marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.'), Document(metadata={}, page_content=\"Thank you for your attention, and let's continue to work together to achieve our goals.\"), Document(metadata={}, page_content=\"Thank you for your attention, and let's continue to work together to achieve our goals.\")]\n"
     ]
    }
   ],
   "source": [
    "# Adding memory to a multi-input chain: first build the document index.\n",
    "import os\n",
    "\n",
    "from langchain_community.embeddings import DashScopeEmbeddings  # Alibaba Cloud embedding model\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain.vectorstores import Chroma\n",
    "\n",
    "# Read the letter; only the file read needs to stay inside the with-block.\n",
    "with open(\"kecheng/letter.txt\") as f:\n",
    "    text = f.read()\n",
    "\n",
    "# chunk_size=20 is intentionally tiny for the demo, so the splitter warns that\n",
    "# paragraph-sized chunks exceed it.\n",
    "text_splitter = CharacterTextSplitter(\n",
    "    chunk_size = 20,\n",
    "    chunk_overlap = 5\n",
    ")\n",
    "texts = text_splitter.split_text(text)\n",
    "\n",
    "embeddings = DashScopeEmbeddings(\n",
    "    model=\"text-embedding-v1\",\n",
    "    dashscope_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    ")\n",
    "# Index the chunks in a Chroma vector store.\n",
    "docssearch = Chroma.from_texts(\n",
    "    texts,\n",
    "    embeddings,\n",
    ")\n",
    "query = \"公司有什么新策略?\"\n",
    "docs = docssearch.similarity_search(query=query)  # `docs` is reused by the next cell\n",
    "print(docs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "ac2b8324-5c9e-4dfd-8b68-a65672664ff3",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\921202950.py:33: LangChainDeprecationWarning: This class is deprecated. See the following migration guides for replacements based on `chain_type`:\n",
      "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n",
      "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n",
      "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n",
      "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n",
      "\n",
      "See also guides on retrieval and question-answering here: https://python.langchain.com/docs/how_to/#qa-with-rag\n",
      "  chain = load_qa_chain(\n",
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_10156\\921202950.py:41: LangChainDeprecationWarning: The method `Chain.__call__` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use :meth:`~invoke` instead.\n",
      "  chain({\"input_documents\":docs,\"human_input\":\"公司的营销策略是什么？\"})\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new StuffDocumentsChain chain...\u001b[0m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3m下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'，参考一下相关文档以及历史对话信息，AI会据此组织最终回答内容.\n",
      "Marketing Initiatives and Campaigns\n",
      "Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.\n",
      "\n",
      "Marketing Initiatives and Campaigns\n",
      "Our marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.\n",
      "\n",
      "Thank you for your attention, and let's continue to work together to achieve our goals.\n",
      "\n",
      "Thank you for your attention, and let's continue to work together to achieve our goals.\n",
      "[]\n",
      "Human:公司的营销策略是什么？\n",
      "AI:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'input_documents': [Document(metadata={}, page_content='Marketing Initiatives and Campaigns\\nOur marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.'),\n",
       "  Document(metadata={}, page_content='Marketing Initiatives and Campaigns\\nOur marketing team has been actively working on developing new strategies to increase brand awareness and drive customer engagement. We would like to thank Sarah Thompson (phone: 415-555-1234) for her exceptional efforts in managing our social media platforms. Sarah has successfully increased our follower base by 20% in the past month alone. Moreover, please mark your calendars for the upcoming product launch event on July 15th. We encourage all team members to attend and support this exciting milestone for our company.'),\n",
       "  Document(metadata={}, page_content=\"Thank you for your attention, and let's continue to work together to achieve our goals.\"),\n",
       "  Document(metadata={}, page_content=\"Thank you for your attention, and let's continue to work together to achieve our goals.\")],\n",
       " 'human_input': '公司的营销策略是什么？',\n",
       " 'chat_history': [HumanMessage(content='公司的营销策略是什么？', additional_kwargs={}, response_metadata={}),\n",
       "  AIMessage(content='人类老爷，我真的不知道。', additional_kwargs={}, response_metadata={})],\n",
       " 'output_text': '人类老爷，我真的不知道。'}"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build a QA chain over the retrieved documents, with conversation memory.\n",
    "import os\n",
    "\n",
    "from langchain.chains.question_answering import load_qa_chain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "template = \"\"\"下面是一段AI与人类的对话，AI会针对人类问题，提供尽可能详细的回答，如果AI不知道答案，会直接回复'人类老爷，我真的不知道'，参考一下相关文档以及历史对话信息，AI会据此组织最终回答内容.\n",
    "{context}\n",
    "{chat_history}\n",
    "Human:{human_input}\n",
    "AI:\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    template=template,\n",
    "    input_variables=[\"context\", \"chat_history\", \"human_input\"],\n",
    ")\n",
    "\n",
    "# Buffer the dialogue; input_key tells the memory which input is the user turn.\n",
    "memory = ConversationBufferMemory(\n",
    "    memory_key=\"chat_history\",\n",
    "    input_key=\"human_input\",\n",
    "    return_messages=True,\n",
    ")\n",
    "\n",
    "# chain_type=\"stuff\" packs all retrieved documents into a single prompt.\n",
    "chain = load_qa_chain(\n",
    "    llm=llm,\n",
    "    memory=memory,\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    "    chain_type=\"stuff\"\n",
    ")\n",
    "\n",
    "# invoke() replaces the deprecated Chain.__call__ and returns the same dict.\n",
    "chain.invoke({\"input_documents\":docs,\"human_input\":\"公司的营销策略是什么？\"})\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5e81c6fe-12b0-4483-b027-e56180248437",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_14804\\2498159558.py:36: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  memory=ConversationBufferWindowMemory(k=2),\n",
      "C:\\Users\\27727\\AppData\\Local\\Temp\\ipykernel_14804\\2498159558.py:32: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use :meth:`~RunnableSequence, e.g., `prompt | llm`` instead.\n",
      "  chatgpt_chain = LLMChain(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "\n",
      "Human: 我要求你扮演Linux终端。我会输入命令，你将回复终端应显示的内容。我希望你只在一个唯一的代码块内回复终端输出，不添加其他内容。除非我指示你这样做，否则请不要输入命令。当我需要用英文告诉你一些信息时，我会用花括号{像这样}。我的第一个命令是pwd\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "/home/user\n",
      "```\n"
     ]
    }
   ],
   "source": [
    "\"\"\"Clone a ChatGPT-like assistant with LLMChain:\n",
    "    - custom prompt\n",
    "    - windowed conversation memory\n",
    "\"\"\"\n",
    "import os\n",
    "\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    model=\"qwen-plus\",\n",
    "    temperature=0,\n",
    "    openai_api_key=os.environ[\"DASHSCOPE_API_KEY\"],  # never hardcode secrets\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    ")\n",
    "\n",
    "template = \"\"\"AI助手是由OpenAI训练的大型语言模型。\n",
    "\n",
    "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
    "\n",
    "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
    "\n",
    "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
    "\n",
    "{history}\n",
    "Human: {human_input}\n",
    "AI助手:\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(input_variables=[\"history\", \"human_input\"], template=template)\n",
    "\n",
    "# Keep only the last k=2 exchanges, mimicking a sliding context window.\n",
    "chatgpt_chain = LLMChain(\n",
    "    llm=llm,\n",
    "    prompt=prompt,\n",
    "    verbose=True,\n",
    "    memory=ConversationBufferWindowMemory(k=2),\n",
    ")\n",
    "\n",
    "output = chatgpt_chain.predict(\n",
    "    human_input=\"我要求你扮演Linux终端。我会输入命令，你将回复终端应显示的内容。我希望你只在一个唯一的代码块内回复终端输出，不添加其他内容。除非我指示你这样做，否则请不要输入命令。当我需要用英文告诉你一些信息时，我会用花括号{像这样}。我的第一个命令是pwd\"\n",
    ")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "20379f42-f40b-476a-b6a0-b19a65613e36",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: 我要求你扮演Linux终端。我会输入命令，你将回复终端应显示的内容。我希望你只在一个唯一的代码块内回复终端输出，不添加其他内容。除非我指示你这样做，否则请不要输入命令。当我需要用英文告诉你一些信息时，我会用花括号{像这样}。我的第一个命令是pwd\n",
      "AI: ```\n",
      "/home/user\n",
      "```\n",
      "Human: ls ~\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "Documents  Downloads  Music  Pictures  Videos\n",
      "```\n"
     ]
    }
   ],
   "source": [
    "# Next turn in the simulated terminal session: list the home directory.\n",
    "output = chatgpt_chain.predict(human_input=\"ls ~\")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b8a29f5b-cfa6-45b4-b43a-9e8a58cb1cd8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: 我要求你扮演Linux终端。我会输入命令，你将回复终端应显示的内容。我希望你只在一个唯一的代码块内回复终端输出，不添加其他内容。除非我指示你这样做，否则请不要输入命令。当我需要用英文告诉你一些信息时，我会用花括号{像这样}。我的第一个命令是pwd\n",
      "AI: ```\n",
      "/home/user\n",
      "```\n",
      "Human: ls ~\n",
      "AI: ```\n",
      "Documents  Downloads  Music  Pictures  Videos\n",
      "```\n",
      "Human: cd ~\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "/home/user\n",
      "```\n"
     ]
    }
   ],
   "source": [
    "# Next turn: change into the home directory.\n",
    "output = chatgpt_chain.predict(human_input=\"cd ~\")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "db06e89c-16cc-489d-8647-595e9959c4da",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: ls ~\n",
      "AI: ```\n",
      "Documents  Downloads  Music  Pictures  Videos\n",
      "```\n",
      "Human: cd ~\n",
      "AI: ```\n",
      "/home/user\n",
      "```\n",
      "Human: {创建一个名为croypt.txt的文件，并在其中放入主流的加密货币代码}\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "touch crypt.txt\n",
      "echo \"BTC\" > crypt.txt\n",
      "echo \"ETH\" >> crypt.txt\n",
      "echo \"XRP\" >> crypt.txt\n",
      "echo \"LTC\" >> crypt.txt\n",
      "echo \"ADA\" >> crypt.txt\n",
      "echo \"DOT\" >> crypt.txt\n",
      "echo \"LINK\" >> crypt.txt\n",
      "echo \"BNB\" >> crypt.txt\n",
      "echo \"XMR\" >> crypt.txt\n",
      "echo \"USDT\" >> crypt.txt\n",
      "```\n"
     ]
    }
   ],
   "source": [
    "# Braces mark a natural-language instruction, per the convention set in the\n",
    "# first terminal-role prompt.\n",
    "output = chatgpt_chain.predict(\n",
    "    human_input=\"{创建一个名为croypt.txt的文件，并在其中放入主流的加密货币代码}\"\n",
    ")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b92c49ca-9848-4c5d-b7b7-735b579095a3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: cd ~\n",
      "AI: ```\n",
      "/home/user\n",
      "```\n",
      "Human: {创建一个名为croypt.txt的文件，并在其中放入主流的加密货币代码}\n",
      "AI: ```\n",
      "touch crypt.txt\n",
      "echo \"BTC\" > crypt.txt\n",
      "echo \"ETH\" >> crypt.txt\n",
      "echo \"XRP\" >> crypt.txt\n",
      "echo \"LTC\" >> crypt.txt\n",
      "echo \"ADA\" >> crypt.txt\n",
      "echo \"DOT\" >> crypt.txt\n",
      "echo \"LINK\" >> crypt.txt\n",
      "echo \"BNB\" >> crypt.txt\n",
      "echo \"XMR\" >> crypt.txt\n",
      "echo \"USDT\" >> crypt.txt\n",
      "```\n",
      "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "Result:33\n",
      "```\n"
     ]
    }
   ],
   "source": [
    "output = chatgpt_chain.predict(\n",
    "    human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\"\n",
    ")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "363db295-00f7-4963-a737-30258cc26e0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: {创建一个名为croypt.txt的文件，并在其中放入主流的加密货币代码}\n",
      "AI: ```\n",
      "touch crypt.txt\n",
      "echo \"BTC\" > crypt.txt\n",
      "echo \"ETH\" >> crypt.txt\n",
      "echo \"XRP\" >> crypt.txt\n",
      "echo \"LTC\" >> crypt.txt\n",
      "echo \"ADA\" >> crypt.txt\n",
      "echo \"DOT\" >> crypt.txt\n",
      "echo \"LINK\" >> crypt.txt\n",
      "echo \"BNB\" >> crypt.txt\n",
      "echo \"XMR\" >> crypt.txt\n",
      "echo \"USDT\" >> crypt.txt\n",
      "```\n",
      "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
      "AI: ```\n",
      "Result:33\n",
      "```\n",
      "Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "```\n",
      "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
      "```\n",
      "\n",
      "### 解释：\n",
      "该语句生成了一个包含前10个质数的列表。以下是代码的分解说明：\n",
      "\n",
      "1. `range(2, 3**10)`：生成一个从2到 $3^{10}$（即59049）的整数序列。\n",
      "2. `all(x % d for d in range(2, x))`：判断一个数 $x$ 是否能被从2到 $x-1$ 的任意数整除，即检查 $x$ 是否为质数。\n",
      "3. `filter(lambda x: ... , range(2, 3**10))`：筛选出所有满足条件的数（即所有质数）。\n",
      "4. `list(...)`：将筛选结果转换为列表。\n",
      "5. `[:10]`：取列表中的前10个元素。\n",
      "\n",
      "最终结果是前10个质数 `[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]`。\n"
     ]
    }
   ],
   "source": [
    "output = chatgpt_chain.predict(\n",
    "    human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\"\n",
    ")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "47754686-6851-434e-afb9-70f748da66b7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mAI助手是由OpenAI训练的大型语言模型。\n",
      "\n",
      "AI助手旨在能够处理各种任务，从回答简单问题到提供广泛话题的深入解释和讨论。作为一个语言模型，AI助手能够根据接收到的输入生成类似人类的文本，使其能够进行自然的对话并提供与当前话题相关且连贯的回答。\n",
      "\n",
      "AI助手不断学习和改进，其能力不断发展。它能够处理和理解大量文本，并利用这些知识对各种问题提供准确和丰富的回答。此外，AI助手能够根据接收到的输入生成自己的文本，使其能够参与讨论并就各种话题提供解释和描述。\n",
      "\n",
      "总的来说，AI助手是一个强大的工具，可以帮助处理各种任务，并就广泛话题提供有价值的见解和信息。无论您需要针对特定问题的帮助，还是只是想就某个特定话题进行交流，AI助手都在这里为您提供帮助。\n",
      "\n",
      "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
      "AI: ```\n",
      "Result:33\n",
      "```\n",
      "Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
      "AI: ```\n",
      "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
      "```\n",
      "\n",
      "### 解释：\n",
      "该语句生成了一个包含前10个质数的列表。以下是代码的分解说明：\n",
      "\n",
      "1. `range(2, 3**10)`：生成一个从2到 $3^{10}$（即59049）的整数序列。\n",
      "2. `all(x % d for d in range(2, x))`：判断一个数 $x$ 是否能被从2到 $x-1$ 的任意数整除，即检查 $x$ 是否为质数。\n",
      "3. `filter(lambda x: ... , range(2, 3**10))`：筛选出所有满足条件的数（即所有质数）。\n",
      "4. `list(...)`：将筛选结果转换为列表。\n",
      "5. `[:10]`：取列表中的前10个元素。\n",
      "\n",
      "最终结果是前10个质数 `[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]`。\n",
      "Human: curl https://chat.openai.com/chat\n",
      "AI助手:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "我无法直接访问或执行 `curl` 命令来访问外部网站或服务，包括 OpenAI 的 ChatGPT 页面。这种操作通常用于从命令行与 Web 服务进行交互，但具体响应将取决于目标服务的实现方式，而不是由我来决定。\n",
      "\n",
      "如果你希望与我进行交互，请直接提出问题或任务，我将尽力帮助你完成！\n"
     ]
    }
   ],
   "source": [
    "output = chatgpt_chain.predict(human_input=\"curl https://chat.openai.com/chat\")\n",
    "print(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d616852b-e5e4-49bc-bd04-b91ba6230bf4",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rag_learn",
   "language": "python",
   "name": "rag_learn"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
