{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "## RAG\n",
    "让我们来看看如何在提示和LLM中添加检索步骤，这将形成一个“检索增强生成”链\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1a6cc1554c50894c"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "from langchain_community.embeddings import DashScopeEmbeddings\n",
    "from langchain_community.vectorstores import FAISS\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_core.runnables import RunnablePassthrough\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "vectorstore = FAISS.from_texts(\n",
    "    [\"harrison worked at kensho\", \"老李在建设局工作\"],\n",
    "    embedding=DashScopeEmbeddings(dashscope_api_key=os.getenv(\"DASHSCOPE_API_KEY\"))\n",
    ")\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "template = \"\"\"Answer the question based only on the following context:\n",
    "{context}\n",
    "\n",
    "Question: {question}\n",
    "\"\"\"\n",
    "llm = ChatOpenAI(\n",
    "    # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key=\"sk-xxx\",\n",
    "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model_name=\"qwen-max\",\n",
    "    max_retries=0,\n",
    ")\n",
    "prompt = ChatPromptTemplate.from_template(template)\n",
    "\n",
    "chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | prompt | llm | StrOutputParser()"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:09.403411Z",
     "start_time": "2024-10-30T09:36:08.173219Z"
    }
   },
   "id": "673dc20e8b6832bc",
   "execution_count": 41
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "'Harrison worked at Kensho.'"
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.invoke(\"where did harrison work?\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:18.562549Z",
     "start_time": "2024-10-30T09:36:09.412102Z"
    }
   },
   "id": "5e96a375c4824bb0",
   "execution_count": 42
  },
  {
   "cell_type": "markdown",
   "source": [
    "更多关于`itemgetter`的使用，参考：`lang-chain-lcel-操作输入和输出.ipynb`\n",
    "\n",
    "## Conversational Retrieval Chain\n",
    "可以很容易地添加对话历史。这主要意味着添加聊天消息历史记录"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a9336ef6c7efe543"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "上一智能体回答结果:老李的工作是什么？\n",
      "[Document(metadata={}, page_content='老李在建设局工作'), Document(metadata={}, page_content='harrison worked at kensho')]\n",
      "input_variables=['page_content'] input_types={} partial_variables={} template='{page_content}'\n",
      "格式化后的文档字符串:老李在建设局工作\n",
      "\n",
      "harrison worked at kensho\n",
      "上一智能体回答结果:{'context': '老李在建设局工作\\n\\nharrison worked at kensho', 'question': '老李的工作是什么？'}\n"
     ]
    },
    {
     "data": {
      "text/plain": "'老李在建设局工作。所以，老李的工作是与建设局相关的职务，但具体的职位没有在提供的信息中提及。'"
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from operator import itemgetter\n",
    "from langchain_core.messages import get_buffer_string\n",
    "from langchain_core.runnables import RunnableParallel, RunnableLambda\n",
    "from langchain_core.prompts import format_document\n",
    "from langchain_core.prompts import ChatPromptTemplate, PromptTemplate\n",
    "\n",
    "_template = \"\"\"鉴于以下对话和一个后续问题，请将后续问题用其原始语言重新表述为一个独立的问题。\n",
    "\n",
    "聊天记录:\n",
    "{chat_history}\n",
    "后续输入: {question}\n",
    "Standalone question:\"\"\"\n",
    "CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\n",
    "ANSWER_PROMPT = ChatPromptTemplate.from_template(\"\"\"仅根据以下上下文回答问题:\n",
    "{context}\n",
    "\n",
    "Question: {question}\n",
    "\"\"\")\n",
    "DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
    "\n",
    "\n",
    "def _combine_docs(docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, doc_separator=\"\\n\\n\"):\n",
    "    \"\"\"\n",
    "    根据提示模板将文档格式化为字符串(当前业务为格式化为智能体上下文内容)，并使用文档分隔符分隔后返回\n",
    "    :param docs: 文档\n",
    "    :param document_prompt: 提示模板\n",
    "    :param doc_separator: 分隔符\n",
    "    :return: str\n",
    "    \"\"\"\n",
    "    print(docs)\n",
    "    print(document_prompt)\n",
    "    #根据提示模板将文档格式化为字符串。\n",
    "    doc_strings = [format_document(doc, document_prompt) for doc in docs]\n",
    "    print(f\"格式化后的文档字符串:{doc_separator.join(doc_strings)}\")\n",
    "    return doc_separator.join(doc_strings)\n",
    "\n",
    "\n",
    "# 回答结果输出为字典. eg:{\"standalone_question\":str}\n",
    "_inputs = RunnableParallel(\n",
    "    standalone_question=RunnablePassthrough.assign(\n",
    "        chat_history=lambda x: get_buffer_string(x[\"chat_history\"])\n",
    "    )\n",
    "                        | CONDENSE_QUESTION_PROMPT\n",
    "                        | ChatOpenAI(\n",
    "        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key=\"sk-xxx\",\n",
    "        openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "        openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "        model_name=\"qwen-max\",\n",
    "        max_retries=0, )\n",
    "                        | StrOutputParser(),\n",
    ")\n",
    "\n",
    "\n",
    "def _inputs_result_print(standalone_question: str | dict):\n",
    "    # if isinstance(standalone_question, str):\n",
    "    print(f\"上一智能体回答结果:{standalone_question}\")\n",
    "\n",
    "    return standalone_question\n",
    "\n",
    "\n",
    "# 从_inputs链中获取结果，设置question,而context通过retriever(向量)获取，并将向量内容拼接成字符串，形成智能体上下文\n",
    "_context = {\n",
    "    \"context\": itemgetter(\"standalone_question\") | RunnableLambda(\n",
    "        _inputs_result_print) | retriever | _combine_docs,\n",
    "    \"question\": lambda x: x[\"standalone_question\"]\n",
    "}\n",
    "\n",
    "# 最终的链路\n",
    "conversational_qa_chain = _inputs | _context | RunnableLambda(_inputs_result_print) | ANSWER_PROMPT | ChatOpenAI(\n",
    "    # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key=\"sk-xxx\",\n",
    "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model_name=\"qwen-max\",\n",
    "    max_retries=0,\n",
    ") | StrOutputParser()\n",
    "conversational_qa_chain.invoke(\n",
    "    {\n",
    "        \"question\": \"老李什么工作?\",\n",
    "        \"chat_history\": [],\n",
    "    }\n",
    ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:23.489740Z",
     "start_time": "2024-10-30T09:36:18.565648Z"
    }
   },
   "id": "e055fa3acfbe18c1",
   "execution_count": 43
  },
  {
   "cell_type": "markdown",
   "source": [
    "根据会话记录回答"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "92f4c3ed3fab3de8"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "上一智能体回答结果:Where did Harrison work?\n",
      "[Document(metadata={}, page_content='harrison worked at kensho'), Document(metadata={}, page_content='老李在建设局工作')]\n",
      "input_variables=['page_content'] input_types={} partial_variables={} template='{page_content}'\n",
      "格式化后的文档字符串:harrison worked at kensho\n",
      "\n",
      "老李在建设局工作\n",
      "上一智能体回答结果:{'context': 'harrison worked at kensho\\n\\n老李在建设局工作', 'question': 'Where did Harrison work?'}\n"
     ]
    },
    {
     "data": {
      "text/plain": "'Harrison worked at Kensho.'"
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.messages import HumanMessage\n",
    "\n",
    "from langchain_core.messages import AIMessage\n",
    "\n",
    "conversational_qa_chain.invoke(\n",
    "    {\n",
    "        \"question\": \"where did he work?\",\n",
    "        \"chat_history\": [\n",
    "            HumanMessage(content=\"Who wrote this notebook?\"),\n",
    "            AIMessage(content=\"Harrison\"),\n",
    "        ],\n",
    "    }\n",
    ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:25.675201Z",
     "start_time": "2024-10-30T09:36:23.495328Z"
    }
   },
   "id": "1d40f266721c9861",
   "execution_count": 44
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 从内存加载搜索历史\n",
    "\n",
    ">The ConversationStringBufferMemory is equivalent to ConversationBufferMemory but was targeting LLMs that were not chat models.\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c3acc78b55558ca"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Document(metadata={}, page_content='harrison worked at kensho'), Document(metadata={}, page_content='老李在建设局工作')]\n",
      "input_variables=['page_content'] input_types={} partial_variables={} template='{page_content}'\n",
      "格式化后的文档字符串:harrison worked at kensho\n",
      "\n",
      "老李在建设局工作\n",
      "上一智能体回答结果:{'context': 'harrison worked at kensho\\n\\n老李在建设局工作', 'question': 'Where did Harrison work?'}\n"
     ]
    }
   ],
   "source": [
    "from langchain.memory import ConversationBufferMemory\n",
    "\n",
    "memory = ConversationBufferMemory(\n",
    "    return_messages=True, output_key=\"answer\", input_key=\"question\", memory_key=\"chat_history\"\n",
    ")\n",
    "# 添加输出键时，传入参数\n",
    "loaded_memory = RunnablePassthrough.assign(\n",
    "    chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter(\"chat_history\")\n",
    ")\n",
    "\n",
    "standalone_question = {\n",
    "    \"standalone_question\": {\n",
    "                               \"question\": lambda x: x[\"question\"],\n",
    "                               \"chat_history\": lambda x: get_buffer_string(x[\"chat_history\"])\n",
    "                           }\n",
    "                           | CONDENSE_QUESTION_PROMPT\n",
    "                           | ChatOpenAI(\n",
    "        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key=\"sk-xxx\",\n",
    "        openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "        openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "        model_name=\"qwen-max\",\n",
    "        max_retries=0, )\n",
    "                           | StrOutputParser()\n",
    "}\n",
    "# 从retriever中获取文档\n",
    "retrieved_documents = {\n",
    "    \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
    "    \"question\": lambda x: x[\"standalone_question\"],\n",
    "}\n",
    "\n",
    "# 构建提示符输入\n",
    "final_inputs = {\n",
    "    \"context\": lambda x: _combine_docs(x[\"docs\"]),\n",
    "    \"question\": itemgetter(\"question\")\n",
    "}\n",
    "#And finally, we do the part that returns the answers\n",
    "answer = {\n",
    "    \"answer\": final_inputs |RunnableLambda(_inputs_result_print) | ANSWER_PROMPT | ChatOpenAI(\n",
    "        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key=\"sk-xxx\",\n",
    "        openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "        openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "        model_name=\"qwen-max\",\n",
    "        max_retries=0, ),\n",
    "    \"docs\": itemgetter(\"docs\")\n",
    "}\n",
    "\n",
    "final_chain = loaded_memory | standalone_question | retrieved_documents| answer\n",
    "\n",
    "inputs = {\"question\": \"where did harrison work?\"}\n",
    "result = final_chain.invoke(inputs)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:28.989400Z",
     "start_time": "2024-10-30T09:36:25.680027Z"
    }
   },
   "id": "2e5916b639409e5f",
   "execution_count": 45
  },
  {
   "cell_type": "markdown",
   "source": [
    "回答结果与问题存储到内存"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1493de1e5535a0e3"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "{'chat_history': [HumanMessage(content='where did harrison work?', additional_kwargs={}, response_metadata={}),\n  AIMessage(content='Harrison worked at Kensho.', additional_kwargs={}, response_metadata={})]}"
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 回答结果存储\n",
    "memory.save_context(inputs, {\"answer\": result[\"answer\"].content})\n",
    "# 查看存储信息\n",
    "memory.load_memory_variables({})"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:29.000958Z",
     "start_time": "2024-10-30T09:36:28.992613Z"
    }
   },
   "id": "953839393ae45e32",
   "execution_count": 46
  },
  {
   "cell_type": "markdown",
   "source": [
    "从存储中加载"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c951bcbd3042143b"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Document(metadata={}, page_content='harrison worked at kensho'), Document(metadata={}, page_content='老李在建设局工作')]\n",
      "input_variables=['page_content'] input_types={} partial_variables={} template='{page_content}'\n",
      "格式化后的文档字符串:harrison worked at kensho\n",
      "\n",
      "老李在建设局工作\n",
      "上一智能体回答结果:{'context': 'harrison worked at kensho\\n\\n老李在建设局工作', 'question': 'But where did Harrison actually work?'}\n"
     ]
    },
    {
     "data": {
      "text/plain": "{'answer': AIMessage(content='Harrison worked at Kensho.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 7, 'prompt_tokens': 50, 'total_tokens': 57, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen-max', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-6f232d11-d0a1-44ba-84f5-2115ad4cafe7-0', usage_metadata={'input_tokens': 50, 'output_tokens': 7, 'total_tokens': 57, 'input_token_details': {}, 'output_token_details': {}}),\n 'docs': [Document(metadata={}, page_content='harrison worked at kensho'),\n  Document(metadata={}, page_content='老李在建设局工作')]}"
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "inputs = {\"question\": \"but where did he really work?\"}\n",
    "result = final_chain.invoke(inputs)\n",
    "result"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-30T09:36:31.652150Z",
     "start_time": "2024-10-30T09:36:29.004004Z"
    }
   },
   "id": "e9a380164f92de29",
   "execution_count": 47
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
