{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-12-17T16:13:37.821702Z",
     "start_time": "2024-12-17T16:13:36.471921Z"
    }
   },
   "source": [
    "# Read the Zhipu AI API key from the environment instead of hardcoding it.\n",
    "# SECURITY NOTE(review): a real key was previously committed on this line in\n",
    "# plain text — it is exposed and should be revoked/rotated.\n",
    "import os\n",
    "from getpass import getpass\n",
    "\n",
    "ZHIPU_API_KEY = os.environ.get(\"ZHIPU_API_KEY\") or getpass(\"ZHIPU_API_KEY: \")\n",
    "\n",
    "# Create the chat model instance (GLM served through an OpenAI-compatible endpoint)\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    temperature = 0.95,\n",
    "    model_name = \"glm-4-flash\",\n",
    "    openai_api_key = ZHIPU_API_KEY,\n",
    "    openai_api_base = \"https://open.bigmodel.cn/api/paas/v4/\"\n",
    ")"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:13:55.146573Z",
     "start_time": "2024-12-17T16:13:55.143572Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Prompt templates for the two specialist scenarios\n",
    "flower_care_template = \"\"\"\n",
    "你是一个经验丰富的园丁，擅长解答关于养花育花的问题。\n",
    "下面是需要你来回答的问题:\n",
    "{input}\n",
    "\"\"\"\n",
    "\n",
    "flower_deco_template = \"\"\"\n",
    "你是一位网红插花大师，擅长解答关于鲜花装饰的问题。\n",
    "下面是需要你来回答的问题:\n",
    "{input}\n",
    "\"\"\"\n",
    "\n",
    "# Routing metadata: one entry per destination prompt, consumed by the\n",
    "# destination-chain and router-chain cells below.\n",
    "prompt_infos = [\n",
    "    {\"key\": key, \"description\": description, \"template\": template}\n",
    "    for key, description, template in [\n",
    "        (\"flower_care\", \"适合回答关于鲜花护理的问题\", flower_care_template),\n",
    "        (\"flower_decoration\", \"适合回答关于鲜花装饰的问题\", flower_deco_template),\n",
    "    ]\n",
    "]"
   ],
   "id": "c4d1ab92230e487",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:14:12.352426Z",
     "start_time": "2024-12-17T16:14:12.183086Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build one destination chain per prompt definition, keyed by route name\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "chain_map = {}\n",
    "\n",
    "for prompt_info in prompt_infos:\n",
    "    destination_prompt = PromptTemplate(\n",
    "        template=prompt_info[\"template\"],\n",
    "        input_variables=[\"input\"],\n",
    "    )\n",
    "    print(\"目标提示:\\n\", destination_prompt)\n",
    "\n",
    "    chain_map[prompt_info[\"key\"]] = LLMChain(\n",
    "        llm=llm,\n",
    "        prompt=destination_prompt,\n",
    "        verbose=True,\n",
    "    )"
   ],
   "id": "98d4c61d0a6f74ff",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "目标提示:\n",
      " input_variables=['input'] input_types={} partial_variables={} template='\\n你是一个经验丰富的园丁，擅长解答关于养花育花的问题。\\n下面是需要你来回答的问题:\\n{input}\\n'\n",
      "目标提示:\n",
      " input_variables=['input'] input_types={} partial_variables={} template='\\n你是一位网红插花大师，擅长解答关于鲜花装饰的问题。\\n下面是需要你来回答的问题:\\n{input}\\n'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\zxy\\AppData\\Local\\Temp\\ipykernel_23188\\2994331668.py:14: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use :meth:`~RunnableSequence, e.g., `prompt | llm`` instead.\n",
      "  chain = LLMChain(\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:14:44.662632Z",
     "start_time": "2024-12-17T16:14:44.539925Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the router chain that selects a destination prompt for each input\n",
    "from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser\n",
    "# Fix: alias was misspelled \"RounterTemplate\"\n",
    "from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE as RouterTemplate\n",
    "\n",
    "# One \"key: description\" line per destination, injected into the router prompt\n",
    "destinations = [f\"{p['key']}: {p['description']}\" for p in prompt_infos]\n",
    "router_template = RouterTemplate.format(destinations=\"\\n\".join(destinations))\n",
    "print(\"路由模板:\\n\", router_template)\n",
    "\n",
    "router_prompt = PromptTemplate(\n",
    "    template=router_template,\n",
    "    input_variables=[\"input\"],\n",
    "    output_parser=RouterOutputParser(),\n",
    ")\n",
    "print(\"路由提示:\\n\", router_prompt)\n",
    "\n",
    "router_chain = LLMRouterChain.from_llm(\n",
    "    llm,\n",
    "    router_prompt,\n",
    "    verbose=True\n",
    ")\n"
   ],
   "id": "788cf3279218cae9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "路由模板:\n",
      " Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n",
      "\n",
      "<< FORMATTING >>\n",
      "Return a markdown code snippet with a JSON object formatted to look like:\n",
      "```json\n",
      "{{\n",
      "    \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n",
      "    \"next_inputs\": string \\ a potentially modified version of the original input\n",
      "}}\n",
      "```\n",
      "\n",
      "REMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\n",
      "REMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n",
      "\n",
      "<< CANDIDATE PROMPTS >>\n",
      "flower_care: 适合回答关于鲜花护理的问题\n",
      "flower_decoration: 适合回答关于鲜花装饰的问题\n",
      "\n",
      "<< INPUT >>\n",
      "{input}\n",
      "\n",
      "<< OUTPUT (must include ```json at the start of the response) >>\n",
      "<< OUTPUT (must end with ```) >>\n",
      "\n",
      "路由提示:\n",
      " input_variables=['input'] input_types={} output_parser=RouterOutputParser() partial_variables={} template='Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\\n\\n<< FORMATTING >>\\nReturn a markdown code snippet with a JSON object formatted to look like:\\n```json\\n{{\\n    \"destination\": string \\\\ name of the prompt to use or \"DEFAULT\"\\n    \"next_inputs\": string \\\\ a potentially modified version of the original input\\n}}\\n```\\n\\nREMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\\nREMEMBER: \"next_inputs\" can just be the original input if you don\\'t think any modifications are needed.\\n\\n<< CANDIDATE PROMPTS >>\\nflower_care: 适合回答关于鲜花护理的问题\\nflower_decoration: 适合回答关于鲜花装饰的问题\\n\\n<< INPUT >>\\n{input}\\n\\n<< OUTPUT (must include ```json at the start of the response) >>\\n<< OUTPUT (must end with ```) >>\\n'\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:14:59.260286Z",
     "start_time": "2024-12-17T16:14:59.253658Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the fallback chain used when the router picks no destination (\"DEFAULT\")\n",
    "# NOTE(review): ConversationChain is deprecated upstream (see the recorded\n",
    "# warning below); kept here for compatibility with MultiPromptChain.\n",
    "from langchain.chains import ConversationChain\n",
    "\n",
    "default_chain = ConversationChain(\n",
    "    llm=llm,\n",
    "    output_key=\"text\",  # presumably aligned with the destination chains' output key — verify\n",
    "    verbose=True\n",
    ")"
   ],
   "id": "f19e90a94fa62f2f",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\zxy\\AppData\\Local\\Temp\\ipykernel_23188\\1976241251.py:4: LangChainDeprecationWarning: The class `ConversationChain` was deprecated in LangChain 0.2.7 and will be removed in 1.0. Use :meth:`~RunnableWithMessageHistory: https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html` instead.\n",
      "  default_chain = ConversationChain(\n",
      "C:\\Users\\zxy\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\pydantic\\main.py:212: LangChainDeprecationWarning: Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n",
      "  validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:15:27.314517Z",
     "start_time": "2024-12-17T16:15:27.310715Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 构建多提示链\n",
    "from langchain.chains.router import MultiPromptChain\n",
    "\n",
    "chain = MultiPromptChain(\n",
    "    router_chain=router_chain,\n",
    "    destination_chains=chain_map,\n",
    "    default_chain=default_chain,\n",
    "    verbose=True\n",
    ")"
   ],
   "id": "2a54f3e9782cfc6e",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\zxy\\AppData\\Local\\Temp\\ipykernel_23188\\133919940.py:4: LangChainDeprecationWarning: Please see migration guide here for recommended implementation: https://python.langchain.com/docs/versions/migrating_chains/multi_prompt_chain/\n",
      "  chain = MultiPromptChain(\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:15:47.193246Z",
     "start_time": "2024-12-17T16:15:33.796497Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Test 1: a flower-care question should route to the flower_care chain.\n",
    "# Chain.run is deprecated since LangChain 0.1.0 (see the recorded warning);\n",
    "# invoke returns a dict keyed by the chain's output key (\"text\").\n",
    "print(chain.invoke(\"如何为玫瑰浇水？\")[\"text\"])"
   ],
   "id": "f12db4499a996a8e",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\zxy\\AppData\\Local\\Temp\\ipykernel_23188\\3021724215.py:2: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use :meth:`~invoke` instead.\n",
      "  print(chain.run(\"如何为玫瑰浇水？\"))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new MultiPromptChain chain...\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMRouterChain chain...\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "flower_care: {'input': '如何为玫瑰浇水？'}\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "你是一个经验丰富的园丁，擅长解答关于养花育花的问题。\n",
      "下面是需要你来回答的问题:\n",
      "如何为玫瑰浇水？\n",
      "\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "为玫瑰浇水是保持其健康成长的关键，以下是一些浇水的基本指导原则：\n",
      "\n",
      "1. **了解土壤类型**：首先，了解你的玫瑰种植在哪种类型的土壤中。如果是沙质土壤，它需要更频繁的浇水，因为水分会迅速渗透；如果是黏土土壤，则需要少浇水但保持土壤湿润。\n",
      "\n",
      "2. **早晨浇水**：在早晨浇水可以帮助玫瑰在一天中蒸发较少的水分，减少水分蒸发带来的土壤干燥。\n",
      "\n",
      "3. **浇透土壤**：确保每次浇水都能浇透土壤，直到水分从盆底或土壤底部渗出。这样可以确保水分深入到根系。\n",
      "\n",
      "4. **避免浇在叶子上**：尽量将水直接浇到土壤上，避免水滴落在叶片上。过多的水分停留在叶片上可能导致病害。\n",
      "\n",
      "5. **土壤湿度**：检查土壤湿度，可以通过插一支干净的小木棍到土壤中，等待一段时间后取出，如果木棍上粘有湿土，说明土壤已经湿透；如果木棍干燥，则可能需要浇水。\n",
      "\n",
      "6. **季节调整**：根据季节的变化调整浇水频率。夏季通常需要更频繁的浇水，而冬季则可以减少浇水次数。\n",
      "\n",
      "7. **盆栽与地栽**：如果是盆栽玫瑰，通常需要更频繁的浇水，因为土壤容量有限，水分容易蒸发。而地栽玫瑰可以等待土壤更干燥一些再浇水。\n",
      "\n",
      "8. **雨水补给**：如果雨水较多，可以在浇水前检查土壤的湿度，以免过度浇水。\n",
      "\n",
      "9. **定期检查**：定期检查玫瑰的生长状况，根据其需水量和土壤状况灵活调整浇水频率。\n",
      "\n",
      "记住，每个花园和每种玫瑰品种的具体浇水需求可能有所不同，因此最好的浇水方法是根据实际情况和观察来调整。\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:16:06.193466Z",
     "start_time": "2024-12-17T16:15:50.416718Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Test 2: a decoration question should route to the flower_decoration chain.\n",
    "# Uses invoke instead of the deprecated Chain.run; \"text\" is the output key.\n",
    "print(chain.invoke(\"如何为婚礼场地装饰花朵？\")[\"text\"])"
   ],
   "id": "ab6c99d3fa3fe0f0",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new MultiPromptChain chain...\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMRouterChain chain...\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "flower_decoration: {'input': '如何为婚礼场地装饰花朵？'}\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "你是一位网红插花大师，擅长解答关于鲜花装饰的问题。\n",
      "下面是需要你来回答的问题:\n",
      "如何为婚礼场地装饰花朵？\n",
      "\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "为婚礼场地装饰花朵，以下是一些建议和步骤，以确保你的婚礼花卉装饰既美观又具有纪念意义：\n",
      "\n",
      "1. **主题选择**：\n",
      "   - 确定婚礼的主题，这将帮助你选择与主题相匹配的花卉颜色和风格。\n",
      "   - 考虑新娘的喜好和婚礼的氛围。\n",
      "\n",
      "2. **颜色搭配**：\n",
      "   - 选择适合婚礼主题的颜色方案。\n",
      "   - 利用色轮来创造和谐的色彩搭配。\n",
      "\n",
      "3. **花卉选择**：\n",
      "   - 根据季节选择新鲜的花卉，以确保花朵的持久和鲜艳。\n",
      "   - 考虑新娘和新郎的花束中使用的花卉，以便在婚礼现场进行呼应。\n",
      "\n",
      "4. **装饰布局**：\n",
      "   - **入口**：在入口处放置一个大型的花艺作品，可以是花门或花柱，为来宾留下深刻印象。\n",
      "   - **宴会桌**：在每个桌中央放置花艺装饰，可以使用花篮、花瓶或花束，颜色和风格应与婚礼主题相协调。\n",
      "   - **通道**：用花地毯或花柱装饰婚礼通道，营造出梦幻的氛围。\n",
      "   - **座位区域**：在座位区域周围放置小型花艺装饰，如花球、花环或花束，增加空间的温馨感。\n",
      "\n",
      "5. **花艺元素**：\n",
      "   - **花束**：为新郎和新娘制作花束，可以融入一些特别的花卉作为纪念。\n",
      "   - **胸花和手腕花**：为新娘和新郎以及关键宾客准备胸花和手腕花，保持整体风格的一致性。\n",
      "   - **头饰和发饰**：新娘可以选择一些花饰作为发饰或头饰，增加个性。\n",
      "\n",
      "6. **花艺细节**：\n",
      "   - 使用一些特别的装饰品，如丝带、绸带、珠子、水晶等，增加花艺作品的层次感和质感。\n",
      "   - 在适当的地方放置一些小摆件，如蜡烛、小灯笼或雕塑，与花卉相得益彰。\n",
      "\n",
      "7. **可持续发展**：\n",
      "   - 考虑使用可持续花卉，减少对环境的影响。\n",
      "   - 询问供应商关于花卉的来源和可持续性。\n",
      "\n",
      "8. **预算控制**：\n",
      "   - 制定预算并尽量在预算范围内选择合适的花卉和装饰。\n",
      "\n",
      "9. **专业咨询**：\n",
      "   - 如果自己不擅长花艺设计，可以考虑聘请专业的花艺师来帮助你设计和执行。\n",
      "\n",
      "通过以上步骤，你可以为婚礼场地打造出独特的花卉装饰，让每一处细节都充满爱意和仪式感。\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-12-17T16:16:52.981081Z",
     "start_time": "2024-12-17T16:16:46.033234Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Test 3: an off-topic question should fall through to the default chain.\n",
    "# Uses invoke instead of the deprecated Chain.run; \"text\" is the output key.\n",
    "print(chain.invoke(\"如何区分阿豆和罗豆？\")[\"text\"])"
   ],
   "id": "1534bbaf3a70bcc7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new MultiPromptChain chain...\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMRouterChain chain...\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "None: {'input': '如何区分阿豆和罗豆？'}\n",
      "\n",
      "\u001B[1m> Entering new ConversationChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
      "\n",
      "Current conversation:\n",
      "\n",
      "Human: 如何区分阿豆和罗豆？\n",
      "AI:\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "啊哈，这个问题很有趣！在中文里，“阿豆”和“罗豆”这两个词听起来非常相似，它们都像是对人的昵称或者特定称谓。不过，它们的确切含义和区别可能会因地区、文化或个人用法而异。以下是我能想到的一些可能的解释：\n",
      "\n",
      "1. **地域差异**：在某些地区，人们可能会根据自己的习惯和方言来使用这些词汇。比如，“阿豆”可能是一个特定地区的昵称，而“罗豆”可能在另一个地区流行。\n",
      "\n",
      "2. **个人命名**：如果这两个词是特定个人的名字或昵称，那么它们可能没有固定的区别，只是个人的称呼。\n",
      "\n",
      "3. **文化渊源**：在一些文学作品或民间传说中，“阿豆”和“罗豆”可能代表不同的角色或象征。但这需要具体了解相关的背景信息。\n",
      "\n",
      "没有更多具体的上下文信息，很难准确地区分这两个词。如果您能提供更多的背景或地区信息，我或许能提供更具体的解答。\n"
     ]
    }
   ],
   "execution_count": 9
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
