{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 3.1 Chains的基本使用\n",
    "使用LCEL，可以构造出结构最简单的Chain。\n",
    "LangChain表达式语言（LCEL，LangChain Expression Language）是一种声明式方法，可以轻松地\n",
    "将多个组件链接成 AI 工作流。它通过Python原生操作符（如管道符 | ）将组件连接成可执行流程，显\n",
    "著简化了AI应用的开发。\n",
    "LCEL的基本构成：提示（Prompt）+ 模型（Model）+ 输出解析器（OutputParser）"
   ],
   "id": "5d2642c8cfe69878"
  },
  {
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-10-25T11:05:05.114064Z",
     "start_time": "2025-10-25T11:04:59.950606Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 3.1.1使用LCEL，可以构造出结构最简单的Chain\n",
    "import os\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "from dotenv import load_dotenv\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv(\"DASHSCOPE_API_KEY\")\n",
    "os.environ['OPENAI_BASE_URL'] = os.getenv(\"DASHSCOPE_BASE_URL\")\n",
    "chat_model = ChatOpenAI(model=\"qwen-max\")\n",
    "prompt_template = PromptTemplate.from_template(\n",
    "    template=\"给我讲一个关于{topic}话题的简短笑话\"\n",
    ")\n",
    "parser = StrOutputParser()\n",
    "# 构建链式调用（LCEL语法）\n",
    "chain = prompt_template | chat_model | parser\n",
    "out_put = chain.invoke({\"topic\": \"ice cream\"})\n",
    "print(out_put)\n",
    "print(type(out_put))"
   ],
   "id": "eafbad92f668e94f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "当然可以，这里有一个关于冰淇淋的简短笑话：\n",
      "\n",
      "为什么冰淇淋总是冷笑话高手？因为它每次都能让人“冻”笑！\n",
      "<class 'str'>\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-27T14:33:25.229791Z",
     "start_time": "2025-10-27T14:33:21.510855Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 3.1.2 Basic chain: the legacy LLMChain class.\n",
    "# NOTE(review): LLMChain was deprecated in LangChain 0.1.17 and will be\n",
    "# removed in 1.0 (see the warning in this cell's stderr output); prefer the\n",
    "# LCEL form `prompt | llm` shown in 3.1.1. Kept here to demonstrate the old API.\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "import os\n",
    "import dotenv\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "dotenv.load_dotenv()\n",
    "os.environ['OPENAI_API_KEY'] = os.getenv(\"DASHSCOPE_API_KEY\")\n",
    "os.environ['OPENAI_BASE_URL'] = os.getenv(\"DASHSCOPE_BASE_URL\")\n",
    "# 1. Create the chat model instance (OpenAI-compatible DashScope endpoint).\n",
    "chat_model = ChatOpenAI(model=\"qwen-max\")\n",
    "# 2. Raw string template with a single {number} input variable.\n",
    "template = \"桌上有{number}个苹果，四个桃子和 3 本书，一共有几个水果?\"\n",
    "prompt = PromptTemplate.from_template(template)\n",
    "# 3. Build the LLMChain from the model and the prompt.\n",
    "llm_chain = LLMChain(\n",
    "    llm=chat_model,\n",
    "    prompt=prompt\n",
    ")\n",
    "# 4. Invoke the chain; the result is a dict containing the input\n",
    "#    variables plus the model answer under the 'text' key (see output).\n",
    "result = llm_chain.invoke({\"number\": 2})\n",
    "print(result)"
   ],
   "id": "93f15c1265c4d4f3",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Chen Gaokai\\AppData\\Local\\Temp\\ipykernel_1616\\1790248431.py:17: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use :meth:`~RunnableSequence, e.g., `prompt | llm`` instead.\n",
      "  llm_chain = LLMChain(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'number': 2, 'text': '桌上有2个苹果和4个桃子，这些都是水果。所以，一共有 \\\\(2 + 4 = 6\\\\) 个水果。书籍不属于水果类，因此在计算水果总数时不包括在内。'}\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-10-27T14:34:13.135657Z",
     "start_time": "2025-10-27T14:34:08.164183Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Example 2: the chain's `verbose` flag (echoes the formatted prompt),\n",
    "# using ChatPromptTemplate for multi-role chat prompts.\n",
    "# NOTE(review): LLMChain is deprecated (warning shown in 3.1.2); kept only to\n",
    "# demonstrate `verbose` — prefer `chat_template | llm` in new code.\n",
    "# 1. Imports\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# 2. Prompt template: one system message and one human message, each with\n",
    "#    its own input variable ({area}, {adjective}).\n",
    "chat_template = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", \"你是一位{area}领域具备丰富经验的高端技术人才\"),\n",
    "        (\"human\", \"给我讲一个 {adjective} 笑话\"),\n",
    "    ]\n",
    ")\n",
    "# 3. Model — NOTE(review): this cell sets no OPENAI_* env vars itself; it\n",
    "#    presumably relies on a previous cell having exported them.\n",
    "llm = ChatOpenAI(model=\"qwen-max\")\n",
    "# 4. verbose=True prints the fully formatted prompt before the model call\n",
    "#    (visible as the highlighted text in this cell's output).\n",
    "llm_chain = LLMChain(llm=llm, prompt=chat_template, verbose=True)\n",
    "# 5. Invoke; returns a dict with the inputs plus the answer under 'text'.\n",
    "response = llm_chain.invoke({\"area\": \"互联网\", \"adjective\": \"上班的\"})\n",
    "print(response)"
   ],
   "id": "f8621e96b5816684",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3mSystem: 你是一位互联网领域具备丰富经验的高端技术人才\n",
      "Human: 给我讲一个 上班的 笑话\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "{'area': '互联网', 'adjective': '上班的', 'text': '当然，这里有一个轻松的上班笑话给你：\\n\\n两个同事在办公室聊天。\\nA说：“我昨晚做了一个梦，梦见自己变成了一个电脑键盘。”\\nB好奇地问：“那感觉怎么样？”\\nA回答说：“挺好的，就是有时候会觉得自己被按得太多了。”\\n\\n希望这个小笑话能给你的工作日带来一丝欢乐！'}\n"
     ]
    }
   ],
   "execution_count": 4
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
