{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# chains "
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8909b1f0b0f071c8"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "is_executing": true
   },
   "outputs": [],
   "source": [
    "%pip install langchain\n",
    "%pip install openai"
   ]
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "import os \n",
    "import getpass\n",
    "os.environ['OPENAI_API_KEY'] = getpass.getpass(\"输入openAi-key\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T14:51:51.696945Z",
     "start_time": "2024-09-05T14:51:44.668960Z"
    }
   },
   "id": "89a6375529a8036f",
   "execution_count": 1
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 1. chains 基本概念"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "62750adde0d89d66"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 1.1 单输入"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "737229cc082a6dd"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains.llm import LLMChain\n",
    "\n",
    "llm = OpenAI(temperature=0.5)\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template = \"我养了一只{pet}起个什么名字好\"\n",
    ")\n",
    "\n",
    "# 用chains 将llm和prompt连接起来\n",
    "chain = LLMChain(llm=llm, prompt=prompt)\n",
    "# Run the chain only specifying the input variable.\n",
    "print(chain.run(\"小狗\"))\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "f92b23ba07f148a9",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 1.2多输入"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fd0ba1156ab77ffa"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\",\"season\"],\n",
    "    template = \"我养了一只{pet},出生在{season}起个什么名字好呢？\"\n",
    ")\n",
    "chain = LLMChain(llm=llm,prompt=prompt)\n",
    "print(chain.run({\n",
    "    \"pet\":\"小狗\",\n",
    "    \"season\":\"秋天\"\n",
    "}))"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "339430390ad811ea",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
     "## 1.3 聊天模式"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6f9edcdb781eef1a"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts.chat import (\n",
    "    ChatPromptTemplate,\n",
    "    HumanMessagePromptTemplate,\n",
    ")\n",
    "\n",
    "human_msg_prompt_template = HumanMessagePromptTemplate(\n",
    "    prompt = PromptTemplate(\n",
    "        input_variables=[\"pet\"],\n",
    "        template=\"我养了一只{pet}起个什么名字好呢?\",\n",
    "    )   \n",
    ")\n",
    "chat_prompt_template = ChatPromptTemplate.from_messages([human_msg_prompt_template])\n",
    "chat = ChatOpenAI(temperature=0.9)\n",
    "chain = LLMChain(llm=chat,prompt=chat_prompt_template)\n",
    "\n",
    "print(chain.run(\"小猫\"))\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "af1fec99253f0c69",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 2 Chain 基本用法"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "70e2f924b06555a4"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.1 API异步调用"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "87534a2240564a2e"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import time\n",
    "\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains.llm import LLMChain\n",
    "\n",
    "#执行同步函数 多次\n",
    "def generate_serially():\n",
    "    llm = ChatOpenAI(temperature=0.9)\n",
    "    prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template=\"我养了一只{pet}起个什么名字好呢?\",\n",
    "    )\n",
    "    chain = LLMChain(llm = llm,prompt=prompt)\n",
    "    for _ in range(5):\n",
    "        resp = chain.run(pet=\"猫\")\n",
    "        print(resp)\n",
    "\n",
    "# 异步函数\n",
    "async def async_generate(chain):\n",
    "    resp = chain.run(pet=\"狗\")\n",
    "    print(resp)\n",
    "\n",
    "#执行异步函数 多次\n",
    "async def generate_concurrently():\n",
    "    llm = ChatOpenAI(temperature=0.9)\n",
    "    prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template=\"我养了一只{pet}起个什么名字好呢?\",\n",
    "    )\n",
    "    chain = LLMChain(llm = llm,prompt=prompt)\n",
    "    tasks = [async_generate(chain) for _ in range(5)]\n",
    "    await asyncio.gather(*tasks)\n",
    "\n",
    "# s = time.perf_counter()\n",
    "# await generate_concurrently()\n",
    "# elapsed = time.perf_counter() - s\n",
    "# \n",
    "# print(\"\\033[1m\" + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")\n",
    "\n",
    "s = time.perf_counter()\n",
    "generate_serially()\n",
    "elapsed = time.perf_counter() - s\n",
    "print(\"\\033[1m\" + f\"Serial executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "4faaed895401960e",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.2 debug开关"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "70f94a8ce4859aba"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains import LLMChain\n",
    "\n",
    "llm = ChatOpenAI(temperature=0.9)\n",
    "prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template=\"我养了一只{pet},起个什么名字好呢？\"\n",
    ")\n",
     "#verbose 为True 开启 debug开关，可以跟踪 chain的完成情况\n",
    "chain = LLMChain(llm = llm,prompt=prompt,verbose=True)\n",
    "print(chain.run(\"小狗\"))\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "538ff43bb5c63514",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "\n",
    "## 2.3链接外部资源\n",
    "[调用LangChainHub](https://github.com/hwchase17/langchain-hub)\n",
    "LangChainHub是一个收集与LangChain基元（如提示，链和代理）相关的所有有用工件的地方。这个库的目标是成为分享和发现高质量提示、链和代理的中心资源，这些元素结合在一起可以形成复杂的LLM应用。LangChainHub从收集提示开始，期待LangChain社区对此进行扩充，希望很快能扩展到链和代理"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "5ca63311c62d0841"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.loading import load_chain\n",
    "chain = load_chain(\"lc://chains/llm-math/chain.json\")\n",
    "print(chain.run(\"2的6次方是多少？\"))"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "aec739f3e6143ea4",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.4 chain与memory的合作"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "89ea455ef00ceb19"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.conversation.base import ConversationChain\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(temperature=0.9)\n",
    "chain = ConversationChain(\n",
    "    llm = llm,\n",
    "    memory=ConversationBufferMemory()\n",
    ")\n",
    "#chat中的第一个问题\n",
    "print(chain.run(\"世界由哪几个大洲组成的，说前三个?\"))\n",
    "#chat中的第二个问题，通过memory记忆上下文\n",
    "print(chain.run(\"把剩余的洲告诉我？\"))\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "1ae5ec07686a10e5",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.5 保存到磁盘"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "39f0cde96ddc63e0"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.llm import LLMChain\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "llm = ChatOpenAI(temperature=0.9)\n",
    "prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template=\"我养了只{pet},起什么名字好呢?\"\n",
    ")\n",
    "chain = LLMChain(llm = llm,prompt=prompt)\n",
    "\n",
    "print(chain.run(\"狗\"))\n",
    "\n",
    "chain.save(\"./file/llm_chain.json\")"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "8a2fbc952d92efb6",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 3.Chain 与 LLM 基础用法"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e5eed77c331d1176"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.1 prompt template 的参数数组化"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "e33d44018d99c302"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains.llm import LLMChain\n",
    "\n",
    "llm = OpenAI(temperature=0.5)\n",
    "\n",
    "prompt = PromptTemplate(\n",
    "    input_variables=[\"pet\"],\n",
    "    template = \"我养了一只{pet},起个什么名字好呢？\"\n",
    ")\n",
    "#chain中引用参数数组\n",
    "input_list = [\n",
     "    {\"pet\":\"狗\"},\n",
     "    {\"pet\":\"猫\"},\n",
     "    {\"pet\":\"猪\"},\n",
    "]\n",
    "#将llm 和 prompt链接起来\n",
    "chain = LLMChain(llm = llm,prompt=prompt)\n",
    "#通过apply 接受参数数组\n",
    "chain.apply(input_list)\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "13f6a7411bc1310",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.2多参数"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1fe9027c6576fea2"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "template = \"\"\"我养了一只{pet}，出生在{season}，起个什么名字好？\"\"\"\n",
    "prompt = PromptTemplate(template = template,input_variables=[\"pet\",\"season\"])\n",
     "llm_chain = LLMChain(llm = OpenAI(temperature = 0),prompt=prompt)\n",
     "llm_chain.predict(pet=\"猫\",season=\"冬天\")"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "430e43df09e6e90c",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.3 定义输出格式"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "bb8baa1b7c4878c3"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.output_parsers import CommaSeparatedListOutputParser\n",
    "\n",
    "# 定义输出格式\n",
    "output_parser = CommaSeparatedListOutputParser()\n",
    "out_template = \"列出可以饲养的宠物\"\n",
    "prompt = PromptTemplate(template = out_template,input_variables=[],output_parser = output_parser)\n",
    "llm_chain = LLMChain(llm = llm,prompt=prompt)\n",
    "\n",
    "llm_chain.predict_and_parse()\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "cb052dce82d1c840",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.4 router"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9d6e7c84573be2de"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.router import MultiPromptChain\n",
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.chains import ConversationChain\n",
    "from langchain.chains.llm import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "#生成物理专业相关的prompt template\n",
    "physics_template = \"\"\"你是一位非常出色的物理家。你擅长回答物理问题。你之所以出色，是因为你能够将复杂的问题分解为各个部分，回答这些部分，然后将它们组合起来回答更广泛的问题。\n",
    "这里是问题：\n",
    "{input}\"\"\"\n",
    "\n",
    "#生成数学专业相关的prompt template\n",
    "math_template = \"\"\"你是一位非常出色的数学家。你擅长回答数学问题。你之所以出色，是因为你能够将复杂的问题分解为各个部分，回答这些部分，然后将它们组合起来回答更广泛的问题。\n",
    "\n",
    "这里是问题:\n",
    "{input}\"\"\"\n",
    "\n",
    "prompt_info = [\n",
    "    {\n",
    "        \"name\":\"physics\",\n",
    "        \"description\":\"更适合回答物理相关问题\",\n",
    "        \"prompt_template\":physics_template\n",
    "    },\n",
    "    {\n",
    "        \"name\":\"math\",\n",
    "        \"description\":\"更适合回答数学相关问题\",\n",
    "        \"prompt_template\":math_template\n",
    "    }\n",
    "]\n",
    "\n",
    "llm=  ChatOpenAI()\n",
    "\n",
    "destination_chains = {}\n",
    "\n",
    "for p_info in prompt_info:\n",
    "    name = p_info[\"name\"]\n",
    "    prompt_template = p_info[\"prompt_template\"]\n",
    "    prompt = PromptTemplate(template =prompt_template,input_variables=[\"input\"])\n",
    "    chain = LLMChain(llm=llm,prompt=prompt)\n",
    "    #把两个template 对应的chain 放到destination_chains中\n",
    "    #destination_chains 就是路由的目的chain\n",
    "    destination_chains[name] = chain\n",
    "\n",
     "#如果destination_chains 没有被选中（既不是物理问题也不是数学问题）\n",
    "#就选择default_chain 作为默认的chain\n",
    "default_chain = ConversationChain(llm=llm,output_key=\"text\",verbose=True)\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "f7c18d10b1e2294d",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "MULTI_PROMPT_ROUTER_TEMPLATE是用于生成问题的模板，它会将输入的问题与所有可能的目标链（即数学和物理）的描述组合起来。\n",
    "\n",
    "在生成的问题中，LLM需要选择最适合的答案，也就是说，选择最能解答输入问题的目标链。LLM是通过评估输入问题与各个目标链描述的相似性来做出选择的。例如，如果输入的问题是关于物理的，那么与物理相关的目标链描述会与输入问题有更高的相似性，因此LLM就会选择物理链。同样，如果问题是关于数学的，那么LLM就会选择数学链。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "db3aa3d1e8a5c2b1"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser\n",
    "from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE\n",
    "#生成destination string\n",
    "destinations = [f\"{p['name']}: {p['description']}\" for p in prompt_info]\n",
    "destinations_str = \"\\n\".join(destinations)\n",
    "#准备组合destination router 与输入的内容\n",
    "router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations_str)\n",
    "#组合destination router 与输入的内容 放入到router prompt中\n",
    "router_prompt = PromptTemplate(\n",
    "    template=router_template,\n",
    "    input_variables=[\"input\"],\n",
    "    output_parser=RouterOutputParser(),\n",
    ")\n",
    "#请求 LLM 获取合适的组合\n",
    "router_chain = LLMRouterChain.from_llm(llm, router_prompt)\n",
    "#根据合适的组合生成chain\n",
    "chain = MultiPromptChain(\n",
    "    router_chain=router_chain,\n",
    "    destination_chains=destination_chains,\n",
    "    default_chain=default_chain,\n",
    "    verbose=True,\n",
    ")\n",
    "#执行chain\n",
    "print(chain.run(\"世界有多少人口\"))"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "73ab2afa1da1281e",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 4.SequentialChain"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "10ef6c7a2c6ed193"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 4.1简单顺序Chain"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1c9b5a1e369af7f8"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains.sequential import SimpleSequentialChain\n",
    "\n",
    "# chain 写一个剧情概要\n",
    "llm = OpenAI(temperature=.7)\n",
    "template = \"\"\"你是一个剧作家。给出剧目的标题，你的任务是为该标题写一个剧情概要。.\n",
    "\n",
    "标题: {title}\n",
    "剧作家: 这是上述剧目的剧情概要:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
    "\n",
    "# chain 写一个剧本评论\n",
    "llm = OpenAI(temperature=.7)\n",
    "template = \"\"\"你是剧评人。给出剧本的概要，你的任务是为该剧编写评论。\n",
    "\n",
    "剧本概述:\n",
    "{synopsis}\n",
    "来自剧评人对上述剧目的评论:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"synopsis\"], template=template)\n",
    "review_chain = LLMChain(llm=llm, prompt=prompt_template)\n",
    "\n",
    "# 运行两个Chain ，针对\"剧情概要\"进行\"剧本评论\"\n",
    "overall_chain = SimpleSequentialChain(chains=[synopsis_chain, review_chain],verbose=True)\n",
    "#chains 变量对应的数组，按照chain的执行顺序进行。\n",
    "#放在前面的synopsis_chain先执行。它的输入【title】就是overall_chain中run方法的输入\n",
    "#它的输出会作为review_chain 的输入【synopsis】\n",
    "review = overall_chain.run(\"孙悟空大闹天宫\")\n",
    "print(review)"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "562494d972bd6704",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 4.2 定义chain的输出和下个chain的输入"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "c9f58fa11444e136"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains.sequential import SimpleSequentialChain\n",
    "\n",
    "# chain 写一个剧情概要\n",
    "llm = OpenAI(temperature=.7)\n",
    "template = \"\"\"你是一个剧作家。给出剧目的标题，你的任务是为该标题写一个剧情概要。.\n",
    "\n",
    "标题: {title}\n",
    "年代: {era}\n",
    "剧作家: 这是上述剧目的剧情概要:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"title\",\"era\"], template=template)\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template,output_key=\"synopsis\")\n",
    "\n",
    "# chain 写一个剧本评论\n",
    "llm = OpenAI(temperature=.7)\n",
    "template = \"\"\"你是剧评人。给出剧本的概要，你的任务是为该剧编写评论。\n",
    "\n",
    "剧本概述:\n",
    "{synopsis}\n",
    "来自剧评人对上述剧目的评论:\"\"\"\n",
    "#定义输入是 synopsis，正好是上一个chain的输出\n",
    "prompt_template = PromptTemplate(input_variables=[\"synopsis\"], template=template)\n",
    "#定义影评chain的输出 key 是 review\n",
    "review_chain = LLMChain(llm=llm, prompt=prompt_template,output_key=\"review\")\n",
    "#定义顺序执行chain，按照chains 中数组的顺序执行。\n",
    "#[\"era\", \"title\"] 作为第一个chain synopsis_chain 的输入\n",
    "from langchain.chains import SequentialChain\n",
    "overall_chain = SequentialChain(\n",
    "    chains=[synopsis_chain, review_chain]\n",
    "    ,input_variables=[\"title\",\"era\"]\n",
    "    ,output_variables=[\"synopsis\",\"review\"]\n",
    "    ,verbose=True)\n",
    "overall_chain({\"title\":\"孙悟空大闹天宫！\", \"era\": \"上古时代\"})"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "c7b6eca5c367ad02",
   "execution_count": null
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains import SequentialChain\n",
    "from langchain.memory import SimpleMemory\n",
    "llm = OpenAI(temperature=.7)\n",
    "template = \"\"\"你是一家剧院公司的社交媒体经理。给出戏剧的标题、设定的时代、日期、时间和地点，以及戏剧的概要和评论，你的工作就是为这部戏剧写一个社交媒体的帖子。\n",
    "\n",
    "以下是关于戏剧时间和地点的一些背景信息：\n",
    "日期和时间：{time}\n",
    "地点：{location}\n",
    "\n",
    "剧本概要：\n",
    "{synopsis}\n",
    "来自剧评人对上述戏剧的评论：\n",
    "{review}\n",
    "\n",
    "社交媒体帖子：\n",
    "\"\"\"\n",
    "\n",
    "prompt_template = PromptTemplate(input_variables=[\"time\",\"location\",\"synopsis\",\"review\"],template = template)\n",
    "social_chain = LLMChain(llm = llm, prompt = prompt_template,output_key=\"social_post_text\")\n",
    "\n",
    "overall_chain = SequentialChain(\n",
    "    memory=SimpleMemory(memories = {\"time\": \"7月23日, 下午8点\", \"location\": \"光明剧院\"}),\n",
    "    chains = [synopsis_chain, review_chain, social_chain],\n",
    "    input_variables=[\"era\",\"title\"],\n",
    "    output_variables=[\"social_post_text\"],\n",
    "    verbose=True\n",
    ")\n",
    "overall_chain({\"title\":\"猪八戒大闹花果山\",\"era\":\"冰河时代\"})\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "7981cde389af07",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
     "# 5. TransformChain"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6e1c4b2203a74151"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.transform import TransformChain\n",
    "from langchain.chains.sequential import SimpleSequentialChain\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.llms import OpenAI\n",
    "#读取文件\n",
    "with open(\"./file/story.txt\") as f:\n",
    "    story_file = f.read()\n",
    "#转换函数\n",
    "def transform_func(inputs: dict) -> dict :\n",
    "    text = inputs[\"text\"]\n",
    "    #获取文本中前3句话\n",
    "    shortened_text = \"\\n\\n\".join(text.split(\"\\n\\n\")[:3])\n",
    "    return {\"output_text\": shortened_text}\n",
    "\n",
    "#生成tranformation chain，输入为文本， 输出为3句话摘要\n",
    "transform_chain = TransformChain(input_variables=[\"text\"],output_variables=[\"output_text\"], transform=transform_func)\n",
    "\n",
    "template = \"\"\"这里是摘要:\n",
    "\n",
    "{output_text}\n",
    "\n",
    "摘要:\"\"\"\n",
    "\n",
    "#创建摘要的prompt_template\n",
    "prompt_template = PromptTemplate(input_variables=[\"output_text\"],template=template)\n",
    "#生成摘要Chain\n",
    "llm_chain = LLMChain(llm=OpenAI(), prompt=prompt_template)\n",
    "\n",
    "#利用SimpleSequentialChain，将两个chain 集成， 参数默认传递\n",
    "over_chain = SimpleSequentialChain(chains=[transform_chain,llm_chain])\n",
    "\n",
    "over_chain.run(story_file)\n"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "174555ea15f553c2",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
     "# 7. SQL Chain\n",
     "Langchain支持如下sql: MS SQL, MySQL, MariaDB, PostgreSQL, Oracle SQL, Databricks and SQLite。\n",
     "MySQL connection URL: `mysql+pymysql://user:pass@some_mysql_db_address/db_name`"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a97799707d5b67ed"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "!apt-get install -y sqlite3"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "75480dc55ecf21e6",
   "execution_count": null
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "!sqlite3 Chinook.db"
   ],
   "metadata": {
    "collapsed": false,
    "is_executing": true
   },
   "id": "c09ac8bb78ef2667",
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 7.2连接Sqlite 数据库，并且查询数据"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "8091f6c93e85656a"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SELECT COUNT(*) FROM Employee\n"
     ]
    }
   ],
   "source": [
    "from langchain import OpenAI,SQLDatabase\n",
    "from langchain.chains.sql_database.query import create_sql_query_chain\n",
    "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n",
    "llm = OpenAI(temperature=0,verbose=True)\n",
    "db_chain = create_sql_query_chain(llm,db)\n",
    "response = db_chain.invoke({\"question\": \"How many employees are there\"})\n",
    "print(response)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-27T15:20:30.359541Z",
     "start_time": "2024-08-27T15:20:28.364040Z"
    }
   },
   "id": "7d407bf7cd5e44aa",
   "execution_count": 3
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "1eff07f4e631c98c"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 7.3根据条件进行表连接查询"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "592aacb8c65abdd6"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SELECT COUNT(*) FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = \"Aerosmith\")\n"
     ]
    }
   ],
   "source": [
    "response = db_chain.invoke({\"question\":\"有多少个Aerosmith的专辑?\"})\n",
    "print(response)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-27T15:21:11.833808Z",
     "start_time": "2024-08-27T15:21:10.706382Z"
    }
   },
   "id": "1e0d49a71a2699ca",
   "execution_count": 4
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "73ff3d2839839272"
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 8. 摘要"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "dc91bb27e3c87836"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The class `OpenAI` was deprecated in LangChain 0.0.10 and will be removed in 0.3.0. An updated version of the class exists in the langchain-openai package and should be used instead. To use it run `pip install -U langchain-openai` and import as `from langchain_openai import OpenAI`.\n",
      "  warn_deprecated(\n"
     ]
    }
   ],
   "source": [
    "from langchain import OpenAI, PromptTemplate, LLMChain\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "from langchain.chains.mapreduce import MapReduceChain,MapReduceDocumentsChain\n",
    "from langchain.prompts import PromptTemplate\n",
    "\n",
    "llm = OpenAI(temperature=0)\n",
    "\n",
    "text_splitter = CharacterTextSplitter()\n",
    "\n",
    "with open(\"./file/story.txt\") as r :\n",
    "    story = r.read()\n",
    "\n",
    "texts = text_splitter.split_text(story)\n",
    "\n",
    "\n",
    "from langchain.docstore.document import Document\n",
    "\n",
    "docs = [Document(page_content=t) for t in texts[:3]]"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T14:56:37.154495Z",
     "start_time": "2024-09-05T14:56:36.920168Z"
    }
   },
   "id": "8e6647b950cb2b4e",
   "execution_count": 3
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 8.1 stuff"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1efe23928b90b937"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 0.3.0. Use invoke instead.\n",
      "  warn_deprecated(\n"
     ]
    },
    {
     "data": {
      "text/plain": "'\\n\\n这是一个关于一只憨态可掬的小老虎泰格和一只纯洁无瑕的小白兔莉莉的故事。尽管他们的种族和生活习性不同，但他们却是最好的朋友。一天，他们发现一颗巨大的金色果实，为了摘取它，他们决定通力合作。然而，美好的时光总是短暂的，一个邪恶的猎人闯进森林，想要捉拿泰格。莉莉决定冒险去寻找一块神秘的宝石来解除陷阱，最终成功救出了泰格。从此，森林里的动物们开始和睦共处，彼此尊重，守护家园。莉莉和'"
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.chains.summarize import load_summarize_chain\n",
    "prompt_template = \"\"\"根据下面的内容生成摘要:\n",
    "\n",
    "{text}\n",
    "\n",
    "摘要内容以中文显示:\"\"\"\n",
    "\n",
    "prompt = PromptTemplate(template = prompt_template,input_variables=[\"text\"])\n",
    "chain = load_summarize_chain(llm=llm,chain_type=\"stuff\",prompt = prompt)\n",
    "chain.run(docs)\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T14:56:48.315729Z",
     "start_time": "2024-09-05T14:56:40.276671Z"
    }
   },
   "id": "39d0f69a1e42cfef",
   "execution_count": 4
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 8.2 mapReduce"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "2729a7075074640"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\r\n",
      "Requirement already satisfied: tiktoken in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (0.5.2)\r\n",
      "Requirement already satisfied: regex>=2022.1.18 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from tiktoken) (2024.4.16)\r\n",
      "Requirement already satisfied: requests>=2.26.0 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from tiktoken) (2.31.0)\r\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (3.3.2)\r\n",
      "Requirement already satisfied: idna<4,>=2.5 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (3.7)\r\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (2.2.1)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages (from requests>=2.26.0->tiktoken) (2024.2.2)\r\n",
      "\r\n",
      "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m24.0\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.2\u001B[0m\r\n",
      "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpip3 install --upgrade pip\u001B[0m\r\n",
      "Note: you may need to restart the kernel to use updated packages.\n"
     ]
    }
   ],
   "source": [
    "%pip install tiktoken"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T14:51:27.257196Z",
     "start_time": "2024-09-05T14:51:24.235931Z"
    }
   },
   "id": "7ed8f2a9e92a3a02",
   "execution_count": 5
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 一般情况"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "19dead2e2bc90f3f"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/langchain_core/_api/deprecation.py:139: LangChainDeprecationWarning: The method `Chain.__call__` was deprecated in langchain 0.1.0 and will be removed in 0.3.0. Use invoke instead.\n",
      "  warn_deprecated(\n"
     ]
    },
    {
     "data": {
       "text/plain": "{'input_documents': [Document(page_content='很久很久以前，有一片幽深的森林，森林里住着各种各样的动物。在这里，生活着一只憨态可掬的小老虎，名叫泰格，以及一只纯洁无瑕的小白兔，名叫莉莉。尽管他们的种族、生活习性不同，他们却是最好的朋友。\\n\\n一天，泰格和莉莉正在森林中欢快地玩耍。突然，他们看到一颗巨大的金色果实高高挂在一棵巨大的树上。他们都被这颗金色的果实吸引，但它摘取这个果实需要通力合作。因此，他们决定一起尝试摘取果实。泰格用他强壮的身体爬上树干，而莉莉则坐在他的背上，用她的敏捷和轻盈摘下了果实。这是他们共同努力的结果，他们的友情也因此而更加坚固。\\n\\n然而，美好的时光总是短暂的。有一天，一个邪恶的猎人闯进了森林，他决定捉拿泰格，因为他觉得泰格的毛皮非常珍贵。猎人在泰格常去的水源处设下了陷阱，而无知的泰格就这样落入了猎人的陷阱。\\n\\n莉莉看到泰格陷入危险，害怕又担心。尽管她是一只小小的白兔，没有强大的力量，但她决定尽自己的努力去救泰格。她找到了森林中的智者，一只老乌龟，向他寻求帮助。老乌龟告诉莉莉，只有找到神秘的宝石，才能解除陷阱。\\n\\n小白兔莉莉决定冒险去寻找这块神秘的宝石。她独自穿过森林，跨过河流，攀上山丘，经历了无数的困难与危险。最终，莉莉在一片密布魔法的花海中找到了这块神秘的宝石。\\n\\n持有宝石的莉莉回到了陷阱所在地，她用尽全身的力气，对着陷阱呼唤出宝石的力量。在一道耀眼的光芒后，陷阱被解除了，泰格得救了。\\n\\n看到莉莉的勇气和决心，所有的森林动物都深受感动。他们不再只是彼此的食物，而是真正的朋友。从那天起，森林里的动物们开始和睦共处，他们相互帮助，彼此尊重，一同守护他们的家园。\\n\\n莉莉和泰格的故事在森林中流传，他们的友谊和勇气成为了所有动物的楷模。他们的故事告诉我们，真正的朋友会在你需要的时候站出来帮助你，无论他是强大的老虎，还是柔弱的兔子。友谊的力量超越了种族和身份，是我们在生活中最宝贵的财富。')],\n 'intermediate_steps': ['\\n\\nIn a deep forest, a friendly tiger named Tigger and a pure white rabbit named Lily live as best friends despite their differences. One day, they work together to retrieve a golden fruit from a tall tree, strengthening their bond. However, their peaceful life is disrupted when an evil hunter sets a trap for Tigger. Lily, with the help of a wise turtle, embarks on a dangerous journey to find a magical gem that can save Tigger. Her bravery and determination inspire all the animals in the forest to become friends and protect their home together. The story of Lily and Tigger teaches the value of true friendship and how it transcends differences in strength and identity.'],\n 'output_text': '\\n\\nA tiger and rabbit, Tigger and Lily, become best friends in a forest and work together to retrieve a golden fruit. When Tigger is in danger, Lily goes on a journey to find a magical gem with the help of a wise turtle. Their friendship inspires all the animals in the forest to unite and protect their home. The story highlights the importance of true friendship and how it can overcome differences.'}"
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.chains.summarize import load_summarize_chain\n",
    "\n",
    "chain = load_summarize_chain(OpenAI(temperature=0),chain_type=\"map_reduce\",return_intermediate_steps=True)\n",
    "\n",
    "chain({\"input_documents\":docs})\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T14:57:00.991170Z",
     "start_time": "2024-09-05T14:56:53.608013Z"
    }
   },
   "id": "f33f96a92be4fa2",
   "execution_count": 5
  },
  {
   "cell_type": "markdown",
   "source": [
    "`MapReduceChain`和`MapReduceDocumentsChain`都是实现MapReduce模式的类，不过它们的作用和使用场景有所不同。\n",
    "\n",
    "`MapReduceDocumentsChain`主要负责处理一个\"文档列表\"，即一组相互独立的输入。它将\"map\"阶段（通过指定的LLM链）应用于每个文档，并将产生的结果作为新的文档。然后，它使用一个单独的\"reduce\"阶段（通过指定的`reduce_documents_chain`）来处理映射后的文档，并将它们合并成一个单一的输出。\n",
    "\n",
    "另一方面，`MapReduceChain`是一个更高级别的抽象，它包含了对文档的分割和处理的所有步骤。这包括使用`text_splitter`对输入文本进行分割，使用`combine_documents_chain`对分割后的文档进行处理（这通常包括`MapReduceDocumentsChain`的使用），并根据需要递归地应用这个过程。\n",
    "\n",
    "具体来说，这段代码中的`MapReduceChain`将输入代码分割成独立的函数定义，并使用`combine_documents_chain`（一个`MapReduceDocumentsChain`实例）分别对它们进行处理。`MapReduceDocumentsChain`首先将每个函数通过`map_llm_chain`映射为一个描述和时间复杂度的说明，然后将这些说明合并并根据提出的问题生成最终的答案。\n",
    "\n",
    "总的来说，`MapReduceDocumentsChain`和`MapReduceChain`都实现了MapReduce模式，不过`MapReduceChain`提供了一个更完整的流程，包括对输入的分割和处理的各个步骤，而`MapReduceDocumentsChain`则专注于处理已经分割好的文档列表。\n"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "62d6ed9003443da8"
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain,ReduceDocumentsChain\n",
    "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
    "\n",
    "map_template_string = \"\"\"\n",
    "给出以下的Python代码信息，生成一个描述来解释代码的功能，并同时提及其时间复杂度。\n",
    "代码：\n",
    "{code}\n",
    "\n",
    "以以下格式返回描述：\n",
    "函数名：函数的描述\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "reduce_template_string = \"\"\"\n",
    "给出以下Python函数的名称和描述，回答下列问题\n",
    "{code_description}\n",
    "问题：{question}\n",
    "答案：\n",
    "\"\"\"\n",
    "\n",
    "#定义map 的prompt 输入内容就是代码\n",
    "map_prompt = PromptTemplate(template=map_template_string,input_variables=[\"code\"])\n",
    "#定义reduce的prompt，输入内容是代码描述和要问的问题\n",
    "reduce_prompt = PromptTemplate(template=reduce_template_string,input_variables=[\"code_description\",\"question\"])\n",
    "\n",
    "llm = OpenAI()\n",
    "#生成基于map prompt 的chain\n",
    "map_llm_chain = LLMChain(llm=llm,prompt=map_prompt,verbose=True)\n",
    "#生成基于reduce prompt 的chain\n",
    "reduce_llm_chain = LLMChain(llm=llm,prompt=reduce_prompt,verbose=True)\n",
    "\n",
    "# 把reduce chain 和文档的描述直接塞给StuffDocumentsChain，生成最终合并之后的文档\n",
    "combine_documents_chain = StuffDocumentsChain(\n",
    "    llm_chain=reduce_llm_chain,\n",
    "    document_variable_name=\"code_description\",verbose=True\n",
    ")\n",
    "#递归合并reduce 文档，定义最终文档combine_documents_chain\n",
    "#如果超出范围（3000token），使用combine_documents_chain进行压缩\n",
    "reduce_documents_chain = ReduceDocumentsChain(\n",
    "    combine_documents_chain = combine_documents_chain,\n",
    "    collapse_documents_chain = combine_documents_chain,\n",
    "    token_max = 3000,verbose=True\n",
    ")\n",
    "# 通过map chain 逐个处理文档，再将各结果交给reduce chain 进行合并。\n",
    "combine_documents = MapReduceDocumentsChain(\n",
    "    llm_chain = map_llm_chain,\n",
    "    reduce_documents_chain=reduce_documents_chain,\n",
    "    document_variable_name=\"code\",verbose=True\n",
    ")\n",
    "\n",
    "map_reduce = MapReduceChain(\n",
    "    combine_documents_chain = combine_documents,\n",
    "    text_splitter=CharacterTextSplitter(separator=\"\\n##\\n\", chunk_size=100, chunk_overlap=0),verbose=True\n",
    ")\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T15:30:41.640531Z",
     "start_time": "2024-09-05T15:30:41.626315Z"
    }
   },
   "id": "9ec7d4e1253870e5",
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "outputs": [],
   "source": [
    "code = \"\"\"\n",
    "def bubblesort(list):\n",
    "   for iter_num in range(len(list)-1,0,-1):\n",
    "      for idx in range(iter_num):\n",
    "         if list[idx]>list[idx+1]:\n",
    "            temp = list[idx]\n",
    "            list[idx] = list[idx+1]\n",
    "            list[idx+1] = temp\n",
    "    return list\n",
    "##\n",
    "def insertion_sort(InputList):\n",
    "   for i in range(1, len(InputList)):\n",
    "      j = i-1\n",
    "      nxt_element = InputList[i]\n",
    "   while (InputList[j] > nxt_element) and (j >= 0):\n",
    "      InputList[j+1] = InputList[j]\n",
    "      j=j-1\n",
    "   InputList[j+1] = nxt_element\n",
    "   return InputList\n",
    "##\n",
    "def shellSort(input_list):\n",
    "   gap = len(input_list) // 2\n",
    "   while gap > 0:\n",
    "      for i in range(gap, len(input_list)):\n",
    "         temp = input_list[i]\n",
    "         j = i\n",
    "   while j >= gap and input_list[j - gap] > temp:\n",
    "      input_list[j] = input_list[j - gap]\n",
    "      j = j-gap\n",
    "      input_list[j] = temp\n",
    "   gap = gap//2\n",
    "   return input_list\n",
    "\n",
    "\"\"\""
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T15:30:44.200450Z",
     "start_time": "2024-09-05T15:30:44.191964Z"
    }
   },
   "id": "f9135f3256352ee9",
   "execution_count": 12
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Created a chunk of size 247, which is longer than the specified 100\n",
      "Created a chunk of size 267, which is longer than the specified 100\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new MapReduceChain chain...\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new MapReduceDocumentsChain chain...\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "给出以下的Python代码信息，生成一个描述来解释代码的功能，并同时提及其时间复杂度。\n",
      "代码：\n",
      "def bubblesort(list):\n",
      "   for iter_num in range(len(list)-1,0,-1):\n",
      "      for idx in range(iter_num):\n",
      "         if list[idx]>list[idx+1]:\n",
      "            temp = list[idx]\n",
      "            list[idx] = list[idx+1]\n",
      "            list[idx+1] = temp\n",
      "    return list\n",
      "\n",
      "以以下格式返回描述：\n",
      "函数名：函数的描述\n",
      "\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "给出以下的Python代码信息，生成一个描述来解释代码的功能，并同时提及其时间复杂度。\n",
      "代码：\n",
      "def insertion_sort(InputList):\n",
      "   for i in range(1, len(InputList)):\n",
      "      j = i-1\n",
      "      nxt_element = InputList[i]\n",
      "   while (InputList[j] > nxt_element) and (j >= 0):\n",
      "      InputList[j+1] = InputList[j]\n",
      "      j=j-1\n",
      "   InputList[j+1] = nxt_element\n",
      "   return InputList\n",
      "\n",
      "以以下格式返回描述：\n",
      "函数名：函数的描述\n",
      "\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "给出以下的Python代码信息，生成一个描述来解释代码的功能，并同时提及其时间复杂度。\n",
      "代码：\n",
      "def shellSort(input_list):\n",
      "   gap = len(input_list) // 2\n",
      "   while gap > 0:\n",
      "      for i in range(gap, len(input_list)):\n",
      "         temp = input_list[i]\n",
      "         j = i\n",
      "   while j >= gap and input_list[j - gap] > temp:\n",
      "      input_list[j] = input_list[j - gap]\n",
      "      j = j-gap\n",
      "      input_list[j] = temp\n",
      "   gap = gap//2\n",
      "   return input_list\n",
      "\n",
      "以以下格式返回描述：\n",
      "函数名：函数的描述\n",
      "\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m\n",
      "给出以下Python函数的名称和描述，回答下列问题\n",
      "时间复杂度：\n",
      "函数名：bubblesort\n",
      "函数的描述：冒泡排序是一种简单的排序算法，它重复地遍历要排序的列表，每次比较相邻的两个元素，如果顺序不对则交换位置，直到整个列表都按照从小到大的顺序排列。\n",
      "时间复杂度：O(n^2)，因为需要两层循环来遍历和比较列表元素，最坏情况下需要遍历n次，每次遍历需要比较n-1次，总的时间复杂度为n*(n-1)，即O(n^2)。\n",
      "\n",
      "时间复杂度：代码的时间复杂度\n",
      "函数名：insertion_sort\n",
      "时间复杂度：O(n^2)\n",
      "\n",
      "函数的描述：该函数实现了插入排序算法，通过遍历列表中的每个元素，将当前元素与其前面的元素进行比较，如果前面的元素大于当前元素，则将前面的元素往后移动一位，直到找到合适的位置插入当前元素。重复以上步骤直到遍历完整个列表，最终得到有序的列表。\n",
      "时间复杂度：该算法的时间复杂度为O(n^2)，因为需要遍历n个元素，每次比较需要O(n)的时间复杂度，总共需要进行n次比较，因此时间复杂度为O(n^2)。\n",
      "\n",
      "时间复杂度：代码的时间复杂度\n",
      "\n",
      "函数名：shellSort\n",
      "时间复杂度：O(n^2)\n",
      "问题：哪段代码具有最优的时间复杂度?\n",
      "答案：\n",
      "\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "'冒泡排序、插入排序和希尔排序都具有O(n^2)的时间复杂度，它们在最坏情况下的时间复杂度都是O(n^2)，因此它们都不是具有最优时间复杂度的代码。最优时间复杂度的代码应该是具有O(n)的时间复杂度，例如快速排序、归并排序等。'"
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "map_reduce.run(input_text=code,question=\"哪段代码具有最优的时间复杂度?\",verbose=True)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-09-05T15:30:52.749151Z",
     "start_time": "2024-09-05T15:30:46.880899Z"
    }
   },
   "id": "da3b97bb4d1cec94",
   "execution_count": 13
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
