{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# 运行自定义函数\n",
    "您可以在流水线中使用任意函数。\n",
    "\n",
    "请注意，这些函数的所有输入都需要是一个参数。如果您有一个接受多个参数的函数，您应该编写一个接受单个输入并将其解包为多个参数的包装器函数。\n",
    "\n",
    "安装依赖：`%pip install --upgrade --quiet langchain langchain-openai`\n",
    "\n",
    "> 也可参考「操作输入和输出」"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6be0c276be3100fb"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "data": {
      "text/plain": "'3 + 6 equals 9.'"
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.output_parsers import StrOutputParser\n",
    "import os\n",
    "from operator import itemgetter\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_core.runnables import RunnableLambda\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "\n",
    "def length_func(text):\n",
    "    \"\"\"Return the length of a string.\n",
    "\n",
    "    :param text: input string\n",
    "    :return: number of characters in ``text``\n",
    "    \"\"\"\n",
    "    return len(text)\n",
    "\n",
    "\n",
    "def _multiple_length_func(text1, text2):\n",
    "    \"\"\"Return the product of the lengths of two strings.\"\"\"\n",
    "    return len(text1) * len(text2)\n",
    "\n",
    "\n",
    "def multiple_length_func(_dict):\n",
    "    \"\"\"Unpack a single dict input into the two arguments of\n",
    "    ``_multiple_length_func``.\n",
    "\n",
    "    RunnableLambda only accepts single-argument callables, so a\n",
    "    multi-argument function needs a wrapper like this one.\n",
    "\n",
    "    :param _dict: mapping with keys \"text1\" and \"text2\"\n",
    "    :return: len(text1) * len(text2)\n",
    "    \"\"\"\n",
    "    return _multiple_length_func(_dict[\"text1\"], _dict[\"text2\"])\n",
    "\n",
    "\n",
    "template = \"\"\"\n",
    "    what is {a} + {b}\n",
    "    \"\"\"\n",
    "\n",
    "prompt = ChatPromptTemplate.from_template(template)\n",
    "\n",
    "# max_retries=0 avoids retrying on rate limits and similar transient errors.\n",
    "llm1 = ChatOpenAI(\n",
    "    # If the environment variable is not set, replace the next line with\n",
    "    # your Bailian API key, e.g. api_key=\"sk-xxx\",\n",
    "    openai_api_key=os.getenv(\"DASHSCOPE_API_KEY\"),\n",
    "    openai_api_base=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model_name=\"qwen-max\",\n",
    "    max_retries=0,\n",
    ")\n",
    "\n",
    "# \"a\" <- len(\"foo\" value), \"b\" <- len(\"foo\" value) * len(\"bar\" value)\n",
    "chain = {\n",
    "            \"a\": itemgetter(\"foo\") | RunnableLambda(length_func),\n",
    "            \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")} |\n",
    "                 RunnableLambda(multiple_length_func),\n",
    "        } | prompt | llm1 | StrOutputParser()\n",
    "\n",
    "chain.invoke({\"foo\": \"go\", \"bar\": \"gah\"})  # '2 + 6 equals 8.'\n"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-28T07:58:34.130167Z",
     "start_time": "2024-10-28T07:58:33.179782Z"
    }
   },
   "id": "b4372328bd1e2988",
   "execution_count": 6
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 接受可运行配置\n",
    "可运行的lambda函数可以选择接受一个`RunnableConfig`，它们可以使用该配置传递回调、标签和其他配置信息给嵌套运行。"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "de184948f757037b"
  },
  {
   "cell_type": "code",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{foo:bar}\n",
      "Expecting property name enclosed in double quotes: line 1 column 2 (char 1)\n",
      "Expecting value: line 1 column 1 (char 0)\n",
      "Expecting value: line 1 column 1 (char 0)\n",
      "Failed to parse\n",
      "Tokens Used: 228\n",
      "\tPrompt Tokens: 201\n",
      "\tCompletion Tokens: 27\n",
      "Successful Requests: 3\n",
      "Total Cost (USD): $0.0\n"
     ]
    }
   ],
   "source": [
    "from langchain_community.callbacks import get_openai_callback\n",
    "from langchain_core.runnables import RunnableConfig\n",
    "import json\n",
    "\n",
    "\n",
    "def parse_or_fix(text: str, config: RunnableConfig):\n",
    "    \"\"\"\n",
    "    该函数尝试解析输入的 JSON 字符串 text，如果解析失败，则使用一个预定义的链 fixing_chain 来尝试修复并重新解析。如果经过三次尝试后仍然无法解析，则返回 \"Failed to parse\"。以下是详细的代码解释\n",
    "    :param text: \n",
    "    :param config: \n",
    "    :return: \n",
    "    \"\"\"\n",
    "    fixing_chain = ChatPromptTemplate.from_template(\n",
    "        \"Fix the following text:\\n\\n```text\\n{input}\\n```\\nError: {error}\"\n",
    "        \" Don't narrate, just respond with the fixed data.\"\n",
    "    ) | llm1 | StrOutputParser()\n",
    "    print(text)\n",
    "    for _ in range(3):\n",
    "        try:\n",
    "            return json.loads(text)\n",
    "        except Exception as e:\n",
    "            print(e)\n",
    "            text = fixing_chain.invoke({\"input\": text, \"error\": e}, config)\n",
    "    return \"Failed to parse\"\n",
    "\n",
    "\n",
    "with get_openai_callback() as cb:\n",
    "    output = RunnableLambda(parse_or_fix).invoke(\"{foo:bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]})\n",
    "    print(output)\n",
    "    print(cb)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-10-28T08:17:41.738777Z",
     "start_time": "2024-10-28T08:17:38.465946Z"
    }
   },
   "id": "c9d3fc0dd5c5c407",
   "execution_count": 9
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
