{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9bbbd268-ed1b-4b1f-963e-dc21ba42b702",
   "metadata": {},
   "source": [
    "# Callbacks 模块"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0c959e3b-35c2-4251-8b29-6db513ec921b",
   "metadata": {},
   "source": [
    "回调模块允许连接到LLM应用程序的各个阶段，鉴于LLM的幻觉问题，这对于日志记录、监视、流式处理和其他任务非常有用"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e1985215-7df8-4bec-9100-ad655ff73995",
   "metadata": {},
   "source": [
    "## 1. 支持的回调方法"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "CallbackHandlers 回调处理器类：指的是实现 CallbackHandler 接口的对象，\n",
    "该接口为每个可以订阅的事件都有一个方法。触发事件时，CallbackManager 将在每个处理程序上调用相应的方法。\n",
    "\n",
    "下面是用于处理langchain回调的，基础的回调处理类，以及包含的所有支持的事件方法"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9d92339-2cb6-4147-b9dd-55f7a26af3f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Any, Dict, List, Union\n",
    "\n",
    "from langchain_core.agents import AgentAction, AgentFinish\n",
    "from langchain_core.messages import BaseMessage\n",
    "from langchain_core.outputs import LLMResult\n",
    "\n",
    "\n",
    "class BaseCallbackHandler:\n",
    "    \"\"\"Base callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
    "\n",
    "    def on_llm_start(\n",
    "        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when LLM starts running.\"\"\"\n",
    "\n",
    "    def on_chat_model_start(\n",
    "        self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when Chat Model starts running.\"\"\"\n",
    "\n",
    "    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run on new LLM token. Only available when streaming is enabled.\"\"\"\n",
    "\n",
    "    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run when LLM ends running.\"\"\"\n",
    "\n",
    "    def on_llm_error(\n",
    "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when LLM errors.\"\"\"\n",
    "\n",
    "    def on_chain_start(\n",
    "        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when chain starts running.\"\"\"\n",
    "\n",
    "    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:\n",
    "        \"\"\"Run when chain ends running.\"\"\"\n",
    "\n",
    "    def on_chain_error(\n",
    "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when chain errors.\"\"\"\n",
    "\n",
    "    def on_tool_start(\n",
    "        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when tool starts running.\"\"\"\n",
    "\n",
    "    def on_tool_end(self, output: Any, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run when tool ends running.\"\"\"\n",
    "\n",
    "    def on_tool_error(\n",
    "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when tool errors.\"\"\"\n",
    "\n",
    "    def on_text(self, text: str, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run on arbitrary text.\"\"\"\n",
    "\n",
    "    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run on agent action.\"\"\"\n",
    "\n",
    "    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:\n",
    "        \"\"\"Run on agent end.\"\"\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f35bb1cf-f6da-463a-90ba-188628a624dd",
   "metadata": {},
   "source": [
    "## 2. 简单案例"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "将所有的事件记录到输出结果中"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2e50fa9f-dbcd-4acc-b082-3de114becb3f",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m1 + 2 = \u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "{'number': 2, 'text': '3'}"
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain.callbacks import StdOutCallbackHandler\n",
    "from langchain.chains import LLMChain\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "\n",
    "#设置回调函数，采用框架提供的回调输出函数\n",
    "handler = StdOutCallbackHandler()\n",
    "\n",
    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo-0125\")\n",
    "\n",
    "prompt = PromptTemplate.from_template(\"1 + {number} = \")\n",
    "\n",
    "# 将回调函数设置到LLM链中StdOutCallbackHandler\n",
    "chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])\n",
    "chain.invoke({\"number\":2})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4ba766b6-8f64-4ef1-8e6d-f6db2f7a4781",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m1 + 2 = \u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "{'number': 2, 'text': '3'}"
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 通过使用verbose参数查看输出结果（和设置StdOutCallbackHandler回调函数效果一样）\n",
    "chain = LLMChain(llm=llm, prompt=prompt, verbose=True)\n",
    "chain.invoke({\"number\":2})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "64062f54-88f1-4ba9-8493-2a8469d798d5",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m1 + 2 = \u001B[0m\n",
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": "{'number': 2, 'text': '3'}"
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 在请求调用时设置回调查看结果（和在LLM链中设置效果一样）\n",
    "chain = LLMChain(llm=llm, prompt=prompt)\n",
    "chain.invoke({\"number\":2}, {\"callbacks\":[handler]})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9266dab1-c34b-4fbb-93a3-281f5b617e0f",
   "metadata": {},
   "source": [
    "## 3. 自定义回调函数"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "自定义的处理类：实现基类BaseCallbackHandler接口中的事件方法即可"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "ac4d79af-b48b-4fa4-b0a1-9258330171df",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "My custom handler, token: \n",
      "My custom handler, token: Why\n",
      "My custom handler, token:  did\n",
      "My custom handler, token:  the\n",
      "My custom handler, token:  scare\n",
      "My custom handler, token: crow\n",
      "My custom handler, token:  win\n",
      "My custom handler, token:  an\n",
      "My custom handler, token:  award\n",
      "My custom handler, token: ?\n",
      "My custom handler, token:  Because\n",
      "My custom handler, token:  he\n",
      "My custom handler, token:  was\n",
      "My custom handler, token:  outstanding\n",
      "My custom handler, token:  in\n",
      "My custom handler, token:  his\n",
      "My custom handler, token:  field\n",
      "My custom handler, token: !\n",
      "My custom handler, token: \n"
     ]
    },
    {
     "data": {
      "text/plain": "AIMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', response_metadata={'finish_reason': 'stop'})"
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.callbacks import BaseCallbackHandler\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "#自定义处理类\n",
    "class MyCustomHandler(BaseCallbackHandler):\n",
    "    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
    "        print(f\"My custom handler, token: {token}\")\n",
    "\n",
    "\n",
    "# 启用streaming参数，通过设置 `streaming=True` 实现流式输出\n",
    "chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomHandler()])\n",
    "\n",
    "chat.invoke([HumanMessage(content=\"Tell me a joke\")])"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 4.记录到文件"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "可以利用回调函数，将输出记录到日志文件中。\n",
    "\n",
    "通过 FileCallbackHandler 可以将日志输出写入文件，其执行过程和 StdOutCallbackHandler 类似，只是输出目标是文件"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
      "Prompt after formatting:\n",
      "\u001B[32;1m\u001B[1;3m1 + 2 = \u001B[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2024-04-09 17:32:05.238\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m19\u001B[0m - \u001B[1m{'number': 2, 'text': '3'}\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\u001B[1m> Finished chain.\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "from langchain.callbacks import FileCallbackHandler\n",
    "from langchain.chains import LLMChain\n",
    "from langchain_core.prompts import PromptTemplate\n",
    "from langchain_openai import ChatOpenAI\n",
    "from loguru import logger\n",
    "\n",
    "logfile = \"output.log\"\n",
    "# colorize=False keeps ANSI escape codes out of the log file;\n",
    "# colorize=True targets terminal sinks and pollutes a plain-text log\n",
    "logger.add(logfile, colorize=False, enqueue=True)\n",
    "handler = FileCallbackHandler(logfile)\n",
    "\n",
    "llm = ChatOpenAI()\n",
    "prompt = PromptTemplate.from_template(\"1 + {number} = \")\n",
    "\n",
    "# verbose=True echoes the chain run to stdout; FileCallbackHandler writes the\n",
    "# same trace to the file (it does so even when verbose=False)\n",
    "chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler], verbose=True)\n",
    "# run the chain and capture its result\n",
    "answer = chain.invoke({\"number\":2})\n",
    "# also record the final result in the log file\n",
    "logger.info(answer)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 5. 异步回调\n",
    "\n",
    "（原注：执行后暂未看到异步的效果；但从输出的时间戳看，异步回调 on_llm_start / on_llm_end 实际已被调用）"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "当需要读取或写入大量数据到数据库或者文件时，可以使用异步回调来避免阻塞主线程"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1712653662.9382486 async on_llm_start....\n",
      "1712653663.450857 Hi! I just woke up. Your llm is starting\n",
      "1712653671.215189 Sync handler being called in a `thread_pool_executor`: token: \n",
      "1712653671.2435968 Sync handler being called in a `thread_pool_executor`: token: Why\n",
      "1712653671.245625 Sync handler being called in a `thread_pool_executor`: token:  couldn\n",
      "1712653671.258355 Sync handler being called in a `thread_pool_executor`: token: 't\n",
      "1712653671.259472 Sync handler being called in a `thread_pool_executor`: token:  the\n",
      "1712653671.3210065 Sync handler being called in a `thread_pool_executor`: token:  bicycle\n",
      "1712653671.3225036 Sync handler being called in a `thread_pool_executor`: token:  find\n",
      "1712653671.360427 Sync handler being called in a `thread_pool_executor`: token:  its\n",
      "1712653671.3621085 Sync handler being called in a `thread_pool_executor`: token:  way\n",
      "1712653671.372167 Sync handler being called in a `thread_pool_executor`: token:  home\n",
      "1712653671.3738072 Sync handler being called in a `thread_pool_executor`: token: ?\n",
      "\n",
      "\n",
      "1712653671.415033 Sync handler being called in a `thread_pool_executor`: token: Because\n",
      "1712653671.4165335 Sync handler being called in a `thread_pool_executor`: token:  it\n",
      "1712653671.432173 Sync handler being called in a `thread_pool_executor`: token:  lost\n",
      "1712653671.433827 Sync handler being called in a `thread_pool_executor`: token:  its\n",
      "1712653671.4449284 Sync handler being called in a `thread_pool_executor`: token:  bearings\n",
      "1712653671.4464252 Sync handler being called in a `thread_pool_executor`: token: !\n",
      "1712653671.4474275 Sync handler being called in a `thread_pool_executor`: token: \n",
      "1712653671.4494274 async on_llm_end....\n",
      "1712653671.9567206 Hi! I just woke up. Your llm is ending\n"
     ]
    },
    {
     "data": {
      "text/plain": "LLMResult(generations=[[ChatGeneration(text=\"Why couldn't the bicycle find its way home?\\n\\nBecause it lost its bearings!\", generation_info={'finish_reason': 'stop'}, message=AIMessage(content=\"Why couldn't the bicycle find its way home?\\n\\nBecause it lost its bearings!\", response_metadata={'finish_reason': 'stop'}))]], llm_output={'token_usage': {}, 'model_name': 'gpt-3.5-turbo'}, run=[RunInfo(run_id=UUID('fcfefb80-e534-40bf-aae3-6d8ae9051ed1'))])"
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import asyncio\n",
    "from typing import Any, Dict, List\n",
    "\n",
    "from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langchain_core.outputs import LLMResult\n",
    "from langchain_openai import ChatOpenAI\n",
    "import time\n",
    "\n",
    "#1.自定义同步的回调处理器\n",
    "class MyCustomSyncHandler(BaseCallbackHandler):\n",
    "    def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
    "        print(f\"{time.time()} Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
    "#2.自定义异步的回调处理器\n",
    "class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
    "    \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
    "\n",
    "    async def on_llm_start(\n",
    "        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
    "    ) -> None:\n",
    "        \"\"\"Run when chain starts running.\"\"\"\n",
    "        print(f\"{time.time()} async on_llm_start....\")\n",
    "        await asyncio.sleep(0.5)\n",
    "        print(f\"{time.time()} Hi! I just woke up. Your llm is starting\")\n",
    "\n",
    "    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
    "        \"\"\"Run when chain ends running.\"\"\"\n",
    "        print(f\"{time.time()} async on_llm_end....\")\n",
    "        await asyncio.sleep(0.5)\n",
    "        print(f\"{time.time()} Hi! I just woke up. Your llm is ending\")\n",
    "\n",
    "# 启用流式输出，构建模型，放入回调列表\n",
    "chat = ChatOpenAI(\n",
    "    max_tokens=25,\n",
    "    streaming=True,\n",
    "    callbacks=[MyCustomSyncHandler(),MyCustomAsyncHandler()],\n",
    ")\n",
    "\n",
    "# 注：异步回调的效果可从输出时间戳观察（on_llm_start 打印后约 0.5 秒才出现唤醒消息）\n",
    "await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])\n"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
