{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Zhipu AI configuration\n",
    "# SECURITY: an API key was previously hardcoded here and committed -- revoke/rotate it.\n",
    "import os\n",
    "API_KEY = os.environ['ZHIPUAI_API_KEY']  # never hardcode credentials in a notebook\n",
    "BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'\n",
    "MODEL_NAME = 'glm-4'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Moonshot AI configuration\n",
    "# SECURITY: an API key was previously hardcoded here and committed -- revoke/rotate it.\n",
    "import os\n",
    "API_KEY = os.environ['MOONSHOT_API_KEY']  # never hardcode credentials in a notebook\n",
    "BASE_PATH = 'https://api.moonshot.cn/v1/'\n",
    "MODEL_NAME = 'moonshot-v1-8k'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# langchain模型初始化\n",
    "from langchain_openai import ChatOpenAI\n",
    "llm =  ChatOpenAI(model_name=MODEL_NAME,temperature=.7,openai_api_key=API_KEY,base_url=BASE_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FIXME: this cell previously raised ImportError and its traceback was saved in the\n",
    "# notebook outputs. `PromptEngine` is not part of langchain's public API, and\n",
    "# `gptcache.llm_cache.LLMCache` does not exist in gptcache -- the supported GPTCache\n",
    "# integration goes through `gptcache.adapter.langchain_models.LangChainLLMs`\n",
    "# (see the working example in the last cell of this notebook).\n",
    "import os\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "# Zhipu AI configuration (key loaded from the environment, never hardcoded)\n",
    "API_KEY = os.environ['ZHIPUAI_API_KEY']\n",
    "BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'\n",
    "MODEL_NAME = 'glm-4'\n",
    "llm = ChatOpenAI(model_name=MODEL_NAME, temperature=.7, openai_api_key=API_KEY, base_url=BASE_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "import os\n",
    "\n",
    "\n",
    "def init_log(file_path, level):\n",
    "    \"\"\"Create (or reuse) a logger that writes records at `level` and above to `file_path`.\"\"\"\n",
    "    logger = logging.getLogger(__name__)\n",
    "    # Without an explicit level the logger inherits the root level (WARNING),\n",
    "    # so logger.info(...) below would be silently dropped.\n",
    "    logger.setLevel(logging.DEBUG)\n",
    "\n",
    "    # FileHandler fails if the target directory does not exist yet.\n",
    "    os.makedirs(os.path.dirname(file_path) or '.', exist_ok=True)\n",
    "\n",
    "    # Guard against duplicate handlers when this cell is re-run in the same kernel;\n",
    "    # otherwise each run would append another handler and duplicate every record.\n",
    "    target = os.path.abspath(file_path)\n",
    "    if any(isinstance(h, logging.FileHandler) and h.baseFilename == target\n",
    "           for h in logger.handlers):\n",
    "        return logger\n",
    "\n",
    "    file_handler = logging.FileHandler(file_path)\n",
    "    file_handler.setLevel(level)\n",
    "\n",
    "    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n",
    "    file_handler.setFormatter(formatter)\n",
    "\n",
    "    logger.addHandler(file_handler)\n",
    "    return logger\n",
    "\n",
    "\n",
    "info_log = init_log('logs/info.log', logging.INFO)\n",
    "info_log.info('this is an info')\n",
    "info_log.error('this is an err')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "import logging.handlers\n",
    "import os\n",
    "\n",
    "\n",
    "# define the logger\n",
    "logger = logging.getLogger(__name__)\n",
    "logger.setLevel(logging.DEBUG)\n",
    "\n",
    "\n",
    "def set_log_file(filename, level):\n",
    "    \"\"\"Attach a FileHandler that writes records at `level` and above to `filename`.\"\"\"\n",
    "    # Skip if a handler for this file is already attached -- re-running this\n",
    "    # cell would otherwise add duplicates and log every record twice.\n",
    "    target = os.path.abspath(filename)\n",
    "    if any(isinstance(h, logging.FileHandler) and h.baseFilename == target\n",
    "           for h in logger.handlers):\n",
    "        return\n",
    "    handler = logging.FileHandler(filename)\n",
    "    handler.setLevel(level)\n",
    "    handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s\"))\n",
    "    logger.addHandler(handler)\n",
    "\n",
    "\n",
    "set_log_file('logs/error.log', logging.ERROR)\n",
    "set_log_file('logs/app.log', logging.WARNING)\n",
    "\n",
    "logger.info('this is info')\n",
    "logger.error('this is an error')\n",
    "logger.error('this is an error')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: total: 31.2 ms\n",
      "Wall time: 1.28 s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "\"Here's a light-hearted joke for you:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything! 😄\""
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%time\n",
    "llm.invoke('tell me a joke').content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "    1. 需求\n",
    "    现在有以下几个日志记录的需求：\n",
    "        1）要求将所有级别的所有日志都写入磁盘文件中\n",
    "        2）all.log文件中记录所有的日志信息，日志格式为：日期和时间 - 日志级别 - 日志信息\n",
    "        3）error.log文件中单独记录error及以上级别的日志信息，日志格式为：日期和时间 - 日志级别 - 文件名[:行号] - 日志信息\n",
    "        4）要求all.log在每天凌晨进行日志切割\n",
    "\n",
    "    2. 分析\n",
    "        1）要记录所有级别的日志，因此日志器的有效level需要设置为最低级别--DEBUG;\n",
    "        2）日志需要被发送到两个不同的目的地，因此需要为日志器设置两个handler；另外，两个目的地都是磁盘文件，因此这两个handler都是与FileHandler相关的；\n",
    "        3）all.log要求按照时间进行日志切割，因此他需要用logging.handlers.TimedRotatingFileHandler; 而error.log没有要求日志切割，因此可以使用FileHandler;\n",
    "        4）两个日志文件的格式不同，因此需要对这两个handler分别设置格式器；\n",
    "    '''\n",
    "\n",
    "# NOTE: the code below was previously indented four spaces at module level,\n",
    "# which raises IndentationError -- it must start at column 0.\n",
    "import logging\n",
    "import logging.handlers\n",
    "import datetime\n",
    "\n",
    "\n",
    "# define the logger\n",
    "logger = logging.getLogger('mylogger')\n",
    "logger.setLevel(logging.DEBUG)\n",
    "\n",
    "\n",
    "# a separate handler for each of the two target files\n",
    "rf_handler = logging.handlers.TimedRotatingFileHandler('logs/all.log', when='midnight', interval=1, backupCount=7, atTime=datetime.time(0, 0, 0, 0))\n",
    "rf_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n",
    "\n",
    "f_handler = logging.FileHandler('logs/error.log')\n",
    "f_handler.setLevel(logging.ERROR)\n",
    "f_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s\"))\n",
    "\n",
    "# attach both handlers to the logger\n",
    "logger.addHandler(rf_handler)\n",
    "logger.addHandler(f_handler)\n",
    "\n",
    "logger.debug('debug message')\n",
    "logger.info('info message')\n",
    "logger.warning('warning message')\n",
    "logger.error('error message')\n",
    "logger.critical('critical message')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'如果您喜欢吃番茄，可以做很多美味的菜肴。这里有几个简单又受欢迎的番茄菜谱推荐给您：\\n\\n1. 番茄炒蛋：简单易做，营养丰富，将鸡蛋和番茄翻炒，既可以作为早餐，也可以作为中晚餐的菜肴。\\n\\n2. 番茄炖牛腩：将牛腩和番茄一起炖煮，牛腩的鲜美与番茄的酸甜相融，非常适合秋冬季节食用。\\n\\n3. 番茄炒肉片：选择猪肉或鸡肉切片，与番茄一起快炒，酸甜可口，非常适合家常便饭。\\n\\n4. 番茄意面：将番茄酱与煮熟的意面混合，加入喜欢的蔬菜和肉类，是一道简单美味的西式料理。\\n\\n5. 番茄鸡蛋汤：清爽开胃，适合夏天食用，简单地将番茄和鸡蛋煮成汤即可。\\n\\n6. 番茄炖鱼：选择鲜美的鱼类，与番茄一起炖煮，酸甜的番茄能很好地中和鱼的腥味，提升菜肴的整体风味。\\n\\n这些都是使用番茄制作的简单菜肴，您可以根据个人口味和喜好进行调整。'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import json\n",
    "p=[\"{\\\"lc\\\": 1, \\\"type\\\": \\\"constructor\\\", \\\"id\\\": [\\\"langchain\\\", \\\"schema\\\", \\\"output\\\", \\\"ChatGeneration\\\"], \\\"kwargs\\\": {\\\"text\\\": \\\"\\\\u5982\\\\u679c\\\\u60a8\\\\u559c\\\\u6b22\\\\u5403\\\\u756a\\\\u8304\\\\uff0c\\\\u53ef\\\\u4ee5\\\\u505a\\\\u5f88\\\\u591a\\\\u7f8e\\\\u5473\\\\u7684\\\\u83dc\\\\u80b4\\\\u3002\\\\u8fd9\\\\u91cc\\\\u6709\\\\u51e0\\\\u4e2a\\\\u7b80\\\\u5355\\\\u53c8\\\\u53d7\\\\u6b22\\\\u8fce\\\\u7684\\\\u756a\\\\u8304\\\\u83dc\\\\u8c31\\\\u63a8\\\\u8350\\\\u7ed9\\\\u60a8\\\\uff1a\\\\n\\\\n1. \\\\u756a\\\\u8304\\\\u7092\\\\u86cb\\\\uff1a\\\\u7b80\\\\u5355\\\\u6613\\\\u505a\\\\uff0c\\\\u8425\\\\u517b\\\\u4e30\\\\u5bcc\\\\uff0c\\\\u5c06\\\\u9e21\\\\u86cb\\\\u548c\\\\u756a\\\\u8304\\\\u7ffb\\\\u7092\\\\uff0c\\\\u65e2\\\\u53ef\\\\u4ee5\\\\u4f5c\\\\u4e3a\\\\u65e9\\\\u9910\\\\uff0c\\\\u4e5f\\\\u53ef\\\\u4ee5\\\\u4f5c\\\\u4e3a\\\\u4e2d\\\\u665a\\\\u9910\\\\u7684\\\\u83dc\\\\u80b4\\\\u3002\\\\n\\\\n2. \\\\u756a\\\\u8304\\\\u7096\\\\u725b\\\\u8169\\\\uff1a\\\\u5c06\\\\u725b\\\\u8169\\\\u548c\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u7096\\\\u716e\\\\uff0c\\\\u725b\\\\u8169\\\\u7684\\\\u9c9c\\\\u7f8e\\\\u4e0e\\\\u756a\\\\u8304\\\\u7684\\\\u9178\\\\u751c\\\\u76f8\\\\u878d\\\\uff0c\\\\u975e\\\\u5e38\\\\u9002\\\\u5408\\\\u79cb\\\\u51ac\\\\u5b63\\\\u8282\\\\u98df\\\\u7528\\\\u3002\\\\n\\\\n3. \\\\u756a\\\\u8304\\\\u7092\\\\u8089\\\\u7247\\\\uff1a\\\\u9009\\\\u62e9\\\\u732a\\\\u8089\\\\u6216\\\\u9e21\\\\u8089\\\\u5207\\\\u7247\\\\uff0c\\\\u4e0e\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u5feb\\\\u7092\\\\uff0c\\\\u9178\\\\u751c\\\\u53ef\\\\u53e3\\\\uff0c\\\\u975e\\\\u5e38\\\\u9002\\\\u5408\\\\u5bb6\\\\u5e38\\\\u4fbf\\\\u996d\\\\u3002\\\\n\\\\n4. 
\\\\u756a\\\\u8304\\\\u610f\\\\u9762\\\\uff1a\\\\u5c06\\\\u756a\\\\u8304\\\\u9171\\\\u4e0e\\\\u716e\\\\u719f\\\\u7684\\\\u610f\\\\u9762\\\\u6df7\\\\u5408\\\\uff0c\\\\u52a0\\\\u5165\\\\u559c\\\\u6b22\\\\u7684\\\\u852c\\\\u83dc\\\\u548c\\\\u8089\\\\u7c7b\\\\uff0c\\\\u662f\\\\u4e00\\\\u9053\\\\u7b80\\\\u5355\\\\u7f8e\\\\u5473\\\\u7684\\\\u897f\\\\u5f0f\\\\u6599\\\\u7406\\\\u3002\\\\n\\\\n5. \\\\u756a\\\\u8304\\\\u9e21\\\\u86cb\\\\u6c64\\\\uff1a\\\\u6e05\\\\u723d\\\\u5f00\\\\u80c3\\\\uff0c\\\\u9002\\\\u5408\\\\u590f\\\\u5929\\\\u98df\\\\u7528\\\\uff0c\\\\u7b80\\\\u5355\\\\u5730\\\\u5c06\\\\u756a\\\\u8304\\\\u548c\\\\u9e21\\\\u86cb\\\\u716e\\\\u6210\\\\u6c64\\\\u5373\\\\u53ef\\\\u3002\\\\n\\\\n6. \\\\u756a\\\\u8304\\\\u7096\\\\u9c7c\\\\uff1a\\\\u9009\\\\u62e9\\\\u9c9c\\\\u7f8e\\\\u7684\\\\u9c7c\\\\u7c7b\\\\uff0c\\\\u4e0e\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u7096\\\\u716e\\\\uff0c\\\\u9178\\\\u751c\\\\u7684\\\\u756a\\\\u8304\\\\u80fd\\\\u5f88\\\\u597d\\\\u5730\\\\u4e2d\\\\u548c\\\\u9c7c\\\\u7684\\\\u8165\\\\u5473\\\\uff0c\\\\u63d0\\\\u5347\\\\u83dc\\\\u80b4\\\\u7684\\\\u6574\\\\u4f53\\\\u98ce\\\\u5473\\\\u3002\\\\n\\\\n\\\\u8fd9\\\\u4e9b\\\\u90fd\\\\u662f\\\\u4f7f\\\\u7528\\\\u756a\\\\u8304\\\\u5236\\\\u4f5c\\\\u7684\\\\u7b80\\\\u5355\\\\u83dc\\\\u80b4\\\\uff0c\\\\u60a8\\\\u53ef\\\\u4ee5\\\\u6839\\\\u636e\\\\u4e2a\\\\u4eba\\\\u53e3\\\\u5473\\\\u548c\\\\u559c\\\\u597d\\\\u8fdb\\\\u884c\\\\u8c03\\\\u6574\\\\u3002\\\", \\\"generation_info\\\": {\\\"finish_reason\\\": \\\"stop\\\", \\\"logprobs\\\": null}, \\\"type\\\": \\\"ChatGeneration\\\", \\\"message\\\": {\\\"lc\\\": 1, \\\"type\\\": \\\"constructor\\\", \\\"id\\\": [\\\"langchain\\\", \\\"schema\\\", \\\"messages\\\", \\\"AIMessage\\\"], \\\"kwargs\\\": {\\\"content\\\": 
\\\"\\\\u5982\\\\u679c\\\\u60a8\\\\u559c\\\\u6b22\\\\u5403\\\\u756a\\\\u8304\\\\uff0c\\\\u53ef\\\\u4ee5\\\\u505a\\\\u5f88\\\\u591a\\\\u7f8e\\\\u5473\\\\u7684\\\\u83dc\\\\u80b4\\\\u3002\\\\u8fd9\\\\u91cc\\\\u6709\\\\u51e0\\\\u4e2a\\\\u7b80\\\\u5355\\\\u53c8\\\\u53d7\\\\u6b22\\\\u8fce\\\\u7684\\\\u756a\\\\u8304\\\\u83dc\\\\u8c31\\\\u63a8\\\\u8350\\\\u7ed9\\\\u60a8\\\\uff1a\\\\n\\\\n1. \\\\u756a\\\\u8304\\\\u7092\\\\u86cb\\\\uff1a\\\\u7b80\\\\u5355\\\\u6613\\\\u505a\\\\uff0c\\\\u8425\\\\u517b\\\\u4e30\\\\u5bcc\\\\uff0c\\\\u5c06\\\\u9e21\\\\u86cb\\\\u548c\\\\u756a\\\\u8304\\\\u7ffb\\\\u7092\\\\uff0c\\\\u65e2\\\\u53ef\\\\u4ee5\\\\u4f5c\\\\u4e3a\\\\u65e9\\\\u9910\\\\uff0c\\\\u4e5f\\\\u53ef\\\\u4ee5\\\\u4f5c\\\\u4e3a\\\\u4e2d\\\\u665a\\\\u9910\\\\u7684\\\\u83dc\\\\u80b4\\\\u3002\\\\n\\\\n2. \\\\u756a\\\\u8304\\\\u7096\\\\u725b\\\\u8169\\\\uff1a\\\\u5c06\\\\u725b\\\\u8169\\\\u548c\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u7096\\\\u716e\\\\uff0c\\\\u725b\\\\u8169\\\\u7684\\\\u9c9c\\\\u7f8e\\\\u4e0e\\\\u756a\\\\u8304\\\\u7684\\\\u9178\\\\u751c\\\\u76f8\\\\u878d\\\\uff0c\\\\u975e\\\\u5e38\\\\u9002\\\\u5408\\\\u79cb\\\\u51ac\\\\u5b63\\\\u8282\\\\u98df\\\\u7528\\\\u3002\\\\n\\\\n3. \\\\u756a\\\\u8304\\\\u7092\\\\u8089\\\\u7247\\\\uff1a\\\\u9009\\\\u62e9\\\\u732a\\\\u8089\\\\u6216\\\\u9e21\\\\u8089\\\\u5207\\\\u7247\\\\uff0c\\\\u4e0e\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u5feb\\\\u7092\\\\uff0c\\\\u9178\\\\u751c\\\\u53ef\\\\u53e3\\\\uff0c\\\\u975e\\\\u5e38\\\\u9002\\\\u5408\\\\u5bb6\\\\u5e38\\\\u4fbf\\\\u996d\\\\u3002\\\\n\\\\n4. \\\\u756a\\\\u8304\\\\u610f\\\\u9762\\\\uff1a\\\\u5c06\\\\u756a\\\\u8304\\\\u9171\\\\u4e0e\\\\u716e\\\\u719f\\\\u7684\\\\u610f\\\\u9762\\\\u6df7\\\\u5408\\\\uff0c\\\\u52a0\\\\u5165\\\\u559c\\\\u6b22\\\\u7684\\\\u852c\\\\u83dc\\\\u548c\\\\u8089\\\\u7c7b\\\\uff0c\\\\u662f\\\\u4e00\\\\u9053\\\\u7b80\\\\u5355\\\\u7f8e\\\\u5473\\\\u7684\\\\u897f\\\\u5f0f\\\\u6599\\\\u7406\\\\u3002\\\\n\\\\n5. 
\\\\u756a\\\\u8304\\\\u9e21\\\\u86cb\\\\u6c64\\\\uff1a\\\\u6e05\\\\u723d\\\\u5f00\\\\u80c3\\\\uff0c\\\\u9002\\\\u5408\\\\u590f\\\\u5929\\\\u98df\\\\u7528\\\\uff0c\\\\u7b80\\\\u5355\\\\u5730\\\\u5c06\\\\u756a\\\\u8304\\\\u548c\\\\u9e21\\\\u86cb\\\\u716e\\\\u6210\\\\u6c64\\\\u5373\\\\u53ef\\\\u3002\\\\n\\\\n6. \\\\u756a\\\\u8304\\\\u7096\\\\u9c7c\\\\uff1a\\\\u9009\\\\u62e9\\\\u9c9c\\\\u7f8e\\\\u7684\\\\u9c7c\\\\u7c7b\\\\uff0c\\\\u4e0e\\\\u756a\\\\u8304\\\\u4e00\\\\u8d77\\\\u7096\\\\u716e\\\\uff0c\\\\u9178\\\\u751c\\\\u7684\\\\u756a\\\\u8304\\\\u80fd\\\\u5f88\\\\u597d\\\\u5730\\\\u4e2d\\\\u548c\\\\u9c7c\\\\u7684\\\\u8165\\\\u5473\\\\uff0c\\\\u63d0\\\\u5347\\\\u83dc\\\\u80b4\\\\u7684\\\\u6574\\\\u4f53\\\\u98ce\\\\u5473\\\\u3002\\\\n\\\\n\\\\u8fd9\\\\u4e9b\\\\u90fd\\\\u662f\\\\u4f7f\\\\u7528\\\\u756a\\\\u8304\\\\u5236\\\\u4f5c\\\\u7684\\\\u7b80\\\\u5355\\\\u83dc\\\\u80b4\\\\uff0c\\\\u60a8\\\\u53ef\\\\u4ee5\\\\u6839\\\\u636e\\\\u4e2a\\\\u4eba\\\\u53e3\\\\u5473\\\\u548c\\\\u559c\\\\u597d\\\\u8fdb\\\\u884c\\\\u8c03\\\\u6574\\\\u3002\\\", \\\"response_metadata\\\": {\\\"token_usage\\\": {\\\"completion_tokens\\\": 231, \\\"prompt_tokens\\\": 56, \\\"total_tokens\\\": 287}, \\\"model_name\\\": \\\"glm-4\\\", \\\"system_fingerprint\\\": null, \\\"finish_reason\\\": \\\"stop\\\", \\\"logprobs\\\": null}, \\\"type\\\": \\\"ai\\\", \\\"id\\\": \\\"run-7ce557f0-177a-485e-baf4-d4838f457fd3-0\\\", \\\"usage_metadata\\\": {\\\"input_tokens\\\": 56, \\\"output_tokens\\\": 231, \\\"total_tokens\\\": 287}, \\\"tool_calls\\\": [], \\\"invalid_tool_calls\\\": []}}}}\"]\n",
    "\n",
    "r = json.loads(p[-1])\n",
    "r['kwargs']['text']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: total: 31.2 ms\n",
      "Wall time: 2.13 s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "\"Sure! Here are two jokes for you:\\n\\n1. Why don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\n2. Why did the scarecrow win an award?\\n\\nBecause he was outstanding in his field!\\n\\nI hope those jokes bring a smile to your face!\""
      ]
     },
     "execution_count": 73,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%%time\n",
    "llm.invoke('tell me 2 jokes').content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 会话配置2 RunnableWithMessageHistory\n",
    "from langchain_core.chat_history import (\n",
    "    BaseChatMessageHistory,\n",
    "    InMemoryChatMessageHistory,\n",
    ")\n",
    "from langchain_core.runnables.history import RunnableWithMessageHistory\n",
    "from langchain.schema import HumanMessage\n",
    "store = {}\n",
    "\n",
    "\n",
    "def get_session_history(session_id: str) -> BaseChatMessageHistory:\n",
    "    if session_id not in store:\n",
    "        store[session_id] = InMemoryChatMessageHistory()\n",
    "    return store[session_id]\n",
    "\n",
    "\n",
    "with_message_history = RunnableWithMessageHistory(llm, get_session_history)\n",
    "\n",
    "config = {\"configurable\": {\"session_id\": \"abc2\"}}\n",
    "# response = with_message_history.invoke(\n",
    "#     [HumanMessage(content=\"Hi! I'm Bob\")],\n",
    "#     config=config,\n",
    "# )\n",
    "\n",
    "response = with_message_history.invoke(\n",
    "    \"Hi! I'm Bob\",\n",
    "    config=config,\n",
    ")\n",
    "\n",
    "response.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "config = {\"configurable\": {\"session_id\": \"abc2\"}}\n",
    "\n",
    "response = with_message_history.invoke(\n",
    "    \"Hi! I'm Bob\",\n",
    "    config=config,\n",
    ")\n",
    "\n",
    "response.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "config = {\"configurable\": {\"session_id\": \"abc2\"}}\n",
    "for r in with_message_history.stream(\n",
    "     \n",
    "        \"what's my name\",\n",
    "        \n",
    "    \n",
    "    config=config,\n",
    "):\n",
    "    print(r.content, end=\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from urllib.parse import urljoin\n",
    "\n",
    "base_dir = 'E:\\\\Programs\\\\PythonPro\\\\shouwujiai'\n",
    "upload_folder = 'static/uploads/'\n",
    "filename = 'yXgi6x2zB5S2c75c21b0db77dfedb92517aca2242efd.jpg'\n",
    "file_name = upload_folder + filename\n",
    "url = 'http://localhost:5000/'\n",
    "# os.path.join is for filesystem paths; on Windows it joins with backslashes,\n",
    "# which corrupts the URL. urljoin composes URLs correctly on every platform.\n",
    "urljoin(url, file_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "image = None\n",
    "# BUG FIX: type(image) == 'NoneType' compared a type object to a string,\n",
    "# which is always False. The correct, idiomatic check is an identity test.\n",
    "image is None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# *************Test streamed conversation output\n",
    "\n",
    "\n",
    "def test():\n",
    "    messages =['我是jam','请问1+1等于几']\n",
    "    res = conversation.stream(input=messages[0])\n",
    "    print(res)\n",
    "    for chunk in res:\n",
    "        print('==\\t',chunk.content)\n",
    "        yield chunk.content\n",
    "\n",
    "\n",
    "# BUG FIX: test() is a generator function, so the bare call `test()` only\n",
    "# created the generator and executed nothing -- it must be consumed.\n",
    "for _ in test():\n",
    "    pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "##*************以下是huggingface模型测试****************\n",
    "import os\n",
    "os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n",
    "import requests\n",
    "from PIL import Image\n",
    "from transformers import BlipProcessor, BlipForConditionalGeneration\n",
    "from langchain.tools import BaseTool\n",
    "from langchain.agents import initialize_agent, AgentType\n",
    "\n",
    "# hf_model = \"../models/blip-image-captioning-large\"\n",
    "hf_model='Salesforce/blip-image-captioning-large'\n",
    "# 初始化处理器和模型\n",
    "processor = BlipProcessor.from_pretrained(hf_model)\n",
    "model = BlipForConditionalGeneration.from_pretrained(hf_model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[\"HumanMessage(content='马铃薯是什么')\",\n",
       " \"AIMessage(content='*** 用途 ***\\\\n\\\\n1. 食用：马铃薯是常见的蔬菜，可用于烹饪各种菜肴。\\\\n2. 加工：可加工成薯条、薯片等休闲食品。\\\\n\\\\n*** 使用方法***\\\\n\\\\n1. 烹饪前处理：将马铃薯清洗干净，去皮或不去皮，切成所需形状。\\\\n2. 烹饪：可以煮、蒸、炸、炒等多种方式制作马铃薯菜肴。\\\\n3. 储存：将马铃薯置于阴凉通风处，避免阳光直射，可延长保存时间。')\",\n",
       " \"HumanMessage(content='番茄是什么')\"]"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c=\"[HumanMessage(content='马铃薯是什么'), AIMessage(content='*** 用途 ***\\\\n\\\\n1. 食用：马铃薯是常见的蔬菜，可用于烹饪各种菜肴。\\\\n2. 加工：可加工成薯条、薯片等休闲食品。\\\\n\\\\n*** 使用方法***\\\\n\\\\n1. 烹饪前处理：将马铃薯清洗干净，去皮或不去皮，切成所需形状。\\\\n2. 烹饪：可以煮、蒸、炸、炒等多种方式制作马铃薯菜肴。\\\\n3. 储存：将马铃薯置于阴凉通风处，避免阳光直射，可延长保存时间。'), HumanMessage(content='番茄是什么')]\"\n",
    "c.replace('[','').replace(']','').split(', ')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' \n",
    "raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')\n",
    "\n",
    "# conditional image captioning\n",
    "text = \"this is \"\n",
    "inputs = processor(raw_image, text, return_tensors=\"pt\")\n",
    "\n",
    "out = model.generate(**inputs)\n",
    "print(processor.decode(out[0], skip_special_tokens=True))\n",
    "\n",
    "# unconditional image captioning\n",
    "inputs = processor(raw_image, return_tensors=\"pt\")\n",
    "\n",
    "out = model.generate(**inputs)\n",
    "print(processor.decode(out[0], skip_special_tokens=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ImageCapTool(BaseTool):       \n",
    "    name = \"Image captioner\"   \n",
    "    description = \"为图片创作说明文案.\"    \n",
    "    def _run(self, url: str):        \n",
    "        # 下载图像并将其转换为PIL对象        \n",
    "        image = Image.open(requests.get(url, stream=True).raw).convert('RGB')       \n",
    "        # 预处理图像        \n",
    "        inputs = processor(image, return_tensors=\"pt\")        # 生成字幕        \n",
    "        out = model.generate(**inputs, max_new_tokens=20)        # 获取字幕        \n",
    "        caption = processor.decode(out[0], skip_special_tokens=True)  \n",
    "        print(caption)      \n",
    "        return caption    \n",
    "        \n",
    "    def _arun(self, query: str):        \n",
    "        raise NotImplementedError(\"This tool does not support async\")\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain.schema import HumanMessage\n",
    " \n",
    "# SECURITY: the key previously hardcoded here was committed -- revoke/rotate it.\n",
    "ZHIPUAI_API_KEY = os.environ['ZHIPUAI_API_KEY']\n",
    "BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'\n",
    "llm = ChatOpenAI(model_name='glm-4', temperature=.7, openai_api_key=ZHIPUAI_API_KEY, base_url=BASE_PATH)\n",
    "# 初始化工具和代理\n",
    "tools = [ImageCapTool()]\n",
    "agent = initialize_agent(    \n",
    "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,    \n",
    "    tools=tools,    \n",
    "    llm=llm,    \n",
    "    verbose=True,\n",
    "    )\n",
    "# 测试图像URL\n",
    "img_url = 'http://img.aiimg.com/uploads/allimg/191126/1-191126232400.jpg'# 运行代理以生成文案\n",
    "agent.run(input=f\"{img_url}\\n根据描述帮我撰写一段发布在微信朋友圈的中文文案\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "url = 'http://img.aiimg.com/uploads/allimg/191126/1-191126232400.jpg'\n",
    "\n",
    "r=requests.get(url, stream=True)\n",
    "print(r.raw)\n",
    "l = Image.open(r.raw)\n",
    "\n",
    "image = l.convert('RGB')\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.memory import ConversationBufferWindowMemory\n",
    "from langchain_community.chat_message_histories import ChatMessageHistory\n",
    "from langchain.chains.conversation.base import ConversationChain\n",
    "conversation = ConversationChain(\n",
    "    llm=llm,\n",
    "    verbose=True,\n",
    "    memory=ConversationBufferWindowMemory()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import TrOCRProcessor, VisionEncoderDecoderModel\n",
    "from PIL import Image\n",
    "import requests\n",
    "\n",
    "# load image from the IAM database\n",
    "url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'\n",
    "image = Image.open(requests.get(url, stream=True).raw).convert(\"RGB\")\n",
    "\n",
    "processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')\n",
    "model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')\n",
    "pixel_values = processor(images=image, return_tensors=\"pt\").pixel_values\n",
    "\n",
    "generated_ids = model.generate(pixel_values)\n",
    "generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==1==\n",
      " 您是用户的智能管家，你对中文的理解非常棒，用户会给出一个物品名称，你需要根据用户给出的物品名称，先判断这是不是一个物品，如果是物品就返回这个物品的用途和使用方法，按如下格式给到用户：\n",
      "    \n",
      "*** 用途 ***\n",
      "\n",
      "    1. ....\n",
      "\n",
      "    2. ....\n",
      "\n",
      "    ....\n",
      "\n",
      "    \n",
      "*** 使用方法***\n",
      "\n",
      "    1. ....\n",
      "\n",
      "    2. ....\n",
      "\n",
      "    ....\n",
      "\n",
      "\n",
      "    ，无需其他的文字做末尾总结。如果不是一个物品的名称返回“无”，无需其他的文字做末尾总结或提示用户”\n",
      "==2==\n",
      " 马铃薯\n"
     ]
    }
   ],
   "source": [
    "p = '[{\"lc\": 1, \"type\": \"constructor\", \"id\": [\"langchain\", \"schema\", \"messages\", \"SystemMessage\"], \"kwargs\": {\"content\": \"\\\\u60a8\\\\u662f\\\\u7528\\\\u6237\\\\u7684\\\\u667a\\\\u80fd\\\\u7ba1\\\\u5bb6\\\\uff0c\\\\u4f60\\\\u5bf9\\\\u4e2d\\\\u6587\\\\u7684\\\\u7406\\\\u89e3\\\\u975e\\\\u5e38\\\\u68d2\\\\uff0c\\\\u7528\\\\u6237\\\\u4f1a\\\\u7ed9\\\\u51fa\\\\u4e00\\\\u4e2a\\\\u7269\\\\u54c1\\\\u540d\\\\u79f0\\\\uff0c\\\\u4f60\\\\u9700\\\\u8981\\\\u6839\\\\u636e\\\\u7528\\\\u6237\\\\u7ed9\\\\u51fa\\\\u7684\\\\u7269\\\\u54c1\\\\u540d\\\\u79f0\\\\uff0c\\\\u5148\\\\u5224\\\\u65ad\\\\u8fd9\\\\u662f\\\\u4e0d\\\\u662f\\\\u4e00\\\\u4e2a\\\\u7269\\\\u54c1\\\\uff0c\\\\u5982\\\\u679c\\\\u662f\\\\u7269\\\\u54c1\\\\u5c31\\\\u8fd4\\\\u56de\\\\u8fd9\\\\u4e2a\\\\u7269\\\\u54c1\\\\u7684\\\\u7528\\\\u9014\\\\u548c\\\\u4f7f\\\\u7528\\\\u65b9\\\\u6cd5\\\\uff0c\\\\u6309\\\\u5982\\\\u4e0b\\\\u683c\\\\u5f0f\\\\u7ed9\\\\u5230\\\\u7528\\\\u6237\\\\uff1a\\\\n    \\\\n*** \\\\u7528\\\\u9014 ***\\\\n\\\\n    1. ....\\\\n\\\\n    2. ....\\\\n\\\\n    ....\\\\n\\\\n    \\\\n*** \\\\u4f7f\\\\u7528\\\\u65b9\\\\u6cd5***\\\\n\\\\n    1. ....\\\\n\\\\n    2. ....\\\\n\\\\n    ....\\\\n\\\\n\\\\n    \\\\uff0c\\\\u65e0\\\\u9700\\\\u5176\\\\u4ed6\\\\u7684\\\\u6587\\\\u5b57\\\\u505a\\\\u672b\\\\u5c3e\\\\u603b\\\\u7ed3\\\\u3002\\\\u5982\\\\u679c\\\\u4e0d\\\\u662f\\\\u4e00\\\\u4e2a\\\\u7269\\\\u54c1\\\\u7684\\\\u540d\\\\u79f0\\\\u8fd4\\\\u56de\\\\u201c\\\\u65e0\\\\u201d\\\\uff0c\\\\u65e0\\\\u9700\\\\u5176\\\\u4ed6\\\\u7684\\\\u6587\\\\u5b57\\\\u505a\\\\u672b\\\\u5c3e\\\\u603b\\\\u7ed3\\\\u6216\\\\u63d0\\\\u793a\\\\u7528\\\\u6237\\\\u201d\", \"type\": \"system\"}}, {\"lc\": 1, \"type\": \"constructor\", \"id\": [\"langchain\", \"schema\", \"messages\", \"HumanMessage\"], \"kwargs\": {\"content\": \"\\\\u9a6c\\\\u94c3\\\\u85af\", \"type\": \"human\"}}]'\n",
    "import json\n",
    "r = json.loads(p)\n",
    "# print(len(r))\n",
    "for i in range(len(r)):\n",
    "    print(f'=={i+1}==\\n',r[i]['kwargs']['content'])\n",
    "# print(json.loads(p)[0]['kwargs'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Any, List, Mapping, Optional\n",
    "import os\n",
    "os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n",
    "os.environ['TRANSFORMERS_OFFLINE']=\"1\"\n",
    "from langchain.callbacks.manager import CallbackManagerForLLMRun\n",
    "from langchain.llms.base import LLM\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "from langchain.cache import GPTCache\n",
    "import requests, json\n",
    "from gptcache.core import cache, Cache\n",
    "from gptcache.processor.post import temperature_softmax\n",
    "from gptcache.processor.pre import last_content, get_prompt\n",
    "from gptcache.adapter.langchain_models import LangChainLLMs\n",
    "import time\n",
    "from gptcache.session import Session\n",
    "import numpy as np\n",
    "from gptcache.manager import get_data_manager, CacheBase, VectorBase\n",
    "from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation\n",
    "from gptcache.embedding import Onnx\n",
    "from gptcache.utils.log import gptcache_log\n",
    "from gptcache.similarity_evaluation.distance import SearchDistanceEvaluation\n",
    "from gptcache.adapter.api import put, get\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "# model_name = 'THUDM/chatglm3-6b'\n",
    "# model_name = './chatglm3-6b'\n",
    "model_name = './trocr-base-handwritten'\n",
    "tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path= model_name,trust_remote_code=True,local_files_only=True)\n",
    "model = AutoModel.from_pretrained(pretrained_model_name_or_path=model_name,local_files_only=True,trust_remote_code=True).half().cuda()\n",
    "model = model.eval()\n",
    "\n",
    "def response_text(resp):\n",
    "    return resp.json()[\"data\"][\"answer\"]\n",
    "\n",
    "class CUSTOMLLM(LLM):\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        return \"custom\"\n",
    "\n",
    "    def _call(\n",
    "        self,\n",
    "        prompt: str,\n",
    "        stop: Optional[List[str]] = None,\n",
    "        run_manager: Optional[CallbackManagerForLLMRun] = None,\n",
    "        **kwargs: Any,\n",
    "    ) -> str:\n",
    "        response, history = model.chat(tokenizer, prompt, history=[])\n",
    "        return response\n",
    "\n",
    "class BGE:\n",
    "    # 自定义的embedding模型，使用的是embedding server\n",
    "    def __init__(self, model_name: str=\"BAAI/bge-large-zh-v1.5\", **kwargs):\n",
    "        self.model_name = model_name\n",
    "        if model_name in self.dim_dict():\n",
    "            self.__dimension = self.dim_dict()[model_name]\n",
    "        else:\n",
    "            self.__dimension = None\n",
    "        self.model = SentenceTransformer(self.model_name)\n",
    "\n",
    "    def to_embeddings(self, data, **_):\n",
    "        embeddings = model.encode(data, normalize_embeddings=True)\n",
    "        return np.array(embeddings).astype('float32')\n",
    "\n",
    "    @property\n",
    "    def dimension(self):\n",
    "        if not self.__dimension:\n",
    "            foo_emb = self.to_embeddings(\"foo\")\n",
    "            self.__dimension = len(foo_emb)\n",
    "        return self.__dimension\n",
    "    \n",
    "    @staticmethod\n",
    "    def dim_dict():\n",
    "        # embedding模型的输出纬度\n",
    "        return {\n",
    "            \"BAAI/bge-large-zh-v1.5\": 1024\n",
    "        }\n",
    "\n",
    "def custom_data_process(data_path, cache):\n",
    "    test_queries = []\n",
    "    data = json.load(open(data_path))\n",
    "    for line in data:\n",
    "        result = line[\"result\"]\n",
    "        query = line[\"query\"]\n",
    "        test_queries.append(query)\n",
    "        positive_queries = line[\"positive_queries\"]\n",
    "        for pos_query_score in positive_queries:\n",
    "                pos_query = pos_query_score[0]\n",
    "                score = pos_query_score[1]\n",
    "                put(pos_query, result, cache_obj=cache)\n",
    "    return test_queries\n",
    "\n",
    "\n",
    "def main():\n",
    "    llm = CUSTOMLLM()\n",
    "\n",
    "    question = \"货币的本质是什么？\"\n",
    "    question = \"介绍一下货币的定义\"\n",
    "\n",
    "    llm_cache = Cache()\n",
    "    session = Session()\n",
    "    bge = BGE()\n",
    "    \n",
    "    # 注意：输出的embedding维度不同时不能搜索同一个向量库，否则会报错\n",
    "    data_manager = get_data_manager(CacheBase(\"sqlite\"), VectorBase(\"faiss\", dimension=bge.dimension), max_size=100000)\n",
    "\n",
    "    # cached_llm = LangChainLLMs(llm=CUSTOMLLM(), session=session)\n",
    "    cached_llm = LangChainLLMs(llm=CUSTOMLLM())\n",
    "    llm_cache.init(\n",
    "        embedding_func=bge.to_embeddings,\n",
    "        data_manager=data_manager,\n",
    "        # pre_embedding_func=last_content,\n",
    "        pre_embedding_func=get_prompt,\n",
    "        post_process_messages_func=temperature_softmax,\n",
    "        similarity_evaluation=SearchDistanceEvaluation(),  # 可以根据设置的最大距离来筛选召回结果\n",
    "    )\n",
    "    put(\"货币的定义\", \"货币是指被政府或国家认可，并用于购买商品、支付债务和服务的一种广义支付手段。它可以是纸币、硬币或数字形式\", cache_obj=llm_cache)\n",
    "    # data_path文件里面的格式是{'query': xxx, 'result': xxx}\n",
    "    data_path = \"xxxxxx\"\n",
    "    \n",
    "    # 灌库\n",
    "    test_queries = custom_data_process(data_path, llm_cache)\n",
    "\n",
    "    \"\"\"\n",
    "    for _ in range(2):\n",
    "        start_time = time.time()\n",
    "        # top_k=3设置了召回结果数量，但是因为post_process_messages_func的输出只取第一个所以最后只出一个结果\n",
    "        # 想要输出多个结果可以自定义post_process_messages_func\n",
    "        response = cached_llm(question, cache_obj=llm_cache, top_k=3)\n",
    "        print(f'Question: {question}')\n",
    "        print(\"Time consuming: {:.2f}s\".format(time.time() - start_time))\n",
    "        print(f'Answer: {response}\\n')\n",
    "    \"\"\"\n",
    "\n",
    "    for i in range(3):\n",
    "        response = cached_llm(test_queries[i], cache_obj=llm_cache, top_k=3)\n",
    "        print(f'Question: {test_queries[i]}')\n",
    "        print(f'Answer: {response}\\n')\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    main()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "demo",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
