{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e39ef2c8-e8c6-429e-87a6-c350fe7035d1",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:05:51.830327Z",
     "iopub.status.busy": "2024-01-15T05:05:51.829959Z",
     "iopub.status.idle": "2024-01-15T05:06:00.088306Z",
     "shell.execute_reply": "2024-01-15T05:06:00.087722Z",
     "shell.execute_reply.started": "2024-01-15T05:05:51.830303Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting package metadata (repodata.json): | ERROR conda.auxlib.logz:stringify(171): Expecting value: line 1 column 1 (char 0)\n",
      "Traceback (most recent call last):\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/requests/models.py\", line 971, in json\n",
      "    return complexjson.loads(self.text, **kwargs)\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/simplejson/__init__.py\", line 514, in loads\n",
      "    return _default_decoder.decode(s)\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/simplejson/decoder.py\", line 386, in decode\n",
      "    obj, end = self.raw_decode(s)\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/simplejson/decoder.py\", line 416, in raw_decode\n",
      "    return self.scan_once(s, idx=_w(s, idx).end())\n",
      "simplejson.errors.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\n",
      "\n",
      "During handling of the above exception, another exception occurred:\n",
      "\n",
      "Traceback (most recent call last):\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/conda/auxlib/logz.py\", line 165, in stringify\n",
      "    requests_models_Response_builder(builder, obj)\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/conda/auxlib/logz.py\", line 141, in requests_models_Response_builder\n",
      "    resp = response_object.json()\n",
      "  File \"/opt/conda/lib/python3.10/site-packages/requests/models.py\", line 975, in json\n",
      "    raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)\n",
      "requests.exceptions.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\n",
      "[output trimmed: the identical non-fatal conda.auxlib.logz traceback above was logged 6 times while fetching repodata; environment creation still completed successfully]\n",
      "done\n",
      "Solving environment: done\n",
      "\n",
      "Downloading and Extracting Packages:\n",
      "\n",
      "Preparing transaction: done\n",
      "Verifying transaction: done\n",
      "Executing transaction: done\n",
      "#\n",
      "# To activate this environment, use\n",
      "#\n",
      "#     $ conda activate chatglm\n",
      "#\n",
      "# To deactivate an active environment, use\n",
      "#\n",
      "#     $ conda deactivate\n",
      "\n"
     ]
    }
   ],
   "source": [
    "!conda env create -f env.yml"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "614dbdee-721f-40d8-9ecc-129ac8d59d41",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:06:07.202950Z",
     "iopub.status.busy": "2024-01-15T05:06:07.202619Z",
     "iopub.status.idle": "2024-01-15T05:06:07.822608Z",
     "shell.execute_reply": "2024-01-15T05:06:07.822069Z",
     "shell.execute_reply.started": "2024-01-15T05:06:07.202929Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# conda environments:\n",
      "#\n",
      "base                     /opt/conda\n",
      "chatglm                  /opt/conda/envs/chatglm\n",
      "\n"
     ]
    }
   ],
   "source": [
    "!conda env list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "8edee3e7-9a69-44e4-a7b8-cbe397146e97",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:06:10.425306Z",
     "iopub.status.busy": "2024-01-15T05:06:10.424980Z",
     "iopub.status.idle": "2024-01-15T05:06:10.674394Z",
     "shell.execute_reply": "2024-01-15T05:06:10.673805Z",
     "shell.execute_reply.started": "2024-01-15T05:06:10.425287Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): `!activate chatglm` was a no-op — every `!` command runs in a\n",
    "# throwaway subshell, so activating an env there cannot affect this kernel\n",
    "# (the following cells still report the base env's Python and CPU-only torch).\n",
    "# To actually use the `chatglm` env, either switch this notebook to the\n",
    "# `chatglm3-demo` kernel installed below, or prefix shell commands with\n",
    "# `conda run -n chatglm <cmd>`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "d328514e-c495-4645-ab6e-b6e49396e855",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:06:13.032889Z",
     "iopub.status.busy": "2024-01-15T05:06:13.032571Z",
     "iopub.status.idle": "2024-01-15T05:06:13.194508Z",
     "shell.execute_reply": "2024-01-15T05:06:13.194005Z",
     "shell.execute_reply.started": "2024-01-15T05:06:13.032869Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Python 3.10.13\n"
     ]
    }
   ],
   "source": [
    "!python --version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0333c2c4-d0b8-401a-9e13-db157a974fc7",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:06:59.884163Z",
     "iopub.status.busy": "2024-01-15T05:06:59.883757Z",
     "iopub.status.idle": "2024-01-15T05:06:59.887359Z",
     "shell.execute_reply": "2024-01-15T05:06:59.886896Z",
     "shell.execute_reply.started": "2024-01-15T05:06:59.884119Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'2.1.0+cpu'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "torch.__version__"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8a5ecf18-5c97-4a98-8f6e-bbcc80e9ccf7",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:07:32.814669Z",
     "iopub.status.busy": "2024-01-15T05:07:32.814340Z",
     "iopub.status.idle": "2024-01-15T05:07:37.377242Z",
     "shell.execute_reply": "2024-01-15T05:07:37.376264Z",
     "shell.execute_reply.started": "2024-01-15T05:07:32.814650Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "获取:1 http://mirrors.aliyun.com/ubuntu jammy InRelease [270 kB]\n",
      "获取:2 http://mirrors.aliyun.com/ubuntu jammy-security InRelease [110 kB]\n",
      "获取:3 http://mirrors.aliyun.com/ubuntu jammy-updates InRelease [119 kB]\n",
      "获取:4 http://mirrors.aliyun.com/ubuntu jammy-backports InRelease [109 kB]\n",
      "获取:5 http://mirrors.aliyun.com/ubuntu jammy/multiverse amd64 Packages [266 kB]\n",
      "获取:6 http://mirrors.aliyun.com/ubuntu jammy/universe amd64 Packages [17.5 MB]\n",
      "获取:7 http://mirrors.aliyun.com/ubuntu jammy/restricted amd64 Packages [164 kB]\n",
      "获取:8 http://mirrors.aliyun.com/ubuntu jammy/main amd64 Packages [1,792 kB]\n",
      "获取:9 http://mirrors.aliyun.com/ubuntu jammy-security/main amd64 Packages [1,340 kB]\n",
      "获取:10 http://mirrors.aliyun.com/ubuntu jammy-security/multiverse amd64 Packages [44.6 kB]\n",
      "获取:11 http://mirrors.aliyun.com/ubuntu jammy-security/restricted amd64 Packages [1,595 kB]\n",
      "获取:12 http://mirrors.aliyun.com/ubuntu jammy-security/universe amd64 Packages [1,049 kB]\n",
      "获取:13 http://mirrors.aliyun.com/ubuntu jammy-updates/multiverse amd64 Packages [50.4 kB]\n",
      "获取:14 http://mirrors.aliyun.com/ubuntu jammy-updates/restricted amd64 Packages [1,631 kB]\n",
      "获取:15 http://mirrors.aliyun.com/ubuntu jammy-updates/universe amd64 Packages [1,309 kB]\n",
      "获取:16 http://mirrors.aliyun.com/ubuntu jammy-updates/main amd64 Packages [1,617 kB]\n",
      "获取:17 http://mirrors.aliyun.com/ubuntu jammy-backports/main amd64 Packages [50.4 kB]\n",
      "获取:18 http://mirrors.aliyun.com/ubuntu jammy-backports/universe amd64 Packages [28.1 kB]\n",
      "已下载 29.0 MB，耗时 1秒 (22.9 MB/s)                               \n",
      "正在读取软件包列表... 完成%\n",
      "正在读取软件包列表... 完成%\n",
      "正在分析软件包的依赖关系树... 完成%\n",
      "正在读取状态信息... 完成                   \n",
      "g++ 已经是最新版 (4:11.2.0-1ubuntu1)。\n",
      "g++ 已设置为手动安装。\n",
      "将会同时安装下列软件：\n",
      "  cmake-data dh-elpa-helper emacsen-common libjsoncpp25 librhash0 libuv1\n",
      "建议安装：\n",
      "  cmake-doc cmake-format\n",
      "下列【新】软件包将被安装：\n",
      "  cmake cmake-data dh-elpa-helper emacsen-common libjsoncpp25 librhash0 libuv1\n",
      "升级了 0 个软件包，新安装了 7 个软件包， 要卸载 0 个软件包，有 53 个软件包未被升级。\n",
      "需要下载 7,246 kB 的归档。\n",
      "解压缩后会消耗 32.2 MB 的额外空间。\n",
      "获取:1 http://mirrors.aliyun.com/ubuntu jammy/main amd64 libuv1 amd64 1.43.0-1 [93.1 kB]\n",
      "获取:2 http://mirrors.aliyun.com/ubuntu jammy/main amd64 libjsoncpp25 amd64 1.9.5-3 [80.0 kB]\n",
      "获取:3 http://mirrors.aliyun.com/ubuntu jammy/main amd64 librhash0 amd64 1.4.2-1ubuntu1 [125 kB]\n",
      "获取:4 http://mirrors.aliyun.com/ubuntu jammy/main amd64 dh-elpa-helper all 2.0.9ubuntu1 [7,610 B]\n",
      "获取:5 http://mirrors.aliyun.com/ubuntu jammy/main amd64 emacsen-common all 3.0.4 [14.9 kB]\n",
      "获取:6 http://mirrors.aliyun.com/ubuntu jammy-updates/main amd64 cmake-data all 3.22.1-1ubuntu1.22.04.1 [1,913 kB]\n",
      "获取:7 http://mirrors.aliyun.com/ubuntu jammy-updates/main amd64 cmake amd64 3.22.1-1ubuntu1.22.04.1 [5,013 kB]\n",
      "已下载 7,246 kB，耗时 0秒 (18.9 MB/s) \n",
      "debconf: 无法初始化前端界面：Dialog\n",
      "debconf: (没有安装任何可用的对话框类程序，所以无法使用基于此种形式的界面。 at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 78, <> line 7.)\n",
      "debconf: 返回前端界面：Readline\n",
      "正在选中未选择的软件包 libuv1:amd64。\n",
      "(正在读取数据库 ... 系统当前共安装有 35520 个文件和目录。)\n",
      "准备解压 .../0-libuv1_1.43.0-1_amd64.deb  ...\n",
      "正在解压 libuv1:amd64 (1.43.0-1) ...\n",
      "正在选中未选择的软件包 libjsoncpp25:amd64。\n",
      "准备解压 .../1-libjsoncpp25_1.9.5-3_amd64.deb  ...\n",
      "正在解压 libjsoncpp25:amd64 (1.9.5-3) ...\n",
      "正在选中未选择的软件包 librhash0:amd64。\n",
      "准备解压 .../2-librhash0_1.4.2-1ubuntu1_amd64.deb  ...\n",
      "正在解压 librhash0:amd64 (1.4.2-1ubuntu1) ...\n",
      "正在选中未选择的软件包 dh-elpa-helper。\n",
      "准备解压 .../3-dh-elpa-helper_2.0.9ubuntu1_all.deb  ...\n",
      "正在解压 dh-elpa-helper (2.0.9ubuntu1) ...\n",
      "正在选中未选择的软件包 emacsen-common。\n",
      "准备解压 .../4-emacsen-common_3.0.4_all.deb  ...\n",
      "正在解压 emacsen-common (3.0.4) ...\n",
      "正在选中未选择的软件包 cmake-data。\n",
      "准备解压 .../5-cmake-data_3.22.1-1ubuntu1.22.04.1_all.deb  ...\n",
      "正在解压 cmake-data (3.22.1-1ubuntu1.22.04.1) ...\n",
      "正在选中未选择的软件包 cmake。\n",
      "准备解压 .../6-cmake_3.22.1-1ubuntu1.22.04.1_amd64.deb  ...\n",
      "正在解压 cmake (3.22.1-1ubuntu1.22.04.1) ...\n",
      "正在设置 libuv1:amd64 (1.43.0-1) ...\n",
      "正在设置 emacsen-common (3.0.4) ...\n",
      "正在设置 dh-elpa-helper (2.0.9ubuntu1) ...\n",
      "正在设置 libjsoncpp25:amd64 (1.9.5-3) ...\n",
      "正在设置 librhash0:amd64 (1.4.2-1ubuntu1) ...\n",
      "正在设置 cmake-data (3.22.1-1ubuntu1.22.04.1) ...\n",
      "正在设置 cmake (3.22.1-1ubuntu1.22.04.1) ...\n",
      "正在处理用于 libc-bin (2.35-0ubuntu3.4) 的触发器 ...\n"
     ]
    }
   ],
   "source": [
    "!apt-get update && apt-get -y install g++ cmake"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "28f37cb9-85d4-489a-bfb6-e7969b47b7a0",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:07:45.853631Z",
     "iopub.status.busy": "2024-01-15T05:07:45.853296Z",
     "iopub.status.idle": "2024-01-15T05:07:46.025574Z",
     "shell.execute_reply": "2024-01-15T05:07:46.024960Z",
     "shell.execute_reply.started": "2024-01-15T05:07:45.853611Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cmake version 3.22.1\n",
      "\n",
      "CMake suite maintained and supported by Kitware (kitware.com/cmake).\n"
     ]
    }
   ],
   "source": [
    "!cmake --version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "6f7dbb26-f413-4d7e-a87b-a299d511236a",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:06:45.258888Z",
     "iopub.status.busy": "2024-01-15T05:06:45.258510Z",
     "iopub.status.idle": "2024-01-15T05:06:45.800554Z",
     "shell.execute_reply": "2024-01-15T05:06:45.799948Z",
     "shell.execute_reply.started": "2024-01-15T05:06:45.258868Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Installed kernelspec chatglm3-demo in /root/.local/share/jupyter/kernels/chatglm3-demo\n"
     ]
    }
   ],
   "source": [
    "!ipython kernel install --name chatglm3-demo --user"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "60fb539d-7cdd-45f1-9896-c11ff4a53d7f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:07:56.913208Z",
     "iopub.status.busy": "2024-01-15T05:07:56.912817Z",
     "iopub.status.idle": "2024-01-15T05:07:57.102437Z",
     "shell.execute_reply": "2024-01-15T05:07:57.101871Z",
     "shell.execute_reply.started": "2024-01-15T05:07:56.913183Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Updated Git hooks.\n",
      "Git LFS initialized.\n"
     ]
    }
   ],
   "source": [
    "!git lfs install"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "09373e97-64ec-4359-9e8f-16d7a93bc0d2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:08:04.214419Z",
     "iopub.status.busy": "2024-01-15T05:08:04.214051Z",
     "iopub.status.idle": "2024-01-15T05:09:15.151546Z",
     "shell.execute_reply": "2024-01-15T05:09:15.150977Z",
     "shell.execute_reply.started": "2024-01-15T05:08:04.214397Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正克隆到 'chatglm3-6b'...\n",
      "remote: Enumerating objects: 119, done.\u001b[K\n",
      "remote: Counting objects: 100% (18/18), done.\u001b[K\n",
      "remote: Compressing objects: 100% (18/18), done.\u001b[K\n",
      "remote: Total 119 (delta 3), reused 0 (delta 0), pack-reused 101\u001b[K\n",
      "接收对象中: 100% (119/119), 58.99 KiB | 8.43 MiB/s, 完成.\n",
      "处理 delta 中: 100% (45/45), 完成.\n",
      "过滤内容: 100% (15/15), 23.26 GiB | 339.82 MiB/s, 完成.\n"
     ]
    }
   ],
   "source": [
    "!git clone https://www.modelscope.cn/ZhipuAI/chatglm3-6b.git\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "63f8eb4b-d5cb-4fe2-940f-acbeccb9a2b4",
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:09:21.495865Z",
     "iopub.status.busy": "2024-01-15T05:09:21.495530Z",
     "iopub.status.idle": "2024-01-15T05:11:01.170212Z",
     "shell.execute_reply": "2024-01-15T05:11:01.169690Z",
     "shell.execute_reply.started": "2024-01-15T05:09:21.495844Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正克隆到 'chatglm.cpp'...\n",
      "remote: Enumerating objects: 589, done.\u001b[K\n",
      "remote: Counting objects: 100% (589/589), done.\u001b[K\n",
      "remote: Compressing objects: 100% (226/226), done.\u001b[K\n",
      "remote: Total 589 (delta 342), reused 589 (delta 342), pack-reused 0\u001b[K\n",
      "接收对象中: 100% (589/589), 1.44 MiB | 990.00 KiB/s, 完成.\n",
      "处理 delta 中: 100% (342/342), 完成.\n",
      "子模组 'third_party/ggml'（https://github.com/ggerganov/ggml.git）已对路径 'third_party/ggml' 注册\n",
      "子模组 'third_party/pybind11'（https://github.com/pybind/pybind11.git）已对路径 'third_party/pybind11' 注册\n",
      "子模组 'third_party/sentencepiece'（https://github.com/google/sentencepiece.git）已对路径 'third_party/sentencepiece' 注册\n",
      "正克隆到 '/mnt/workspace/my_chatglm/chatglm.cpp/third_party/ggml'...\n",
      "remote: Enumerating objects: 5678, done.        \n",
      "remote: Counting objects: 100% (1371/1371), done.        \n",
      "remote: Compressing objects: 100% (305/305), done.        \n",
      "remote: Total 5678 (delta 1159), reused 1194 (delta 1031), pack-reused 4307        \n",
      "接收对象中: 100% (5678/5678), 6.64 MiB | 4.72 MiB/s, 完成.\n",
      "处理 delta 中: 100% (3514/3514), 完成.\n",
      "正克隆到 '/mnt/workspace/my_chatglm/chatglm.cpp/third_party/pybind11'...\n",
      "remote: Enumerating objects: 27512, done.        \n",
      "remote: Counting objects: 100% (263/263), done.        \n",
      "remote: Compressing objects: 100% (118/118), done.        \n",
      "remote: Total 27512 (delta 148), reused 206 (delta 124), pack-reused 27249        \n",
      "接收对象中: 100% (27512/27512), 10.50 MiB | 6.75 MiB/s, 完成.\n",
      "处理 delta 中: 100% (19425/19425), 完成.\n",
      "正克隆到 '/mnt/workspace/my_chatglm/chatglm.cpp/third_party/sentencepiece'...\n",
      "remote: Enumerating objects: 4994, done.        \n",
      "remote: Counting objects: 100% (2026/2026), done.        \n",
      "remote: Compressing objects: 100% (276/276), done.        \n",
      "remote: Total 4994 (delta 1790), reused 1802 (delta 1744), pack-reused 2968        \n",
      "接收对象中: 100% (4994/4994), 26.77 MiB | 12.13 MiB/s, 完成.\n",
      "处理 delta 中: 100% (3443/3443), 完成.\n",
      "子模组路径 'third_party/ggml'：检出 '6549d12f2e3176050040a86334f17c001e170f13'\n",
      "子模组路径 'third_party/pybind11'：检出 '8b03ffa7c06cd9c8a38297b1c8923695d1ff1b07'\n",
      "子模组路径 'third_party/sentencepiece'：检出 '635fe8423a249b6e081aacd290d8aef7476c6a28'\n"
     ]
    }
   ],
   "source": [
    "!git clone --recursive https://gitee.com/wfhe/chatglm.cpp.git"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "918ce34b-6c42-4068-9e7a-cab14feef39f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:16:22.357046Z",
     "iopub.status.busy": "2024-01-15T05:16:22.356715Z",
     "iopub.status.idle": "2024-01-15T05:18:04.860852Z",
     "shell.execute_reply": "2024-01-15T05:18:04.860268Z",
     "shell.execute_reply.started": "2024-01-15T05:16:22.357026Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:00<00:00, 18.74it/s]\n",
      "Processing model states: 100%|████████████████| 199/199 [01:39<00:00,  2.01it/s]\n",
      "+---------------------------------------------------------------------+---------------------------+---------+\n",
      "| name                                                                | shape                     | dtype   |\n",
      "|---------------------------------------------------------------------+---------------------------+---------|\n",
      "| transformer.embedding.word_embeddings.weight                        | torch.Size([65024, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.0.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.0.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.0.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.0.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.0.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.0.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.0.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.1.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.1.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.1.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.1.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.1.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.1.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.1.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.2.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.2.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.2.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.2.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.2.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.2.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.2.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.3.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.3.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.3.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.3.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.3.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.3.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.3.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.4.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.4.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.4.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.4.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.4.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.4.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.4.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.5.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.5.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.5.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.5.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.5.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.5.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.5.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.6.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.6.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.6.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.6.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.6.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.6.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.6.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.7.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.7.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.7.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.7.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.7.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.7.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.7.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.8.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.8.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.8.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.8.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.8.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.8.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.8.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.9.input_layernorm.weight                 | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.9.self_attention.query_key_value.weight  | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.9.self_attention.query_key_value.bias    | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.9.self_attention.dense.weight            | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.9.post_attention_layernorm.weight        | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.9.mlp.dense_h_to_4h.weight               | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.9.mlp.dense_4h_to_h.weight               | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.10.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.10.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.10.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.10.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.10.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.10.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.10.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.11.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.11.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.11.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.11.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.11.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.11.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.11.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.12.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.12.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.12.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.12.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.12.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.12.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.12.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.13.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.13.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.13.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.13.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.13.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.13.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.13.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.14.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.14.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.14.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.14.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.14.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.14.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.14.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.15.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.15.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.15.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.15.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.15.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.15.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.15.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.16.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.16.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.16.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.16.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.16.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.16.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.16.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.17.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.17.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.17.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.17.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.17.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.17.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.17.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.18.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.18.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.18.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.18.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.18.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.18.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.18.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.19.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.19.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.19.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.19.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.19.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.19.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.19.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.20.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.20.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.20.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.20.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.20.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.20.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.20.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.21.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.21.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.21.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.21.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.21.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.21.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.21.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.22.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.22.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.22.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.22.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.22.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.22.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.22.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.23.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.23.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.23.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.23.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.23.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.23.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.23.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.24.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.24.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.24.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.24.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.24.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.24.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.24.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.25.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.25.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.25.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.25.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.25.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.25.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.25.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.26.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.26.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.26.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.26.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.26.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.26.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.26.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.layers.27.input_layernorm.weight                | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.27.self_attention.query_key_value.weight | torch.Size([4608, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.27.self_attention.query_key_value.bias   | torch.Size([4608])        | F32     |\n",
      "| transformer.encoder.layers.27.self_attention.dense.weight           | torch.Size([4096, 4096])  | Q4_0    |\n",
      "| transformer.encoder.layers.27.post_attention_layernorm.weight       | torch.Size([4096])        | F32     |\n",
      "| transformer.encoder.layers.27.mlp.dense_h_to_4h.weight              | torch.Size([27392, 4096]) | Q4_0    |\n",
      "| transformer.encoder.layers.27.mlp.dense_4h_to_h.weight              | torch.Size([4096, 13696]) | Q4_0    |\n",
      "| transformer.encoder.final_layernorm.weight                          | torch.Size([4096])        | F32     |\n",
      "| transformer.output_layer.weight                                     | torch.Size([65024, 4096]) | Q4_0    |\n",
      "+---------------------------------------------------------------------+---------------------------+---------+\n",
      "GGML model saved to chatglm-ggml.bin\n"
     ]
    }
   ],
   "source": [
    "!python chatglm.cpp/chatglm_cpp/convert.py -i chatglm3-6b -t q4_0 -o ./chatglm-ggml.bin"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ca1c0f26-60ff-4778-82d7-a3be96d9104d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:18:15.249118Z",
     "iopub.status.busy": "2024-01-15T05:18:15.248772Z",
     "iopub.status.idle": "2024-01-15T05:19:10.162381Z",
     "shell.execute_reply": "2024-01-15T05:19:10.161796Z",
     "shell.execute_reply.started": "2024-01-15T05:18:15.249096Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple\n",
      "Processing ./chatglm.cpp\n",
      "  Installing build dependencies ... \u001b[?25ldone\n",
      "\u001b[?25h  Getting requirements to build wheel ... \u001b[?25ldone\n",
      "\u001b[?25h  Installing backend dependencies ... \u001b[?25ldone\n",
      "\u001b[?25h  Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
      "\u001b[?25hBuilding wheels for collected packages: chatglm-cpp\n",
      "  Building wheel for chatglm-cpp (pyproject.toml) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for chatglm-cpp: filename=chatglm_cpp-0.3.1.dev0-cp310-cp310-linux_x86_64.whl size=824201 sha256=31ac3f8f0d819ee939b7db8e6226da37f9221b68c278b5c15638dfb12a29c98c\n",
      "  Stored in directory: /tmp/pip-ephem-wheel-cache-fk5wt7k4/wheels/6c/45/99/ba12893a810b018cdce4b561c7ac5d52ee66f7d7a404709f37\n",
      "Successfully built chatglm-cpp\n",
      "\u001b[33mDEPRECATION: omegaconf 2.0.6 has a non-standard dependency specifier PyYAML>=5.1.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of omegaconf or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0m\u001b[33mDEPRECATION: pytorch-lightning 1.7.7 has a non-standard dependency specifier torch>=1.9.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0mInstalling collected packages: chatglm-cpp\n",
      "Successfully installed chatglm-cpp-0.3.1.dev0\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
      "\u001b[0m\n",
      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.3.2\u001b[0m\n",
      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "!pip install ./chatglm.cpp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a1f627fb-2542-46cc-b0f0-23a2a54562fe",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:20:31.302998Z",
     "iopub.status.busy": "2024-01-15T05:20:31.302672Z",
     "iopub.status.idle": "2024-01-15T05:20:55.611137Z",
     "shell.execute_reply": "2024-01-15T05:20:55.610675Z",
     "shell.execute_reply.started": "2024-01-15T05:20:31.302978Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatMessage(role=\"assistant\", content=\"好的，我将为你绘制一个简单的圆形。Python的 `turtle` 模块可以帮助我们绘制图形。以下是代码：\n",
       "\n",
       "```python\n",
       "import turtle\n",
       "\n",
       "# 创建一个新的画布\n",
       "turtle.setup(800, 600)\n",
       "\n",
       "# 创建一个名为“my_turtle”的画笔\n",
       "turtle.Turtle()\n",
       "turtle.speed(0)  # 设置画笔的速度为最慢\n",
       "\n",
       "# 开始绘制圆形\n",
       "turtle.circle(50)\n",
       "\n",
       "# 保持画笔在原点，然后绘制另一个圆形\n",
       "turtle.penup()\n",
       "turtle.goto(-50, 0)\n",
       "turtle.pendown()\n",
       "turtle.circle(50)\n",
       "\n",
       "# 结束绘制，关闭画布\n",
       "turtle.done()\n",
       "```\n",
       "\n",
       "这段代码将会创建一个窗口（画布），并在其中绘制一个红色的圆形。你想要我在这里执行这段代码吗？\", tool_calls=[])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import chatglm_cpp\n",
    "pipeline = chatglm_cpp.Pipeline(\"./chatglm-ggml.bin\", dtype=\"q4_0\")\n",
    "pipeline.chat([chatglm_cpp.ChatMessage(role=\"system\", content=\"你是一位智能AI助手，你叫ChatGLM，你连接着一台电脑，但请注意不能联网。在使用Python解决任务时，你可以运行代码并得到结果，如果运行结果有错误，你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件，文件默认存储路径是/mnt/workspace\"), \n",
    "               chatglm_cpp.ChatMessage(role=\"user\", content=\"用python画一个圆\")])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "73b194c5-e2cf-421d-97ad-cceaaa58a9a8",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T05:21:21.446807Z",
     "iopub.status.busy": "2024-01-15T05:21:21.446490Z",
     "iopub.status.idle": "2024-01-15T05:21:25.170738Z",
     "shell.execute_reply": "2024-01-15T05:21:25.170144Z",
     "shell.execute_reply.started": "2024-01-15T05:21:21.446789Z"
    },
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirrors.aliyun.com/pypi/simple\n",
      "Collecting uvicorn (from -r requirement.txt (line 1))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/26/59/fddd9df489fe27f492cc97626e03663fb3b9b6ef7ce8597a7cdc5f2cbbad/uvicorn-0.25.0-py3-none-any.whl (60 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.3/60.3 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting fastapi (from -r requirement.txt (line 2))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/e5/80/ddbf524c6169072ab5e8dd4e106d4eb482bf920da1996dde9f308f90aa8c/fastapi-0.109.0-py3-none-any.whl (92 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.0/92.0 kB\u001b[0m \u001b[31m19.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting pydantic_settings (from -r requirement.txt (line 3))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/5d/c9/8042368e9a1e6e229b5ec5d88449441a3ee8f8afe09988faeb190af30248/pydantic_settings-2.1.0-py3-none-any.whl (11 kB)\n",
      "Collecting sse_starlette (from -r requirement.txt (line 4))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/34/7e/d6087916bf58a4343459b47807a116a3a755e6ddd4857f375547e00f6252/sse_starlette-1.8.2-py3-none-any.whl (8.9 kB)\n",
      "Requirement already satisfied: click>=7.0 in /opt/conda/lib/python3.10/site-packages (from uvicorn->-r requirement.txt (line 1)) (8.1.7)\n",
      "Collecting h11>=0.8 (from uvicorn->-r requirement.txt (line 1))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl (58 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m20.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hRequirement already satisfied: typing-extensions>=4.0 in /opt/conda/lib/python3.10/site-packages (from uvicorn->-r requirement.txt (line 1)) (4.8.0)\n",
      "Requirement already satisfied: pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4 in /opt/conda/lib/python3.10/site-packages (from fastapi->-r requirement.txt (line 2)) (2.5.2)\n",
      "Collecting starlette<0.36.0,>=0.35.0 (from fastapi->-r requirement.txt (line 2))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/03/13/c60c738da2fb69d60ee1dc5631e8d152352003cc0bc4ce39582bdd90e293/starlette-0.35.1-py3-none-any.whl (71 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.1/71.1 kB\u001b[0m \u001b[31m27.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting python-dotenv>=0.21.0 (from pydantic_settings->-r requirement.txt (line 3))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/44/2f/62ea1c8b593f4e093cc1a7768f0d46112107e790c3e478532329e434f00b/python_dotenv-1.0.0-py3-none-any.whl (19 kB)\n",
      "Collecting anyio (from sse_starlette->-r requirement.txt (line 4))\n",
      "  Using cached https://mirrors.aliyun.com/pypi/packages/bf/cd/d6d9bb1dadf73e7af02d18225cbd2c93f8552e13130484f1c8dcfece292b/anyio-4.2.0-py3-none-any.whl (85 kB)\n",
      "Requirement already satisfied: annotated-types>=0.4.0 in /opt/conda/lib/python3.10/site-packages (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4->fastapi->-r requirement.txt (line 2)) (0.6.0)\n",
      "Requirement already satisfied: pydantic-core==2.14.5 in /opt/conda/lib/python3.10/site-packages (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4->fastapi->-r requirement.txt (line 2)) (2.14.5)\n",
      "Requirement already satisfied: idna>=2.8 in /opt/conda/lib/python3.10/site-packages (from anyio->sse_starlette->-r requirement.txt (line 4)) (3.4)\n",
      "Collecting sniffio>=1.1 (from anyio->sse_starlette->-r requirement.txt (line 4))\n",
      "  Downloading https://mirrors.aliyun.com/pypi/packages/c3/a0/5dba8ed157b0136607c7f2151db695885606968d1fae123dc3391e0cfdbf/sniffio-1.3.0-py3-none-any.whl (10 kB)\n",
      "Requirement already satisfied: exceptiongroup>=1.0.2 in /opt/conda/lib/python3.10/site-packages (from anyio->sse_starlette->-r requirement.txt (line 4)) (1.2.0)\n",
      "\u001b[33mDEPRECATION: omegaconf 2.0.6 has a non-standard dependency specifier PyYAML>=5.1.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of omegaconf or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0m\u001b[33mDEPRECATION: pytorch-lightning 1.7.7 has a non-standard dependency specifier torch>=1.9.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0mInstalling collected packages: sniffio, python-dotenv, h11, uvicorn, anyio, starlette, pydantic_settings, fastapi, sse_starlette\n",
      "Successfully installed anyio-4.2.0 fastapi-0.109.0 h11-0.14.0 pydantic_settings-2.1.0 python-dotenv-1.0.0 sniffio-1.3.0 sse_starlette-1.8.2 starlette-0.35.1 uvicorn-0.25.0\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
      "\u001b[0m\n",
      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.3.2\u001b[0m\n",
      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "!pip install -r requirement.txt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac002937-4186-44e1-ae11-50002fc24f2c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T05:21:35.875175Z",
     "iopub.status.busy": "2024-01-15T05:21:35.874817Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "!uvicorn openai_api:app --host 127.0.0.1 --port 8000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "220dcf8a-5535-402c-ad2a-829003f20d2f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T03:22:41.465268Z",
     "iopub.status.busy": "2024-01-15T03:22:41.464964Z",
     "iopub.status.idle": "2024-01-15T03:22:50.865159Z",
     "shell.execute_reply": "2024-01-15T03:22:50.864529Z",
     "shell.execute_reply.started": "2024-01-15T03:22:41.465245Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "!conda env export > env.yml"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "af3dc59b-b81b-4a91-ba6c-11a7f8f39666",
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2024-01-15T03:12:21.678113Z",
     "iopub.status.busy": "2024-01-15T03:12:21.677779Z",
     "iopub.status.idle": "2024-01-15T03:12:21.848312Z",
     "shell.execute_reply": "2024-01-15T03:12:21.847820Z",
     "shell.execute_reply.started": "2024-01-15T03:12:21.678092Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[master 8104bb5] add chat_cpp example\n",
      " 1 file changed, 62 insertions(+), 222 deletions(-)\n"
     ]
    }
   ],
   "source": [
    "!git add .\n",
    "!git commit -m 'add chat_cpp example'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "115a4d4b-a928-43bc-8829-41a3cc1231e5",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-01-15T03:12:42.125824Z",
     "iopub.status.busy": "2024-01-15T03:12:42.125480Z",
     "iopub.status.idle": "2024-01-15T03:12:43.903366Z",
     "shell.execute_reply": "2024-01-15T03:12:43.902816Z",
     "shell.execute_reply.started": "2024-01-15T03:12:42.125802Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "枚举对象中: 5, 完成.\n",
      "对象计数中: 100% (5/5), 完成.\n",
      "使用 64 个线程进行压缩\n",
      "压缩对象中: 100% (3/3), 完成.\n",
      "写入对象中: 100% (3/3), 1.35 KiB | 1.35 MiB/s, 完成.\n",
      "总共 3（差异 2），复用 0（差异 0），包复用 0\n",
      "remote: Powered by \u001b[01;33mGITEE.COM \u001b[0m[\u001b[01;35mGNK-6.4\u001b[0m]\u001b[0m\u001b[K\n",
      "To https://gitee.com/wfhe/my_chatglm.git\n",
      "   0f62e98..8104bb5  master -> master\n"
     ]
    }
   ],
   "source": [
    "!git push origin master"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1f5ca2b4-e7b7-4904-bc59-406ae80644d0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chatglm3-demo",
   "language": "python",
   "name": "chatglm3-demo"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
