{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "6189ee34-aaf4-4f7d-b717-49561fffd785",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: protobuf in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 1)) (5.26.1)\n",
      "Requirement already satisfied: transformers==4.36.2 in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 2)) (4.36.2)\n",
      "Requirement already satisfied: cpm_kernels in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 3)) (1.0.11)\n",
      "Requirement already satisfied: torch>=2.1.2 in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 4)) (2.2.2)\n",
      "Requirement already satisfied: gradio in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 5)) (4.25.0)\n",
      "Requirement already satisfied: mdtex2html in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 6)) (1.3.0)\n",
      "Requirement already satisfied: sentencepiece in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 7)) (0.2.0)\n",
      "Requirement already satisfied: accelerate in /root/miniconda3/lib/python3.12/site-packages (from -r ./ChatGLM-6B/requirements.txt (line 8)) (0.28.0)\n",
      "Requirement already satisfied: filelock in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (3.13.3)\n",
      "Requirement already satisfied: huggingface-hub<1.0,>=0.19.3 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (0.22.2)\n",
      "Requirement already satisfied: numpy>=1.17 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (1.26.4)\n",
      "Requirement already satisfied: packaging>=20.0 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (23.1)\n",
      "Requirement already satisfied: pyyaml>=5.1 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (6.0.1)\n",
      "Requirement already satisfied: regex!=2019.12.17 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (2023.12.25)\n",
      "Requirement already satisfied: requests in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (2.31.0)\n",
      "Requirement already satisfied: tokenizers<0.19,>=0.14 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (0.15.2)\n",
      "Requirement already satisfied: safetensors>=0.3.1 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (0.4.2)\n",
      "Requirement already satisfied: tqdm>=4.27 in /root/miniconda3/lib/python3.12/site-packages (from transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (4.65.0)\n",
      "Requirement already satisfied: typing-extensions>=4.8.0 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (4.10.0)\n",
      "Requirement already satisfied: sympy in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (1.12)\n",
      "Requirement already satisfied: networkx in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (3.2.1)\n",
      "Requirement already satisfied: jinja2 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (3.1.3)\n",
      "Requirement already satisfied: fsspec in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (2024.2.0)\n",
      "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (8.9.2.26)\n",
      "Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.3.1)\n",
      "Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (11.0.2.54)\n",
      "Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (10.3.2.106)\n",
      "Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (11.4.5.107)\n",
      "Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.0.106)\n",
      "Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (2.19.3)\n",
      "Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.1.105)\n",
      "Requirement already satisfied: nvidia-nvjitlink-cu12 in /root/miniconda3/lib/python3.12/site-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (12.4.99)\n",
      "Requirement already satisfied: aiofiles<24.0,>=22.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (23.2.1)\n",
      "Requirement already satisfied: altair<6.0,>=4.2.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (5.3.0)\n",
      "Requirement already satisfied: fastapi in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.110.1)\n",
      "Requirement already satisfied: ffmpy in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.3.2)\n",
      "Requirement already satisfied: gradio-client==0.15.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.15.0)\n",
      "Requirement already satisfied: httpx>=0.24.1 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.27.0)\n",
      "Requirement already satisfied: importlib-resources<7.0,>=1.3 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (6.4.0)\n",
      "Requirement already satisfied: markupsafe~=2.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.1.5)\n",
      "Requirement already satisfied: matplotlib~=3.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (3.8.3)\n",
      "Requirement already satisfied: orjson~=3.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (3.10.0)\n",
      "Requirement already satisfied: pandas<3.0,>=1.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.2.1)\n",
      "Requirement already satisfied: pillow<11.0,>=8.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (10.3.0)\n",
      "Requirement already satisfied: pydantic>=2.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.6.4)\n",
      "Requirement already satisfied: pydub in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.25.1)\n",
      "Requirement already satisfied: python-multipart>=0.0.9 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.0.9)\n",
      "Requirement already satisfied: ruff>=0.2.2 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.3.5)\n",
      "Requirement already satisfied: semantic-version~=2.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.10.0)\n",
      "Requirement already satisfied: tomlkit==0.12.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.0)\n",
      "Requirement already satisfied: typer<1.0,>=0.9 in /root/miniconda3/lib/python3.12/site-packages (from typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.0)\n",
      "Requirement already satisfied: uvicorn>=0.14.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.29.0)\n",
      "Requirement already satisfied: websockets<12.0,>=10.0 in /root/miniconda3/lib/python3.12/site-packages (from gradio-client==0.15.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (11.0.3)\n",
      "Requirement already satisfied: markdown in /root/miniconda3/lib/python3.12/site-packages (from mdtex2html->-r ./ChatGLM-6B/requirements.txt (line 6)) (3.6)\n",
      "Requirement already satisfied: latex2mathml in /root/miniconda3/lib/python3.12/site-packages (from mdtex2html->-r ./ChatGLM-6B/requirements.txt (line 6)) (3.77.0)\n",
      "Requirement already satisfied: psutil in /root/miniconda3/lib/python3.12/site-packages (from accelerate->-r ./ChatGLM-6B/requirements.txt (line 8)) (5.9.8)\n",
      "Requirement already satisfied: jsonschema>=3.0 in /root/miniconda3/lib/python3.12/site-packages (from altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (4.21.1)\n",
      "Requirement already satisfied: toolz in /root/miniconda3/lib/python3.12/site-packages (from altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.1)\n",
      "Requirement already satisfied: anyio in /root/miniconda3/lib/python3.12/site-packages (from httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (4.3.0)\n",
      "Requirement already satisfied: certifi in /root/miniconda3/lib/python3.12/site-packages (from httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2024.2.2)\n",
      "Requirement already satisfied: httpcore==1.* in /root/miniconda3/lib/python3.12/site-packages (from httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.0.5)\n",
      "Requirement already satisfied: idna in /root/miniconda3/lib/python3.12/site-packages (from httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (3.4)\n",
      "Requirement already satisfied: sniffio in /root/miniconda3/lib/python3.12/site-packages (from httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.3.1)\n",
      "Requirement already satisfied: h11<0.15,>=0.13 in /root/miniconda3/lib/python3.12/site-packages (from httpcore==1.*->httpx>=0.24.1->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.14.0)\n",
      "Requirement already satisfied: contourpy>=1.0.1 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.2.1)\n",
      "Requirement already satisfied: cycler>=0.10 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.1)\n",
      "Requirement already satisfied: fonttools>=4.22.0 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (4.50.0)\n",
      "Requirement already satisfied: kiwisolver>=1.3.1 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.4.5)\n",
      "Requirement already satisfied: pyparsing>=2.3.1 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (3.1.2)\n",
      "Requirement already satisfied: python-dateutil>=2.7 in /root/miniconda3/lib/python3.12/site-packages (from matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.9.0)\n",
      "Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/lib/python3.12/site-packages (from pandas<3.0,>=1.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2024.1)\n",
      "Requirement already satisfied: tzdata>=2022.7 in /root/miniconda3/lib/python3.12/site-packages (from pandas<3.0,>=1.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2024.1)\n",
      "Requirement already satisfied: annotated-types>=0.4.0 in /root/miniconda3/lib/python3.12/site-packages (from pydantic>=2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.6.0)\n",
      "Requirement already satisfied: pydantic-core==2.16.3 in /root/miniconda3/lib/python3.12/site-packages (from pydantic>=2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.16.3)\n",
      "Requirement already satisfied: typer-slim==0.12.0 in /root/miniconda3/lib/python3.12/site-packages (from typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.0)\n",
      "Requirement already satisfied: typer-cli==0.12.0 in /root/miniconda3/lib/python3.12/site-packages (from typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.12.0)\n",
      "Requirement already satisfied: click>=8.0.0 in /root/miniconda3/lib/python3.12/site-packages (from typer-slim==0.12.0->typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (8.1.7)\n",
      "Requirement already satisfied: shellingham>=1.3.0 in /root/miniconda3/lib/python3.12/site-packages (from typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.5.4)\n",
      "Requirement already satisfied: rich>=10.11.0 in /root/miniconda3/lib/python3.12/site-packages (from typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (13.7.1)\n",
      "\u001b[33mWARNING: typer 0.12.0 does not provide the extra 'all'\u001b[0m\u001b[33m\n",
      "\u001b[0mRequirement already satisfied: starlette<0.38.0,>=0.37.2 in /root/miniconda3/lib/python3.12/site-packages (from fastapi->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.37.2)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/lib/python3.12/site-packages (from requests->transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (2.0.4)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/lib/python3.12/site-packages (from requests->transformers==4.36.2->-r ./ChatGLM-6B/requirements.txt (line 2)) (2.1.0)\n",
      "Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/lib/python3.12/site-packages (from sympy->torch>=2.1.2->-r ./ChatGLM-6B/requirements.txt (line 4)) (1.3.0)\n",
      "Requirement already satisfied: attrs>=22.2.0 in /root/miniconda3/lib/python3.12/site-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (23.2.0)\n",
      "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /root/miniconda3/lib/python3.12/site-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2023.12.1)\n",
      "Requirement already satisfied: referencing>=0.28.4 in /root/miniconda3/lib/python3.12/site-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.34.0)\n",
      "Requirement already satisfied: rpds-py>=0.7.1 in /root/miniconda3/lib/python3.12/site-packages (from jsonschema>=3.0->altair<6.0,>=4.2.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.18.0)\n",
      "Requirement already satisfied: six>=1.5 in /root/miniconda3/lib/python3.12/site-packages (from python-dateutil>=2.7->matplotlib~=3.0->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (1.16.0)\n",
      "Requirement already satisfied: markdown-it-py>=2.2.0 in /root/miniconda3/lib/python3.12/site-packages (from rich>=10.11.0->typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (3.0.0)\n",
      "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /root/miniconda3/lib/python3.12/site-packages (from rich>=10.11.0->typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (2.17.2)\n",
      "Requirement already satisfied: mdurl~=0.1 in /root/miniconda3/lib/python3.12/site-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer-slim[standard]==0.12.0->typer<1.0,>=0.9->typer[all]<1.0,>=0.9; sys_platform != \"emscripten\"->gradio->-r ./ChatGLM-6B/requirements.txt (line 5)) (0.1.2)\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
      "\u001b[0mRequirement already satisfied: datasets in /root/miniconda3/lib/python3.12/site-packages (2.18.0)\n",
      "Requirement already satisfied: filelock in /root/miniconda3/lib/python3.12/site-packages (from datasets) (3.13.3)\n",
      "Requirement already satisfied: numpy>=1.17 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (1.26.4)\n",
      "Requirement already satisfied: pyarrow>=12.0.0 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (15.0.2)\n",
      "Requirement already satisfied: pyarrow-hotfix in /root/miniconda3/lib/python3.12/site-packages (from datasets) (0.6)\n",
      "Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (0.3.8)\n",
      "Requirement already satisfied: pandas in /root/miniconda3/lib/python3.12/site-packages (from datasets) (2.2.1)\n",
      "Requirement already satisfied: requests>=2.19.0 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (2.31.0)\n",
      "Requirement already satisfied: tqdm>=4.62.1 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (4.65.0)\n",
      "Requirement already satisfied: xxhash in /root/miniconda3/lib/python3.12/site-packages (from datasets) (3.4.1)\n",
      "Requirement already satisfied: multiprocess in /root/miniconda3/lib/python3.12/site-packages (from datasets) (0.70.16)\n",
      "Requirement already satisfied: fsspec<=2024.2.0,>=2023.1.0 in /root/miniconda3/lib/python3.12/site-packages (from fsspec[http]<=2024.2.0,>=2023.1.0->datasets) (2024.2.0)\n",
      "Requirement already satisfied: aiohttp in /root/miniconda3/lib/python3.12/site-packages (from datasets) (3.9.3)\n",
      "Requirement already satisfied: huggingface-hub>=0.19.4 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (0.22.2)\n",
      "Requirement already satisfied: packaging in /root/miniconda3/lib/python3.12/site-packages (from datasets) (23.1)\n",
      "Requirement already satisfied: pyyaml>=5.1 in /root/miniconda3/lib/python3.12/site-packages (from datasets) (6.0.1)\n",
      "Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/lib/python3.12/site-packages (from aiohttp->datasets) (1.3.1)\n",
      "Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/lib/python3.12/site-packages (from aiohttp->datasets) (23.2.0)\n",
      "Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/lib/python3.12/site-packages (from aiohttp->datasets) (1.4.1)\n",
      "Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/lib/python3.12/site-packages (from aiohttp->datasets) (6.0.5)\n",
      "Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/lib/python3.12/site-packages (from aiohttp->datasets) (1.9.4)\n",
      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/lib/python3.12/site-packages (from huggingface-hub>=0.19.4->datasets) (4.10.0)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/lib/python3.12/site-packages (from requests>=2.19.0->datasets) (2.0.4)\n",
      "Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/lib/python3.12/site-packages (from requests>=2.19.0->datasets) (3.4)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/lib/python3.12/site-packages (from requests>=2.19.0->datasets) (2.1.0)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/lib/python3.12/site-packages (from requests>=2.19.0->datasets) (2024.2.2)\n",
      "Requirement already satisfied: python-dateutil>=2.8.2 in /root/miniconda3/lib/python3.12/site-packages (from pandas->datasets) (2.9.0)\n",
      "Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/lib/python3.12/site-packages (from pandas->datasets) (2024.1)\n",
      "Requirement already satisfied: tzdata>=2022.7 in /root/miniconda3/lib/python3.12/site-packages (from pandas->datasets) (2024.1)\n",
      "Requirement already satisfied: six>=1.5 in /root/miniconda3/lib/python3.12/site-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.16.0)\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "# 安装依赖\n",
    "!pip3 install -r ./ChatGLM3/requirements.txt\n",
    "\n",
    "!pip3 install datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8977db31-a929-4828-9b7b-2185c74f522e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "d1acb190-1a5f-4ac8-89bc-b2a566991fbd",
   "metadata": {},
   "source": [
    "# ChatGLM Lora 微调\n",
    "使用官方提供的示例，成功微调出广告数据集，要求使用 Lora 进行微调：\n",
    "\n",
    "- 你能看到 loss 的下降，并在最终回到 3.2 左右。\n",
    "- 你需要自己适配 inference.py 中的代码，并迁移到其他的推理框架中。例如，basic_demo 中没有读取微调模型后的 adapter 的内容，你需要参考 inference.py 的代码并进行修改，让其他 demo 能读入你的微调代码，将其部署到 basic_demo 下的 gradio_demo 中，并能够通过 webui 来进行调用"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fb0515f7-ceba-4e4b-9eea-92f35b0b813a",
   "metadata": {},
   "source": [
    "## 1. 准备数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "1c31d18a-832f-4fba-8b2e-d4dafcf9218c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from typing import Union\n",
    "from pathlib import Path\n",
    "\n",
    "\n",
    "def _resolve_path(path: Union[str, Path]) -> Path:\n",
    "    return Path(path).expanduser().resolve()\n",
    "\n",
    "\n",
    "def _mkdir(dir_name: Union[str, Path]):\n",
    "    dir_name = _resolve_path(dir_name)\n",
    "    if not dir_name.is_dir():\n",
    "        dir_name.mkdir(parents=True, exist_ok=False)\n",
    "\n",
    "\n",
    "def convert_adgen(data_dir: Union[str, Path], save_dir: Union[str, Path]):\n",
    "    def _convert(in_file: Path, out_file: Path):\n",
    "        _mkdir(out_file.parent)\n",
    "        with open(in_file, encoding='utf-8') as fin:\n",
    "            with open(out_file, 'wt', encoding='utf-8') as fout:\n",
    "                for line in fin:\n",
    "                    dct = json.loads(line)\n",
    "                    sample = {'conversations': [{'role': 'user', 'content': dct['content']},\n",
    "                                                {'role': 'assistant', 'content': dct['summary']}]}\n",
    "                    fout.write(json.dumps(sample, ensure_ascii=False) + '\\n')\n",
    "\n",
    "    data_dir = _resolve_path(data_dir)\n",
    "    save_dir = _resolve_path(save_dir)\n",
    "\n",
    "    train_file = data_dir / 'train.json'\n",
    "    if train_file.is_file():\n",
    "        out_file = save_dir / train_file.relative_to(data_dir)\n",
    "        _convert(train_file, out_file)\n",
    "\n",
    "    dev_file = data_dir / 'dev.json'\n",
    "    if dev_file.is_file():\n",
    "        out_file = save_dir / dev_file.relative_to(data_dir)\n",
    "        _convert(dev_file, out_file)\n",
    "\n",
    "\n",
    "convert_adgen('data/AdvertiseGen', 'data/AdvertiseGen_fix')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de452c76-acc2-4ef9-b50e-4a7fef2eccae",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "b7530878-d3e5-4607-90d0-85452e57d00b",
   "metadata": {},
   "source": [
    "### 查看数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "71fac32c-8c50-47b4-a61e-719dd14f24d5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['conversations'],\n",
       "        num_rows: 114599\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "from datasets import load_dataset\n",
    "\n",
    "tran_dataset = load_dataset('json', data_files='data/AdvertiseGen_fix/train.json')\n",
    "tran_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "9645adfc-4d56-4415-a696-0583faa94756",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['conversations'],\n",
       "        num_rows: 1070\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dev_dataset = load_dataset('json', data_files='data/AdvertiseGen_fix/dev.json')\n",
    "dev_dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6d5c639-56e0-47bc-bb18-2e199a9fd22e",
   "metadata": {},
   "source": [
    "## 2. 使用命令行开始微调"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "0f3af83e-b1ad-46fb-a10c-387a46f9e29d",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: jieba in /root/miniconda3/lib/python3.12/site-packages (0.42.1)\n",
      "Requirement already satisfied: nltk in /root/miniconda3/lib/python3.12/site-packages (3.8.1)\n",
      "Requirement already satisfied: peft in /root/miniconda3/lib/python3.12/site-packages (0.10.0)\n",
      "Requirement already satisfied: rouge_chinese in /root/miniconda3/lib/python3.12/site-packages (1.0.3)\n",
      "Requirement already satisfied: click in /root/miniconda3/lib/python3.12/site-packages (from nltk) (8.1.7)\n",
      "Requirement already satisfied: joblib in /root/miniconda3/lib/python3.12/site-packages (from nltk) (1.3.2)\n",
      "Requirement already satisfied: regex>=2021.8.3 in /root/miniconda3/lib/python3.12/site-packages (from nltk) (2023.12.25)\n",
      "Requirement already satisfied: tqdm in /root/miniconda3/lib/python3.12/site-packages (from nltk) (4.65.0)\n",
      "Requirement already satisfied: numpy>=1.17 in /root/miniconda3/lib/python3.12/site-packages (from peft) (1.26.4)\n",
      "Requirement already satisfied: packaging>=20.0 in /root/miniconda3/lib/python3.12/site-packages (from peft) (23.2)\n",
      "Requirement already satisfied: psutil in /root/miniconda3/lib/python3.12/site-packages (from peft) (5.9.8)\n",
      "Requirement already satisfied: pyyaml in /root/miniconda3/lib/python3.12/site-packages (from peft) (6.0.1)\n",
      "Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/lib/python3.12/site-packages (from peft) (2.2.2)\n",
      "Requirement already satisfied: transformers in /root/miniconda3/lib/python3.12/site-packages (from peft) (4.39.3)\n",
      "Requirement already satisfied: accelerate>=0.21.0 in /root/miniconda3/lib/python3.12/site-packages (from peft) (0.28.0)\n",
      "Requirement already satisfied: safetensors in /root/miniconda3/lib/python3.12/site-packages (from peft) (0.4.2)\n",
      "Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/lib/python3.12/site-packages (from peft) (0.22.2)\n",
      "Requirement already satisfied: six in /root/miniconda3/lib/python3.12/site-packages (from rouge_chinese) (1.16.0)\n",
      "Requirement already satisfied: filelock in /root/miniconda3/lib/python3.12/site-packages (from huggingface-hub>=0.17.0->peft) (3.13.3)\n",
      "Requirement already satisfied: fsspec>=2023.5.0 in /root/miniconda3/lib/python3.12/site-packages (from huggingface-hub>=0.17.0->peft) (2024.2.0)\n",
      "Requirement already satisfied: requests in /root/miniconda3/lib/python3.12/site-packages (from huggingface-hub>=0.17.0->peft) (2.31.0)\n",
      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/lib/python3.12/site-packages (from huggingface-hub>=0.17.0->peft) (4.10.0)\n",
      "Requirement already satisfied: sympy in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (1.12)\n",
      "Requirement already satisfied: networkx in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (3.2.1)\n",
      "Requirement already satisfied: jinja2 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (3.1.3)\n",
      "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.105)\n",
      "Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (8.9.2.26)\n",
      "Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.3.1)\n",
      "Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (11.0.2.54)\n",
      "Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (10.3.2.106)\n",
      "Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (11.4.5.107)\n",
      "Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.0.106)\n",
      "Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (2.19.3)\n",
      "Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /root/miniconda3/lib/python3.12/site-packages (from torch>=1.13.0->peft) (12.1.105)\n",
      "Requirement already satisfied: nvidia-nvjitlink-cu12 in /root/miniconda3/lib/python3.12/site-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=1.13.0->peft) (12.4.99)\n",
      "Requirement already satisfied: tokenizers<0.19,>=0.14 in /root/miniconda3/lib/python3.12/site-packages (from transformers->peft) (0.15.2)\n",
      "Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/lib/python3.12/site-packages (from jinja2->torch>=1.13.0->peft) (2.1.5)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/lib/python3.12/site-packages (from requests->huggingface-hub>=0.17.0->peft) (2.0.4)\n",
      "Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/lib/python3.12/site-packages (from requests->huggingface-hub>=0.17.0->peft) (3.4)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/lib/python3.12/site-packages (from requests->huggingface-hub>=0.17.0->peft) (2.1.0)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/lib/python3.12/site-packages (from requests->huggingface-hub>=0.17.0->peft) (2024.2.2)\n",
      "Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/lib/python3.12/site-packages (from sympy->torch>=1.13.0->peft) (1.3.0)\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "%pip install jieba nltk peft rouge_chinese"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "117925ec-06c0-4092-8c6c-fd4cdc101e0a",
   "metadata": {},
   "source": [
    "#### 使用默认配置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "1f00c5d9-1cc4-4ded-978c-689704f07202",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:04<00:00,  1.63it/s]\n",
      "trainable params: 1,949,696 || all params: 6,245,533,696 || trainable%: 0.031217444255383614\n",
      "--> Model\n",
      "\n",
      "--> model has 1.949696M params\n",
      "\n",
      "train_dataset: Dataset({\n",
      "    features: ['input_ids', 'labels'],\n",
      "    num_rows: 114599\n",
      "})\n",
      "val_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "test_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "--> Sanity check\n",
      "           '[gMASK]': 64790 -> -100\n",
      "               'sop': 64792 -> -100\n",
      "          '<|user|>': 64795 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '\\n': 13 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '类型': 33467 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '版': 55090 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '宽松': 40833 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '风格': 32799 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '性感': 40589 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '图案': 37505 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '线条': 37216 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '阔': 56529 -> -100\n",
      "                 '腿': 56158 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "     '<|assistant|>': 64796 -> -100\n",
      "                  '': 30910 -> 30910\n",
      "                '\\n': 13 -> 13\n",
      "                  '': 30910 -> 30910\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '阔': 56529 -> 56529\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '这': 54551 -> 54551\n",
      "                '两年': 33808 -> 33808\n",
      "                '真的': 32041 -> 32041\n",
      "                 '吸': 55360 -> 55360\n",
      "                 '粉': 55486 -> 55486\n",
      "                '不少': 32138 -> 32138\n",
      "                 '，': 31123 -> 31123\n",
      "                '明星': 32943 -> 32943\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '达': 54880 -> 54880\n",
      "                '人的': 31664 -> 31664\n",
      "                '心头': 46565 -> 46565\n",
      "                 '爱': 54799 -> 54799\n",
      "                 '。': 31155 -> 31155\n",
      "                '毕竟': 33051 -> 33051\n",
      "                 '好': 54591 -> 54591\n",
      "                 '穿': 55432 -> 55432\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '，': 31123 -> 31123\n",
      "                 '谁': 55622 -> 55622\n",
      "                '都能': 32904 -> 32904\n",
      "                 '穿': 55432 -> 55432\n",
      "                 '出': 54557 -> 54557\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '长': 54625 -> 54625\n",
      "                 '2': 30943 -> 30943\n",
      "                 '米': 55055 -> 55055\n",
      "               '的效果': 35590 -> 35590\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '，': 31123 -> 31123\n",
      "               '当然是': 48466 -> 48466\n",
      "                 '遮': 57148 -> 57148\n",
      "                 '肉': 55343 -> 55343\n",
      "                 '小': 54603 -> 54603\n",
      "                '能手': 49355 -> 49355\n",
      "                 '啊': 55674 -> 55674\n",
      "                 '。': 31155 -> 31155\n",
      "                '上身': 51605 -> 51605\n",
      "                 '随': 55119 -> 55119\n",
      "                 '性': 54642 -> 54642\n",
      "                '自然': 31799 -> 31799\n",
      "                 '不': 54535 -> 54535\n",
      "                 '拘': 57036 -> 57036\n",
      "                 '束': 55625 -> 55625\n",
      "                 '，': 31123 -> 31123\n",
      "                '面料': 46839 -> 46839\n",
      "                 '亲': 55113 -> 55113\n",
      "                 '肤': 56089 -> 56089\n",
      "                '舒适': 33894 -> 33894\n",
      "                 '贴': 55778 -> 55778\n",
      "                '身体': 31902 -> 31902\n",
      "                 '验': 55017 -> 55017\n",
      "                 '感': 54706 -> 54706\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '哒': 59230 -> 59230\n",
      "                 '。': 31155 -> 31155\n",
      "                 '系': 54712 -> 54712\n",
      "                 '带': 54882 -> 54882\n",
      "                '部分': 31726 -> 31726\n",
      "                '增加': 31917 -> 31917\n",
      "                '设计': 31735 -> 31735\n",
      "                '看点': 45032 -> 45032\n",
      "                 '，': 31123 -> 31123\n",
      "                 '还': 54656 -> 54656\n",
      "                 '让': 54772 -> 54772\n",
      "                '单品': 46539 -> 46539\n",
      "               '的设计': 34481 -> 34481\n",
      "                 '感': 54706 -> 54706\n",
      "                '更强': 43084 -> 43084\n",
      "                 '。': 31155 -> 31155\n",
      "                '腿部': 46799 -> 46799\n",
      "                '线条': 37216 -> 37216\n",
      "                 '若': 55351 -> 55351\n",
      "                 '隐': 55733 -> 55733\n",
      "                 '若': 55351 -> 55351\n",
      "                 '现': 54600 -> 54600\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                '性感': 40589 -> 40589\n",
      "                 '撩': 58521 -> 58521\n",
      "                 '人': 54533 -> 54533\n",
      "                 '。': 31155 -> 31155\n",
      "                '颜色': 33692 -> 33692\n",
      "                 '敲': 57004 -> 57004\n",
      "                '温柔': 34678 -> 34678\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                 '与': 54619 -> 54619\n",
      "                '裤子': 44722 -> 44722\n",
      "                '本身': 32754 -> 32754\n",
      "                 '所': 54626 -> 54626\n",
      "                '呈现': 33169 -> 33169\n",
      "               '的风格': 48084 -> 48084\n",
      "                '有点': 33149 -> 33149\n",
      "                 '反': 54955 -> 54955\n",
      "                 '差': 55342 -> 55342\n",
      "                 '萌': 56842 -> 56842\n",
      "                 '。': 31155 -> 31155\n",
      "                  '': 2 -> 2\n",
      "/root/miniconda3/lib/python3.12/site-packages/accelerate/accelerator.py:432: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n",
      "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n",
      "  warnings.warn(\n",
      "Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n",
      "max_steps is given, it will override any value given in num_train_epochs\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running training *****\n",
      "  Num examples = 114,599\n",
      "  Num Epochs = 1\n",
      "  Instantaneous batch size per device = 4\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 4\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 3,000\n",
      "  Number of trainable parameters = 1,949,696\n",
      "{'loss': 4.8312, 'grad_norm': 2.154928684234619, 'learning_rate': 4.9833333333333336e-05, 'epoch': 0.0}\n",
      "{'loss': 4.6039, 'grad_norm': 3.1419801712036133, 'learning_rate': 4.966666666666667e-05, 'epoch': 0.0}\n",
      "{'loss': 4.4902, 'grad_norm': 2.9887514114379883, 'learning_rate': 4.9500000000000004e-05, 'epoch': 0.0}\n",
      "{'loss': 4.125, 'grad_norm': 3.3602099418640137, 'learning_rate': 4.933333333333334e-05, 'epoch': 0.0}\n",
      "{'loss': 4.117, 'grad_norm': 2.7146646976470947, 'learning_rate': 4.9166666666666665e-05, 'epoch': 0.0}\n",
      "{'loss': 3.8697, 'grad_norm': 2.9158823490142822, 'learning_rate': 4.9e-05, 'epoch': 0.0}\n",
      "{'loss': 3.8439, 'grad_norm': 2.8361949920654297, 'learning_rate': 4.883333333333334e-05, 'epoch': 0.0}\n",
      "{'loss': 3.748, 'grad_norm': 2.905505895614624, 'learning_rate': 4.866666666666667e-05, 'epoch': 0.0}\n",
      "{'loss': 3.6387, 'grad_norm': 3.1462669372558594, 'learning_rate': 4.85e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7213, 'grad_norm': 3.3166770935058594, 'learning_rate': 4.8333333333333334e-05, 'epoch': 0.0}\n",
      "{'loss': 3.6723, 'grad_norm': 3.5137250423431396, 'learning_rate': 4.8166666666666674e-05, 'epoch': 0.0}\n",
      "{'loss': 3.8479, 'grad_norm': 3.8054819107055664, 'learning_rate': 4.8e-05, 'epoch': 0.0}\n",
      "{'loss': 3.6125, 'grad_norm': 3.425347328186035, 'learning_rate': 4.7833333333333335e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7311, 'grad_norm': 4.3545613288879395, 'learning_rate': 4.766666666666667e-05, 'epoch': 0.0}\n",
      "{'loss': 3.6822, 'grad_norm': 3.5810747146606445, 'learning_rate': 4.75e-05, 'epoch': 0.01}\n",
      "{'loss': 3.7412, 'grad_norm': 3.8734400272369385, 'learning_rate': 4.7333333333333336e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5736, 'grad_norm': 4.0092573165893555, 'learning_rate': 4.716666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.576, 'grad_norm': 4.253482341766357, 'learning_rate': 4.7e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5506, 'grad_norm': 4.688349723815918, 'learning_rate': 4.683333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5771, 'grad_norm': 4.531797885894775, 'learning_rate': 4.666666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.552, 'grad_norm': 4.891948699951172, 'learning_rate': 4.6500000000000005e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6486, 'grad_norm': 4.028026580810547, 'learning_rate': 4.633333333333333e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6162, 'grad_norm': 4.669238090515137, 'learning_rate': 4.6166666666666666e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5123, 'grad_norm': 4.422101974487305, 'learning_rate': 4.600000000000001e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4768, 'grad_norm': 5.246294021606445, 'learning_rate': 4.5833333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6047, 'grad_norm': 5.205477714538574, 'learning_rate': 4.566666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5529, 'grad_norm': 5.3229780197143555, 'learning_rate': 4.55e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6143, 'grad_norm': 4.490516662597656, 'learning_rate': 4.5333333333333335e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6303, 'grad_norm': 4.656806945800781, 'learning_rate': 4.516666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5426, 'grad_norm': 5.648224830627441, 'learning_rate': 4.5e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4674, 'grad_norm': 5.185174465179443, 'learning_rate': 4.483333333333333e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6063, 'grad_norm': 5.705077648162842, 'learning_rate': 4.466666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4191, 'grad_norm': 5.122574329376221, 'learning_rate': 4.4500000000000004e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4955, 'grad_norm': 5.213160991668701, 'learning_rate': 4.433333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5211, 'grad_norm': 5.428754806518555, 'learning_rate': 4.4166666666666665e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5748, 'grad_norm': 5.0916056632995605, 'learning_rate': 4.4000000000000006e-05, 'epoch': 0.01}\n",
      "{'loss': 3.3625, 'grad_norm': 4.770575046539307, 'learning_rate': 4.383333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5299, 'grad_norm': 5.066187381744385, 'learning_rate': 4.3666666666666666e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5242, 'grad_norm': 5.202919006347656, 'learning_rate': 4.35e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4738, 'grad_norm': 5.567348957061768, 'learning_rate': 4.3333333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6953, 'grad_norm': 5.396520137786865, 'learning_rate': 4.316666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.4988, 'grad_norm': 4.961470127105713, 'learning_rate': 4.3e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6227, 'grad_norm': 5.53696346282959, 'learning_rate': 4.2833333333333335e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4174, 'grad_norm': 6.352376461029053, 'learning_rate': 4.266666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4141, 'grad_norm': 5.941121578216553, 'learning_rate': 4.25e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4281, 'grad_norm': 5.515372276306152, 'learning_rate': 4.233333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5318, 'grad_norm': 5.696809768676758, 'learning_rate': 4.216666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4453, 'grad_norm': 6.9524431228637695, 'learning_rate': 4.2e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4613, 'grad_norm': 5.672770023345947, 'learning_rate': 4.183333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5625, 'grad_norm': 5.831884384155273, 'learning_rate': 4.166666666666667e-05, 'epoch': 0.02}\n",
      " 17%|██████▋                                 | 500/3000 [04:29<25:30,  1.63it/s]***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.69s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:38<00:13, 13.63s/it]\u001b[A\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:57<00:00, 15.49s/it]\u001b[ABuilding prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 0.745 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "                                                                                \n",
      "\u001b[A{'eval_rouge-1': 31.706024000000003, 'eval_rouge-2': 7.137056, 'eval_rouge-l': 23.830496000000004, 'eval_bleu-4': 0.033068774930854576, 'eval_runtime': 61.893, 'eval_samples_per_second': 0.808, 'eval_steps_per_second': 0.065, 'epoch': 0.02}\n",
      " 17%|██████▋                                 | 500/3000 [05:31<25:30,  1.63it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:58<00:00, 15.49s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3225, 'grad_norm': 5.682308197021484, 'learning_rate': 4.15e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5479, 'grad_norm': 6.5132832527160645, 'learning_rate': 4.133333333333333e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5836, 'grad_norm': 5.925625324249268, 'learning_rate': 4.116666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4859, 'grad_norm': 5.3197922706604, 'learning_rate': 4.1e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5227, 'grad_norm': 5.299259662628174, 'learning_rate': 4.0833333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.6457, 'grad_norm': 5.708182334899902, 'learning_rate': 4.066666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4936, 'grad_norm': 5.70455265045166, 'learning_rate': 4.05e-05, 'epoch': 0.02}\n",
      "{'loss': 3.3713, 'grad_norm': 5.540102005004883, 'learning_rate': 4.0333333333333336e-05, 'epoch': 0.02}\n",
      "{'loss': 3.423, 'grad_norm': 6.181765556335449, 'learning_rate': 4.016666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4928, 'grad_norm': 6.424213886260986, 'learning_rate': 4e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4373, 'grad_norm': 6.072690486907959, 'learning_rate': 3.983333333333333e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4545, 'grad_norm': 6.612813949584961, 'learning_rate': 3.966666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4465, 'grad_norm': 5.923634052276611, 'learning_rate': 3.9500000000000005e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4555, 'grad_norm': 6.080725193023682, 'learning_rate': 3.933333333333333e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5312, 'grad_norm': 5.865947723388672, 'learning_rate': 3.9166666666666665e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4867, 'grad_norm': 6.251274108886719, 'learning_rate': 3.9000000000000006e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5418, 'grad_norm': 6.068418979644775, 'learning_rate': 3.883333333333333e-05, 'epoch': 0.02}\n",
      "{'loss': 3.3051, 'grad_norm': 6.919337272644043, 'learning_rate': 3.866666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.3977, 'grad_norm': 6.578913688659668, 'learning_rate': 3.85e-05, 'epoch': 0.02}\n",
      "{'loss': 3.3512, 'grad_norm': 6.229973793029785, 'learning_rate': 3.8333333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4971, 'grad_norm': 6.921490669250488, 'learning_rate': 3.816666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5252, 'grad_norm': 6.770037651062012, 'learning_rate': 3.8e-05, 'epoch': 0.03}\n",
      "{'loss': 3.25, 'grad_norm': 6.85292387008667, 'learning_rate': 3.7833333333333336e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5729, 'grad_norm': 5.783699989318848, 'learning_rate': 3.766666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3973, 'grad_norm': 6.45324182510376, 'learning_rate': 3.7500000000000003e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4764, 'grad_norm': 6.002246856689453, 'learning_rate': 3.733333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.6182, 'grad_norm': 6.386537075042725, 'learning_rate': 3.7166666666666664e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4713, 'grad_norm': 6.236800193786621, 'learning_rate': 3.7e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3227, 'grad_norm': 6.354572772979736, 'learning_rate': 3.683333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5521, 'grad_norm': 6.8438215255737305, 'learning_rate': 3.6666666666666666e-05, 'epoch': 0.03}\n",
      "{'loss': 3.2883, 'grad_norm': 6.497671604156494, 'learning_rate': 3.65e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3604, 'grad_norm': 6.509902000427246, 'learning_rate': 3.633333333333333e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4633, 'grad_norm': 7.136723518371582, 'learning_rate': 3.6166666666666674e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4066, 'grad_norm': 6.183115482330322, 'learning_rate': 3.6e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5092, 'grad_norm': 6.145983695983887, 'learning_rate': 3.5833333333333335e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5334, 'grad_norm': 6.1694722175598145, 'learning_rate': 3.566666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.2928, 'grad_norm': 7.147686958312988, 'learning_rate': 3.55e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4926, 'grad_norm': 6.6215596199035645, 'learning_rate': 3.5333333333333336e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4551, 'grad_norm': 7.347283363342285, 'learning_rate': 3.516666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.2648, 'grad_norm': 7.908386707305908, 'learning_rate': 3.5e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4568, 'grad_norm': 7.738822937011719, 'learning_rate': 3.483333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4186, 'grad_norm': 6.861604690551758, 'learning_rate': 3.466666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.459, 'grad_norm': 7.470913887023926, 'learning_rate': 3.45e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5652, 'grad_norm': 7.300028324127197, 'learning_rate': 3.433333333333333e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3611, 'grad_norm': 6.421728134155273, 'learning_rate': 3.4166666666666666e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4363, 'grad_norm': 7.730878829956055, 'learning_rate': 3.4000000000000007e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5316, 'grad_norm': 5.862722396850586, 'learning_rate': 3.3833333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3219, 'grad_norm': 6.8936076164245605, 'learning_rate': 3.366666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4598, 'grad_norm': 7.317920684814453, 'learning_rate': 3.35e-05, 'epoch': 0.03}\n",
      "{'loss': 3.3977, 'grad_norm': 7.717495441436768, 'learning_rate': 3.3333333333333335e-05, 'epoch': 0.03}\n",
      " 33%|█████████████                          | 1000/3000 [09:58<18:28,  1.80it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.63s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:06<00:02,  2.18s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.088174, 'eval_rouge-2': 6.8397000000000014, 'eval_rouge-l': 25.552676, 'eval_bleu-4': 0.03416142368281261, 'eval_runtime': 28.4737, 'eval_samples_per_second': 1.756, 'eval_steps_per_second': 0.14, 'epoch': 0.03}\n",
      " 33%|█████████████                          | 1000/3000 [10:27<18:28,  1.80it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:08<00:00,  2.08s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-1000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.4482, 'grad_norm': 6.911911487579346, 'learning_rate': 3.316666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4604, 'grad_norm': 7.48888635635376, 'learning_rate': 3.3e-05, 'epoch': 0.04}\n",
      "{'loss': 3.65, 'grad_norm': 8.10289192199707, 'learning_rate': 3.283333333333333e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4006, 'grad_norm': 6.442454814910889, 'learning_rate': 3.266666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3896, 'grad_norm': 8.657624244689941, 'learning_rate': 3.2500000000000004e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3559, 'grad_norm': 7.758888244628906, 'learning_rate': 3.233333333333333e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3869, 'grad_norm': 7.178406715393066, 'learning_rate': 3.2166666666666665e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4631, 'grad_norm': 7.179416179656982, 'learning_rate': 3.2000000000000005e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5258, 'grad_norm': 7.013150691986084, 'learning_rate': 3.183333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4684, 'grad_norm': 6.507485866546631, 'learning_rate': 3.1666666666666666e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3443, 'grad_norm': 6.85249662399292, 'learning_rate': 3.15e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5264, 'grad_norm': 7.857052803039551, 'learning_rate': 3.1333333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4365, 'grad_norm': 7.381261348724365, 'learning_rate': 3.116666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3615, 'grad_norm': 8.240775108337402, 'learning_rate': 3.1e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3182, 'grad_norm': 7.575430870056152, 'learning_rate': 3.0833333333333335e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3613, 'grad_norm': 7.1634392738342285, 'learning_rate': 3.066666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4496, 'grad_norm': 6.593518257141113, 'learning_rate': 3.05e-05, 'epoch': 0.04}\n",
      "{'loss': 3.473, 'grad_norm': 6.509487628936768, 'learning_rate': 3.0333333333333337e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3551, 'grad_norm': 6.736058712005615, 'learning_rate': 3.016666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4107, 'grad_norm': 6.371884346008301, 'learning_rate': 3e-05, 'epoch': 0.04}\n",
      "{'loss': 3.2449, 'grad_norm': 6.575644493103027, 'learning_rate': 2.9833333333333335e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3402, 'grad_norm': 7.33398962020874, 'learning_rate': 2.9666666666666672e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3814, 'grad_norm': 7.460495948791504, 'learning_rate': 2.95e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3779, 'grad_norm': 8.191845893859863, 'learning_rate': 2.9333333333333336e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4451, 'grad_norm': 6.691037178039551, 'learning_rate': 2.916666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.2824, 'grad_norm': 7.604856967926025, 'learning_rate': 2.9e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4555, 'grad_norm': 7.210885524749756, 'learning_rate': 2.8833333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3352, 'grad_norm': 7.203238487243652, 'learning_rate': 2.8666666666666668e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3887, 'grad_norm': 6.920904159545898, 'learning_rate': 2.8499999999999998e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4789, 'grad_norm': 7.307106971740723, 'learning_rate': 2.8333333333333335e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4627, 'grad_norm': 6.905856609344482, 'learning_rate': 2.816666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4555, 'grad_norm': 6.743160724639893, 'learning_rate': 2.8000000000000003e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4037, 'grad_norm': 10.617742538452148, 'learning_rate': 2.7833333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3027, 'grad_norm': 7.5631103515625, 'learning_rate': 2.7666666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3492, 'grad_norm': 7.58022928237915, 'learning_rate': 2.7500000000000004e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3002, 'grad_norm': 7.869373798370361, 'learning_rate': 2.733333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5162, 'grad_norm': 7.25886869430542, 'learning_rate': 2.716666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3832, 'grad_norm': 7.294155120849609, 'learning_rate': 2.7000000000000002e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3574, 'grad_norm': 7.255176067352295, 'learning_rate': 2.6833333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4164, 'grad_norm': 6.589298248291016, 'learning_rate': 2.6666666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.349, 'grad_norm': 7.417629241943359, 'learning_rate': 2.6500000000000004e-05, 'epoch': 0.05}\n",
      "{'loss': 3.2666, 'grad_norm': 7.680657386779785, 'learning_rate': 2.633333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.383, 'grad_norm': 7.579665184020996, 'learning_rate': 2.6166666666666668e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3564, 'grad_norm': 7.233595371246338, 'learning_rate': 2.6000000000000002e-05, 'epoch': 0.05}\n",
      "{'loss': 3.2682, 'grad_norm': 6.894739627838135, 'learning_rate': 2.5833333333333336e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3936, 'grad_norm': 7.254997253417969, 'learning_rate': 2.5666666666666666e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4385, 'grad_norm': 9.08654499053955, 'learning_rate': 2.5500000000000003e-05, 'epoch': 0.05}\n",
      "{'loss': 3.298, 'grad_norm': 6.652235984802246, 'learning_rate': 2.5333333333333337e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4414, 'grad_norm': 7.410639762878418, 'learning_rate': 2.5166666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4555, 'grad_norm': 6.834026336669922, 'learning_rate': 2.5e-05, 'epoch': 0.05}\n",
      " 50%|███████████████████▌                   | 1500/3000 [14:54<11:55,  2.10it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.52s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:37<00:13, 13.34s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 31.321912000000008, 'eval_rouge-2': 6.7536760000000005, 'eval_rouge-l': 24.706290000000003, 'eval_bleu-4': 0.03247529644736442, 'eval_runtime': 45.0905, 'eval_samples_per_second': 1.109, 'eval_steps_per_second': 0.089, 'epoch': 0.05}\n",
      " 50%|███████████████████▌                   | 1500/3000 [15:39<11:55,  2.10it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:40<00:00,  9.34s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-1500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3459, 'grad_norm': 7.1005964279174805, 'learning_rate': 2.4833333333333335e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3887, 'grad_norm': 8.237688064575195, 'learning_rate': 2.466666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4422, 'grad_norm': 8.04233455657959, 'learning_rate': 2.45e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4027, 'grad_norm': 7.083126068115234, 'learning_rate': 2.4333333333333336e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4945, 'grad_norm': 7.374517917633057, 'learning_rate': 2.4166666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4068, 'grad_norm': 8.385078430175781, 'learning_rate': 2.4e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4674, 'grad_norm': 7.9670491218566895, 'learning_rate': 2.3833333333333334e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4383, 'grad_norm': 7.476895809173584, 'learning_rate': 2.3666666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5064, 'grad_norm': 9.125017166137695, 'learning_rate': 2.35e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3918, 'grad_norm': 7.107120990753174, 'learning_rate': 2.3333333333333336e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3678, 'grad_norm': 7.8743696212768555, 'learning_rate': 2.3166666666666666e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3709, 'grad_norm': 8.479377746582031, 'learning_rate': 2.3000000000000003e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4705, 'grad_norm': 7.343986988067627, 'learning_rate': 2.2833333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3201, 'grad_norm': 8.063881874084473, 'learning_rate': 2.2666666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3693, 'grad_norm': 7.5173563957214355, 'learning_rate': 2.25e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3051, 'grad_norm': 6.936359405517578, 'learning_rate': 2.2333333333333335e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4803, 'grad_norm': 8.537959098815918, 'learning_rate': 2.216666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3715, 'grad_norm': 7.233067035675049, 'learning_rate': 2.2000000000000003e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3768, 'grad_norm': 7.381922721862793, 'learning_rate': 2.1833333333333333e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5178, 'grad_norm': 6.933073997497559, 'learning_rate': 2.1666666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4629, 'grad_norm': 7.290614128112793, 'learning_rate': 2.15e-05, 'epoch': 0.06}\n",
      "{'loss': 3.501, 'grad_norm': 7.43897819519043, 'learning_rate': 2.1333333333333335e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4029, 'grad_norm': 7.458011150360107, 'learning_rate': 2.116666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3959, 'grad_norm': 7.588024139404297, 'learning_rate': 2.1e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4645, 'grad_norm': 7.4663405418396, 'learning_rate': 2.0833333333333336e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4432, 'grad_norm': 7.836616516113281, 'learning_rate': 2.0666666666666666e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3594, 'grad_norm': 8.339995384216309, 'learning_rate': 2.05e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3506, 'grad_norm': 8.119256973266602, 'learning_rate': 2.0333333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3926, 'grad_norm': 8.300532341003418, 'learning_rate': 2.0166666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3395, 'grad_norm': 7.852771282196045, 'learning_rate': 2e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3775, 'grad_norm': 8.911588668823242, 'learning_rate': 1.9833333333333335e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3402, 'grad_norm': 7.748927593231201, 'learning_rate': 1.9666666666666666e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5814, 'grad_norm': 7.880917072296143, 'learning_rate': 1.9500000000000003e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3453, 'grad_norm': 8.698152542114258, 'learning_rate': 1.9333333333333333e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4928, 'grad_norm': 8.92833137512207, 'learning_rate': 1.9166666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3838, 'grad_norm': 7.530654430389404, 'learning_rate': 1.9e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3162, 'grad_norm': 8.092974662780762, 'learning_rate': 1.8833333333333335e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3068, 'grad_norm': 7.494999408721924, 'learning_rate': 1.866666666666667e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3998, 'grad_norm': 7.309693813323975, 'learning_rate': 1.85e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3705, 'grad_norm': 7.830413341522217, 'learning_rate': 1.8333333333333333e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3939, 'grad_norm': 8.077669143676758, 'learning_rate': 1.8166666666666667e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4844, 'grad_norm': 7.426691055297852, 'learning_rate': 1.8e-05, 'epoch': 0.07}\n",
      "{'loss': 3.2818, 'grad_norm': 7.805764198303223, 'learning_rate': 1.7833333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.5006, 'grad_norm': 7.788572311401367, 'learning_rate': 1.7666666666666668e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3609, 'grad_norm': 6.960601806640625, 'learning_rate': 1.75e-05, 'epoch': 0.07}\n",
      "{'loss': 3.2873, 'grad_norm': 8.637812614440918, 'learning_rate': 1.7333333333333336e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3738, 'grad_norm': 7.579611778259277, 'learning_rate': 1.7166666666666666e-05, 'epoch': 0.07}\n",
      "{'loss': 3.243, 'grad_norm': 7.651642799377441, 'learning_rate': 1.7000000000000003e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4195, 'grad_norm': 7.1403350830078125, 'learning_rate': 1.6833333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4664, 'grad_norm': 8.01436710357666, 'learning_rate': 1.6666666666666667e-05, 'epoch': 0.07}\n",
      " 67%|██████████████████████████             | 2000/3000 [20:06<08:40,  1.92it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.88s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:07<00:02,  2.51s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 30.95867, 'eval_rouge-2': 6.647781999999999, 'eval_rouge-l': 23.094668000000002, 'eval_bleu-4': 0.03188818645885937, 'eval_runtime': 29.9791, 'eval_samples_per_second': 1.668, 'eval_steps_per_second': 0.133, 'epoch': 0.07}\n",
      " 67%|██████████████████████████             | 2000/3000 [20:36<08:40,  1.92it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:09<00:00,  2.50s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-2000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3914, 'grad_norm': 8.718515396118164, 'learning_rate': 1.65e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4973, 'grad_norm': 7.484218597412109, 'learning_rate': 1.6333333333333335e-05, 'epoch': 0.07}\n",
      "{'loss': 3.5592, 'grad_norm': 8.820233345031738, 'learning_rate': 1.6166666666666665e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4912, 'grad_norm': 8.359233856201172, 'learning_rate': 1.6000000000000003e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3662, 'grad_norm': 8.200981140136719, 'learning_rate': 1.5833333333333333e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3275, 'grad_norm': 7.662619590759277, 'learning_rate': 1.5666666666666667e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4379, 'grad_norm': 8.191414833068848, 'learning_rate': 1.55e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4186, 'grad_norm': 8.248183250427246, 'learning_rate': 1.5333333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4379, 'grad_norm': 7.562425136566162, 'learning_rate': 1.5166666666666668e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3572, 'grad_norm': 7.702509880065918, 'learning_rate': 1.5e-05, 'epoch': 0.07}\n",
      "{'loss': 3.2961, 'grad_norm': 7.67454195022583, 'learning_rate': 1.4833333333333336e-05, 'epoch': 0.07}\n",
      "{'loss': 3.5797, 'grad_norm': 7.960419178009033, 'learning_rate': 1.4666666666666668e-05, 'epoch': 0.07}\n",
      "{'loss': 3.25, 'grad_norm': 7.760265827178955, 'learning_rate': 1.45e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3609, 'grad_norm': 8.248686790466309, 'learning_rate': 1.4333333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3996, 'grad_norm': 7.432130813598633, 'learning_rate': 1.4166666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.5189, 'grad_norm': 8.039673805236816, 'learning_rate': 1.4000000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3926, 'grad_norm': 6.821283340454102, 'learning_rate': 1.3833333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4188, 'grad_norm': 7.899528503417969, 'learning_rate': 1.3666666666666666e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3574, 'grad_norm': 7.773036003112793, 'learning_rate': 1.3500000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4393, 'grad_norm': 7.671910762786865, 'learning_rate': 1.3333333333333333e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4521, 'grad_norm': 6.840303421020508, 'learning_rate': 1.3166666666666665e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4209, 'grad_norm': 7.795290946960449, 'learning_rate': 1.3000000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4188, 'grad_norm': 8.17656421661377, 'learning_rate': 1.2833333333333333e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3705, 'grad_norm': 8.185439109802246, 'learning_rate': 1.2666666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.2385, 'grad_norm': 8.417939186096191, 'learning_rate': 1.25e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3627, 'grad_norm': 7.919494152069092, 'learning_rate': 1.2333333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4293, 'grad_norm': 8.814216613769531, 'learning_rate': 1.2166666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4664, 'grad_norm': 7.672926902770996, 'learning_rate': 1.2e-05, 'epoch': 0.08}\n",
      "{'loss': 3.2939, 'grad_norm': 8.19534969329834, 'learning_rate': 1.1833333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3572, 'grad_norm': 8.336996078491211, 'learning_rate': 1.1666666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3174, 'grad_norm': 8.38010311126709, 'learning_rate': 1.1500000000000002e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3287, 'grad_norm': 8.505497932434082, 'learning_rate': 1.1333333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3664, 'grad_norm': 8.873369216918945, 'learning_rate': 1.1166666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3666, 'grad_norm': 7.828722953796387, 'learning_rate': 1.1000000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.2697, 'grad_norm': 8.600749969482422, 'learning_rate': 1.0833333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3791, 'grad_norm': 8.11614990234375, 'learning_rate': 1.0666666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3543, 'grad_norm': 7.98288631439209, 'learning_rate': 1.05e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4924, 'grad_norm': 8.52971076965332, 'learning_rate': 1.0333333333333333e-05, 'epoch': 0.08}\n",
      "{'loss': 3.2391, 'grad_norm': 8.49200439453125, 'learning_rate': 1.0166666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4617, 'grad_norm': 7.913151741027832, 'learning_rate': 1e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4574, 'grad_norm': 8.158500671386719, 'learning_rate': 9.833333333333333e-06, 'epoch': 0.08}\n",
      "{'loss': 3.2752, 'grad_norm': 8.08176326751709, 'learning_rate': 9.666666666666667e-06, 'epoch': 0.08}\n",
      "{'loss': 3.3703, 'grad_norm': 7.351267337799072, 'learning_rate': 9.5e-06, 'epoch': 0.08}\n",
      "{'loss': 3.3783, 'grad_norm': 7.995841979980469, 'learning_rate': 9.333333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.2754, 'grad_norm': 7.790261745452881, 'learning_rate': 9.166666666666666e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3117, 'grad_norm': 7.884588241577148, 'learning_rate': 9e-06, 'epoch': 0.09}\n",
      "{'loss': 3.2615, 'grad_norm': 8.908665657043457, 'learning_rate': 8.833333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4338, 'grad_norm': 7.456766128540039, 'learning_rate': 8.666666666666668e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4707, 'grad_norm': 7.750051975250244, 'learning_rate': 8.500000000000002e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3936, 'grad_norm': 9.47743034362793, 'learning_rate': 8.333333333333334e-06, 'epoch': 0.09}\n",
      " 83%|████████████████████████████████▌      | 2500/3000 [25:03<04:22,  1.90it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.55s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:22<00:06,  6.93s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 31.622515999999997, 'eval_rouge-2': 7.153186, 'eval_rouge-l': 23.837428, 'eval_bleu-4': 0.03315202964085586, 'eval_runtime': 60.7275, 'eval_samples_per_second': 0.823, 'eval_steps_per_second': 0.066, 'epoch': 0.09}\n",
      " 83%|████████████████████████████████▌      | 2500/3000 [26:04<04:22,  1.90it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:41<00:00, 11.25s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-2500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3016, 'grad_norm': 8.571910858154297, 'learning_rate': 8.166666666666668e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3416, 'grad_norm': 10.362907409667969, 'learning_rate': 8.000000000000001e-06, 'epoch': 0.09}\n",
      "{'loss': 3.249, 'grad_norm': 8.124931335449219, 'learning_rate': 7.833333333333333e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3984, 'grad_norm': 8.391274452209473, 'learning_rate': 7.666666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3902, 'grad_norm': 7.8629961013793945, 'learning_rate': 7.5e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4057, 'grad_norm': 8.372814178466797, 'learning_rate': 7.333333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4709, 'grad_norm': 8.014429092407227, 'learning_rate': 7.166666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4801, 'grad_norm': 8.443587303161621, 'learning_rate': 7.000000000000001e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3793, 'grad_norm': 8.329874992370605, 'learning_rate': 6.833333333333333e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4801, 'grad_norm': 8.6181058883667, 'learning_rate': 6.666666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3572, 'grad_norm': 8.033228874206543, 'learning_rate': 6.5000000000000004e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4242, 'grad_norm': 7.679358005523682, 'learning_rate': 6.333333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.5262, 'grad_norm': 7.679103851318359, 'learning_rate': 6.166666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4492, 'grad_norm': 8.605116844177246, 'learning_rate': 6e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4051, 'grad_norm': 8.104225158691406, 'learning_rate': 5.833333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3525, 'grad_norm': 7.827607154846191, 'learning_rate': 5.666666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4078, 'grad_norm': 8.56901741027832, 'learning_rate': 5.500000000000001e-06, 'epoch': 0.09}\n",
      "{'loss': 3.2697, 'grad_norm': 7.6554274559021, 'learning_rate': 5.333333333333334e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4764, 'grad_norm': 8.653867721557617, 'learning_rate': 5.166666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4549, 'grad_norm': 8.983596801757812, 'learning_rate': 5e-06, 'epoch': 0.09}\n",
      "{'loss': 3.4303, 'grad_norm': 8.282877922058105, 'learning_rate': 4.833333333333333e-06, 'epoch': 0.09}\n",
      "{'loss': 3.2535, 'grad_norm': 7.715260982513428, 'learning_rate': 4.666666666666667e-06, 'epoch': 0.09}\n",
      "{'loss': 3.3799, 'grad_norm': 8.047174453735352, 'learning_rate': 4.5e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3844, 'grad_norm': 8.057994842529297, 'learning_rate': 4.333333333333334e-06, 'epoch': 0.1}\n",
      "{'loss': 3.4594, 'grad_norm': 9.007599830627441, 'learning_rate': 4.166666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3982, 'grad_norm': 8.218299865722656, 'learning_rate': 4.000000000000001e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3477, 'grad_norm': 8.322694778442383, 'learning_rate': 3.833333333333334e-06, 'epoch': 0.1}\n",
      "{'loss': 3.259, 'grad_norm': 8.675442695617676, 'learning_rate': 3.666666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.2781, 'grad_norm': 8.229736328125, 'learning_rate': 3.5000000000000004e-06, 'epoch': 0.1}\n",
      "{'loss': 3.2393, 'grad_norm': 7.612515926361084, 'learning_rate': 3.3333333333333333e-06, 'epoch': 0.1}\n",
      "{'loss': 3.4473, 'grad_norm': 7.919465065002441, 'learning_rate': 3.166666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3738, 'grad_norm': 7.950122356414795, 'learning_rate': 3e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3875, 'grad_norm': 7.962871074676514, 'learning_rate': 2.8333333333333335e-06, 'epoch': 0.1}\n",
      "{'loss': 3.4396, 'grad_norm': 9.072876930236816, 'learning_rate': 2.666666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.4047, 'grad_norm': 8.54981803894043, 'learning_rate': 2.5e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3424, 'grad_norm': 8.343316078186035, 'learning_rate': 2.3333333333333336e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3746, 'grad_norm': 8.674938201904297, 'learning_rate': 2.166666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.5117, 'grad_norm': 9.027300834655762, 'learning_rate': 2.0000000000000003e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3037, 'grad_norm': 8.128785133361816, 'learning_rate': 1.8333333333333335e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3289, 'grad_norm': 8.933310508728027, 'learning_rate': 1.6666666666666667e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3082, 'grad_norm': 7.763113021850586, 'learning_rate': 1.5e-06, 'epoch': 0.1}\n",
      "{'loss': 3.25, 'grad_norm': 7.3353271484375, 'learning_rate': 1.3333333333333334e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3633, 'grad_norm': 8.749879837036133, 'learning_rate': 1.1666666666666668e-06, 'epoch': 0.1}\n",
      "{'loss': 3.2561, 'grad_norm': 8.39769458770752, 'learning_rate': 1.0000000000000002e-06, 'epoch': 0.1}\n",
      "{'loss': 3.3801, 'grad_norm': 8.015907287597656, 'learning_rate': 8.333333333333333e-07, 'epoch': 0.1}\n",
      "{'loss': 3.2121, 'grad_norm': 8.978567123413086, 'learning_rate': 6.666666666666667e-07, 'epoch': 0.1}\n",
      "{'loss': 3.4488, 'grad_norm': 8.839228630065918, 'learning_rate': 5.000000000000001e-07, 'epoch': 0.1}\n",
      "{'loss': 3.4293, 'grad_norm': 8.872966766357422, 'learning_rate': 3.3333333333333335e-07, 'epoch': 0.1}\n",
      "{'loss': 3.4764, 'grad_norm': 8.217439651489258, 'learning_rate': 1.6666666666666668e-07, 'epoch': 0.1}\n",
      "{'loss': 3.3686, 'grad_norm': 7.793020248413086, 'learning_rate': 0.0, 'epoch': 0.1}\n",
      "100%|███████████████████████████████████████| 3000/3000 [30:32<00:00,  1.98it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.75s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:23<00:07,  7.17s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.317054000000006, 'eval_rouge-2': 7.253375999999999, 'eval_rouge-l': 24.847366, 'eval_bleu-4': 0.03522588168661786, 'eval_runtime': 45.5505, 'eval_samples_per_second': 1.098, 'eval_steps_per_second': 0.088, 'epoch': 0.1}\n",
      "100%|███████████████████████████████████████| 3000/3000 [31:17<00:00,  1.98it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:41<00:00, 11.47s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output/checkpoint-3000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "\n",
      "\n",
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
      "\n",
      "\n",
      "{'train_runtime': 1878.8448, 'train_samples_per_second': 6.387, 'train_steps_per_second': 1.597, 'train_loss': 3.4465481770833333, 'epoch': 0.1}\n",
      "100%|███████████████████████████████████████| 3000/3000 [31:18<00:00,  1.60it/s]\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Prediction *****\n",
      "  Num examples = 1070\n",
      "  Batch size = 16\n",
      "100%|███████████████████████████████████████████| 67/67 [15:58<00:00, 14.30s/it]\n"
     ]
    }
   ],
   "source": [
    "!python finetune_hf.py  data/AdvertiseGen_fix THUDM/chatglm3-6b configs/lora.yaml"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "750b2f23-f728-4504-845a-2bb3a303e4c6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "bb23741a-ca8f-4b97-871b-cab81790adee",
   "metadata": {},
   "source": [
    "### 观察GPU显存占用\n",
    "\n",
    "```bash\n",
    "Every 2.0s: nvidia-smi                                                                                                                                                          Thu Apr  4 04:18:49 2024\n",
    "\n",
    "Thu Apr  4 04:18:49 2024\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "| NVIDIA-SMI 550.54.15              Driver Version: 550.54.15\t   CUDA Version: 12.4     |\n",
    "|-----------------------------------------+------------------------+----------------------+\n",
    "| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |\n",
    "| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |\n",
    "|                                         |                        |               MIG M. |\n",
    "|=========================================+========================+======================|\n",
    "|   0  Tesla V100S-PCIE-32GB          Off |   00000000:00:03.0 Off |                    0 |\n",
    "| N/A   60C    P0            253W /  250W |   19643MiB /  32768MiB |\t 93%\t  Default |\n",
    "|                                         |                        |                  N/A |\n",
    "+-----------------------------------------+------------------------+----------------------+\n",
    "\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "| Processes:                                                                              |\n",
    "|  GPU   GI   CI        PID   Type   Process name                              GPU Memory |\n",
    "|        ID   ID                                                               Usage\t  |\n",
    "|=========================================================================================|\n",
    "|    0   N/A  N/A       950      C   python                                      19640MiB |\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bb4011ee-9b61-42c1-b5b5-d102b0e7367f",
   "metadata": {},
   "source": [
    "#### 使用自定义配置\n",
    "\n",
    "```yaml\n",
    "max_steps: 15000\n",
    "# needed to be fit for the dataset\n",
    "learning_rate: 5e-5\n",
    "# settings for data loading\n",
    "per_device_train_batch_size: 8\n",
    "\n",
    "peft_config:\n",
    "  peft_type: LORA\n",
    "  task_type: CAUSAL_LM\n",
    "  r: 16\n",
    "  lora_alpha: 64\n",
    "  lora_dropout: 0.1\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d27a0b1-cd33-4616-af41-6bd318c75e6d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:04<00:00,  1.57it/s]\n",
      "trainable params: 3,899,392 || all params: 6,247,483,392 || trainable%: 0.06241540401681151\n",
      "--> Model\n",
      "\n",
      "--> model has 3.899392M params\n",
      "\n",
      "train_dataset: Dataset({\n",
      "    features: ['input_ids', 'labels'],\n",
      "    num_rows: 114599\n",
      "})\n",
      "val_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "test_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "--> Sanity check\n",
      "           '[gMASK]': 64790 -> -100\n",
      "               'sop': 64792 -> -100\n",
      "          '<|user|>': 64795 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '\\n': 13 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '类型': 33467 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '版': 55090 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '宽松': 40833 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '风格': 32799 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '性感': 40589 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '图案': 37505 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '线条': 37216 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '阔': 56529 -> -100\n",
      "                 '腿': 56158 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "     '<|assistant|>': 64796 -> -100\n",
      "                  '': 30910 -> 30910\n",
      "                '\\n': 13 -> 13\n",
      "                  '': 30910 -> 30910\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '阔': 56529 -> 56529\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '这': 54551 -> 54551\n",
      "                '两年': 33808 -> 33808\n",
      "                '真的': 32041 -> 32041\n",
      "                 '吸': 55360 -> 55360\n",
      "                 '粉': 55486 -> 55486\n",
      "                '不少': 32138 -> 32138\n",
      "                 '，': 31123 -> 31123\n",
      "                '明星': 32943 -> 32943\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '达': 54880 -> 54880\n",
      "                '人的': 31664 -> 31664\n",
      "                '心头': 46565 -> 46565\n",
      "                 '爱': 54799 -> 54799\n",
      "                 '。': 31155 -> 31155\n",
      "                '毕竟': 33051 -> 33051\n",
      "                 '好': 54591 -> 54591\n",
      "                 '穿': 55432 -> 55432\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '，': 31123 -> 31123\n",
      "                 '谁': 55622 -> 55622\n",
      "                '都能': 32904 -> 32904\n",
      "                 '穿': 55432 -> 55432\n",
      "                 '出': 54557 -> 54557\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '长': 54625 -> 54625\n",
      "                 '2': 30943 -> 30943\n",
      "                 '米': 55055 -> 55055\n",
      "               '的效果': 35590 -> 35590\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '，': 31123 -> 31123\n",
      "               '当然是': 48466 -> 48466\n",
      "                 '遮': 57148 -> 57148\n",
      "                 '肉': 55343 -> 55343\n",
      "                 '小': 54603 -> 54603\n",
      "                '能手': 49355 -> 49355\n",
      "                 '啊': 55674 -> 55674\n",
      "                 '。': 31155 -> 31155\n",
      "                '上身': 51605 -> 51605\n",
      "                 '随': 55119 -> 55119\n",
      "                 '性': 54642 -> 54642\n",
      "                '自然': 31799 -> 31799\n",
      "                 '不': 54535 -> 54535\n",
      "                 '拘': 57036 -> 57036\n",
      "                 '束': 55625 -> 55625\n",
      "                 '，': 31123 -> 31123\n",
      "                '面料': 46839 -> 46839\n",
      "                 '亲': 55113 -> 55113\n",
      "                 '肤': 56089 -> 56089\n",
      "                '舒适': 33894 -> 33894\n",
      "                 '贴': 55778 -> 55778\n",
      "                '身体': 31902 -> 31902\n",
      "                 '验': 55017 -> 55017\n",
      "                 '感': 54706 -> 54706\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '哒': 59230 -> 59230\n",
      "                 '。': 31155 -> 31155\n",
      "                 '系': 54712 -> 54712\n",
      "                 '带': 54882 -> 54882\n",
      "                '部分': 31726 -> 31726\n",
      "                '增加': 31917 -> 31917\n",
      "                '设计': 31735 -> 31735\n",
      "                '看点': 45032 -> 45032\n",
      "                 '，': 31123 -> 31123\n",
      "                 '还': 54656 -> 54656\n",
      "                 '让': 54772 -> 54772\n",
      "                '单品': 46539 -> 46539\n",
      "               '的设计': 34481 -> 34481\n",
      "                 '感': 54706 -> 54706\n",
      "                '更强': 43084 -> 43084\n",
      "                 '。': 31155 -> 31155\n",
      "                '腿部': 46799 -> 46799\n",
      "                '线条': 37216 -> 37216\n",
      "                 '若': 55351 -> 55351\n",
      "                 '隐': 55733 -> 55733\n",
      "                 '若': 55351 -> 55351\n",
      "                 '现': 54600 -> 54600\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                '性感': 40589 -> 40589\n",
      "                 '撩': 58521 -> 58521\n",
      "                 '人': 54533 -> 54533\n",
      "                 '。': 31155 -> 31155\n",
      "                '颜色': 33692 -> 33692\n",
      "                 '敲': 57004 -> 57004\n",
      "                '温柔': 34678 -> 34678\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                 '与': 54619 -> 54619\n",
      "                '裤子': 44722 -> 44722\n",
      "                '本身': 32754 -> 32754\n",
      "                 '所': 54626 -> 54626\n",
      "                '呈现': 33169 -> 33169\n",
      "               '的风格': 48084 -> 48084\n",
      "                '有点': 33149 -> 33149\n",
      "                 '反': 54955 -> 54955\n",
      "                 '差': 55342 -> 55342\n",
      "                 '萌': 56842 -> 56842\n",
      "                 '。': 31155 -> 31155\n",
      "                  '': 2 -> 2\n",
      "/root/miniconda3/lib/python3.12/site-packages/accelerate/accelerator.py:432: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n",
      "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n",
      "  warnings.warn(\n",
      "Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n",
      "max_steps is given, it will override any value given in num_train_epochs\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running training *****\n",
      "  Num examples = 114,599\n",
      "  Num Epochs = 2\n",
      "  Instantaneous batch size per device = 8\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 8\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 15,000\n",
      "  Number of trainable parameters = 3,899,392\n",
      "{'loss': 4.6703, 'grad_norm': 2.8197057247161865, 'learning_rate': 7.994666666666666e-05, 'epoch': 0.0}\n",
      "{'loss': 4.1752, 'grad_norm': 2.6082141399383545, 'learning_rate': 7.989333333333334e-05, 'epoch': 0.0}\n",
      "{'loss': 3.9236, 'grad_norm': 2.546117067337036, 'learning_rate': 7.984000000000001e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7559, 'grad_norm': 2.319216251373291, 'learning_rate': 7.978666666666667e-05, 'epoch': 0.0}\n",
      "{'loss': 3.673, 'grad_norm': 2.674680709838867, 'learning_rate': 7.973333333333334e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7715, 'grad_norm': 3.025786876678467, 'learning_rate': 7.968000000000001e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7012, 'grad_norm': 3.4497127532958984, 'learning_rate': 7.962666666666667e-05, 'epoch': 0.0}\n",
      "{'loss': 3.7754, 'grad_norm': 3.328488826751709, 'learning_rate': 7.957333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.643, 'grad_norm': 3.324289321899414, 'learning_rate': 7.952000000000001e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6227, 'grad_norm': 3.2670114040374756, 'learning_rate': 7.946666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6717, 'grad_norm': 3.3788836002349854, 'learning_rate': 7.941333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6404, 'grad_norm': 3.407684564590454, 'learning_rate': 7.936000000000001e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6197, 'grad_norm': 3.8688929080963135, 'learning_rate': 7.930666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6553, 'grad_norm': 3.7101776599884033, 'learning_rate': 7.925333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6486, 'grad_norm': 3.511148691177368, 'learning_rate': 7.92e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6111, 'grad_norm': 4.585350036621094, 'learning_rate': 7.914666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5291, 'grad_norm': 3.681063175201416, 'learning_rate': 7.909333333333334e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6254, 'grad_norm': 3.9506309032440186, 'learning_rate': 7.904e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5213, 'grad_norm': 3.7870750427246094, 'learning_rate': 7.898666666666667e-05, 'epoch': 0.01}\n",
      "{'loss': 3.5701, 'grad_norm': 3.83980393409729, 'learning_rate': 7.893333333333335e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6727, 'grad_norm': 3.8762078285217285, 'learning_rate': 7.888e-05, 'epoch': 0.01}\n",
      "{'loss': 3.6168, 'grad_norm': 4.497057914733887, 'learning_rate': 7.882666666666668e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5006, 'grad_norm': 3.898608684539795, 'learning_rate': 7.877333333333335e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5541, 'grad_norm': 4.896329402923584, 'learning_rate': 7.872e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5984, 'grad_norm': 4.32712459564209, 'learning_rate': 7.866666666666666e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5033, 'grad_norm': 4.111212730407715, 'learning_rate': 7.861333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.6145, 'grad_norm': 4.459745407104492, 'learning_rate': 7.856000000000001e-05, 'epoch': 0.02}\n",
      "{'loss': 3.674, 'grad_norm': 3.997986316680908, 'learning_rate': 7.850666666666666e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5131, 'grad_norm': 4.0927534103393555, 'learning_rate': 7.845333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5336, 'grad_norm': 4.924872875213623, 'learning_rate': 7.840000000000001e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5299, 'grad_norm': 4.67594051361084, 'learning_rate': 7.834666666666667e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5387, 'grad_norm': 4.460055351257324, 'learning_rate': 7.829333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.591, 'grad_norm': 4.175909519195557, 'learning_rate': 7.824000000000001e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4982, 'grad_norm': 4.375810146331787, 'learning_rate': 7.818666666666668e-05, 'epoch': 0.02}\n",
      "{'loss': 3.4432, 'grad_norm': 4.362689971923828, 'learning_rate': 7.813333333333334e-05, 'epoch': 0.02}\n",
      "{'loss': 3.5912, 'grad_norm': 4.634555339813232, 'learning_rate': 7.808000000000001e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4799, 'grad_norm': 4.332132339477539, 'learning_rate': 7.802666666666668e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5154, 'grad_norm': 3.9648447036743164, 'learning_rate': 7.797333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.6303, 'grad_norm': 4.619975566864014, 'learning_rate': 7.792e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5004, 'grad_norm': 4.8970746994018555, 'learning_rate': 7.786666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4021, 'grad_norm': 4.21634578704834, 'learning_rate': 7.781333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5092, 'grad_norm': 4.5227274894714355, 'learning_rate': 7.776e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5961, 'grad_norm': 4.429263591766357, 'learning_rate': 7.770666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.468, 'grad_norm': 4.580286502838135, 'learning_rate': 7.765333333333334e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4379, 'grad_norm': 4.95194673538208, 'learning_rate': 7.76e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5334, 'grad_norm': 4.535139083862305, 'learning_rate': 7.754666666666667e-05, 'epoch': 0.03}\n",
      "{'loss': 3.6143, 'grad_norm': 4.756340026855469, 'learning_rate': 7.749333333333335e-05, 'epoch': 0.03}\n",
      "{'loss': 3.4709, 'grad_norm': 4.783182621002197, 'learning_rate': 7.744e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5059, 'grad_norm': 5.020172595977783, 'learning_rate': 7.738666666666668e-05, 'epoch': 0.03}\n",
      "{'loss': 3.5057, 'grad_norm': 4.463188171386719, 'learning_rate': 7.733333333333333e-05, 'epoch': 0.03}\n",
      "  3%|█▏                                   | 500/15000 [06:49<3:24:12,  1.18it/s]***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.84s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:06<00:02,  2.24s/it]\u001b[A\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:08<00:00,  2.15s/it]\u001b[ABuilding prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 0.784 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "                                                                                \n",
      "\u001b[A{'eval_rouge-1': 31.746892000000003, 'eval_rouge-2': 6.642418000000001, 'eval_rouge-l': 25.459874, 'eval_bleu-4': 0.031141934609226582, 'eval_runtime': 29.952, 'eval_samples_per_second': 1.669, 'eval_steps_per_second': 0.134, 'epoch': 0.03}\n",
      "  3%|█▏                                   | 500/15000 [07:19<3:24:12,  1.18it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:09<00:00,  2.15s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.5494, 'grad_norm': 5.30453634262085, 'learning_rate': 7.728e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5945, 'grad_norm': 4.413326740264893, 'learning_rate': 7.722666666666668e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4512, 'grad_norm': 4.983961582183838, 'learning_rate': 7.717333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5164, 'grad_norm': 4.480841159820557, 'learning_rate': 7.712000000000001e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5877, 'grad_norm': 4.640350341796875, 'learning_rate': 7.706666666666668e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5121, 'grad_norm': 4.9427809715271, 'learning_rate': 7.701333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.484, 'grad_norm': 5.114394187927246, 'learning_rate': 7.696000000000001e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4186, 'grad_norm': 4.487595558166504, 'learning_rate': 7.690666666666668e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5332, 'grad_norm': 4.267470836639404, 'learning_rate': 7.685333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4785, 'grad_norm': 4.2621307373046875, 'learning_rate': 7.680000000000001e-05, 'epoch': 0.04}\n",
      "{'loss': 3.3738, 'grad_norm': 4.729582786560059, 'learning_rate': 7.674666666666668e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4514, 'grad_norm': 4.782769680023193, 'learning_rate': 7.669333333333334e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4508, 'grad_norm': 5.186783313751221, 'learning_rate': 7.664e-05, 'epoch': 0.04}\n",
      "{'loss': 3.4781, 'grad_norm': 5.03665828704834, 'learning_rate': 7.658666666666667e-05, 'epoch': 0.04}\n",
      "{'loss': 3.5277, 'grad_norm': 4.633838176727295, 'learning_rate': 7.653333333333334e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5412, 'grad_norm': 4.833538055419922, 'learning_rate': 7.648e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4207, 'grad_norm': 5.077850818634033, 'learning_rate': 7.642666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4078, 'grad_norm': 4.987514495849609, 'learning_rate': 7.637333333333334e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5391, 'grad_norm': 5.184547424316406, 'learning_rate': 7.632e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4668, 'grad_norm': 3.915761709213257, 'learning_rate': 7.626666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3973, 'grad_norm': 4.378795623779297, 'learning_rate': 7.621333333333334e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4629, 'grad_norm': 4.646332263946533, 'learning_rate': 7.616e-05, 'epoch': 0.05}\n",
      "{'loss': 3.3838, 'grad_norm': 4.582601547241211, 'learning_rate': 7.610666666666667e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4717, 'grad_norm': 4.35571813583374, 'learning_rate': 7.605333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5244, 'grad_norm': 4.643686771392822, 'learning_rate': 7.6e-05, 'epoch': 0.05}\n",
      "{'loss': 3.4334, 'grad_norm': 4.716979503631592, 'learning_rate': 7.594666666666668e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5014, 'grad_norm': 4.51218318939209, 'learning_rate': 7.589333333333333e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5182, 'grad_norm': 5.130342960357666, 'learning_rate': 7.584e-05, 'epoch': 0.05}\n",
      "{'loss': 3.5484, 'grad_norm': 4.859909534454346, 'learning_rate': 7.578666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5348, 'grad_norm': 4.205305576324463, 'learning_rate': 7.573333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4488, 'grad_norm': 4.364794731140137, 'learning_rate': 7.568000000000001e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4773, 'grad_norm': 5.23344612121582, 'learning_rate': 7.562666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4176, 'grad_norm': 7.884349822998047, 'learning_rate': 7.557333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.507, 'grad_norm': 4.667613506317139, 'learning_rate': 7.552000000000001e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5035, 'grad_norm': 4.13116979598999, 'learning_rate': 7.546666666666668e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5617, 'grad_norm': 4.480526447296143, 'learning_rate': 7.541333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4744, 'grad_norm': 4.474471569061279, 'learning_rate': 7.536e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5375, 'grad_norm': 4.332697868347168, 'learning_rate': 7.530666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4396, 'grad_norm': 4.543668746948242, 'learning_rate': 7.525333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4441, 'grad_norm': 4.988092422485352, 'learning_rate': 7.52e-05, 'epoch': 0.06}\n",
      "{'loss': 3.4359, 'grad_norm': 4.413886070251465, 'learning_rate': 7.514666666666667e-05, 'epoch': 0.06}\n",
      "{'loss': 3.5434, 'grad_norm': 5.127940654754639, 'learning_rate': 7.509333333333334e-05, 'epoch': 0.06}\n",
      "{'loss': 3.508, 'grad_norm': 4.2591657638549805, 'learning_rate': 7.504e-05, 'epoch': 0.06}\n",
      "{'loss': 3.3809, 'grad_norm': 4.529130458831787, 'learning_rate': 7.498666666666667e-05, 'epoch': 0.07}\n",
      "{'loss': 3.452, 'grad_norm': 4.664702415466309, 'learning_rate': 7.493333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.499, 'grad_norm': 4.388268947601318, 'learning_rate': 7.488000000000001e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4721, 'grad_norm': 4.647937774658203, 'learning_rate': 7.482666666666667e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3916, 'grad_norm': 5.318851470947266, 'learning_rate': 7.477333333333333e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3756, 'grad_norm': 4.927263259887695, 'learning_rate': 7.472000000000002e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4994, 'grad_norm': 5.246305465698242, 'learning_rate': 7.466666666666667e-05, 'epoch': 0.07}\n",
      "  7%|██▍                                 | 1000/15000 [14:04<3:10:08,  1.23it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.79s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:23<00:07,  7.15s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 30.731166, 'eval_rouge-2': 6.463966000000002, 'eval_rouge-l': 22.952237999999994, 'eval_bleu-4': 0.03026846235081693, 'eval_runtime': 61.9925, 'eval_samples_per_second': 0.807, 'eval_steps_per_second': 0.065, 'epoch': 0.07}\n",
      "  7%|██▍                                 | 1000/15000 [15:06<3:10:08,  1.23it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:41<00:00, 11.45s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-1000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.4949, 'grad_norm': 4.452911853790283, 'learning_rate': 7.461333333333333e-05, 'epoch': 0.07}\n",
      "{'loss': 3.5934, 'grad_norm': 4.824969291687012, 'learning_rate': 7.456e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4143, 'grad_norm': 4.79075813293457, 'learning_rate': 7.450666666666668e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4986, 'grad_norm': 4.8907060623168945, 'learning_rate': 7.445333333333333e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4566, 'grad_norm': 4.839325904846191, 'learning_rate': 7.44e-05, 'epoch': 0.07}\n",
      "{'loss': 3.4965, 'grad_norm': 4.882376670837402, 'learning_rate': 7.434666666666668e-05, 'epoch': 0.07}\n",
      "{'loss': 3.3762, 'grad_norm': 4.637453079223633, 'learning_rate': 7.429333333333334e-05, 'epoch': 0.07}\n",
      "{'loss': 3.5197, 'grad_norm': 5.4930548667907715, 'learning_rate': 7.424000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4578, 'grad_norm': 4.83154821395874, 'learning_rate': 7.418666666666668e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4539, 'grad_norm': 4.709105014801025, 'learning_rate': 7.413333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4992, 'grad_norm': 4.469064712524414, 'learning_rate': 7.408000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4502, 'grad_norm': 4.569152355194092, 'learning_rate': 7.402666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3738, 'grad_norm': 4.68726921081543, 'learning_rate': 7.397333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.5236, 'grad_norm': 4.854297637939453, 'learning_rate': 7.392000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3861, 'grad_norm': 4.461036205291748, 'learning_rate': 7.386666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3904, 'grad_norm': 5.231841087341309, 'learning_rate': 7.381333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4166, 'grad_norm': 4.435332298278809, 'learning_rate': 7.376000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.3754, 'grad_norm': 4.50226354598999, 'learning_rate': 7.370666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4861, 'grad_norm': 4.566997528076172, 'learning_rate': 7.365333333333334e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4076, 'grad_norm': 4.188539028167725, 'learning_rate': 7.360000000000001e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4223, 'grad_norm': 4.69785737991333, 'learning_rate': 7.354666666666667e-05, 'epoch': 0.08}\n",
      "{'loss': 3.4375, 'grad_norm': 4.9698028564453125, 'learning_rate': 7.349333333333333e-05, 'epoch': 0.09}\n",
      "{'loss': 3.3424, 'grad_norm': 4.773835182189941, 'learning_rate': 7.344000000000002e-05, 'epoch': 0.09}\n",
      "{'loss': 3.423, 'grad_norm': 4.197528839111328, 'learning_rate': 7.338666666666667e-05, 'epoch': 0.09}\n",
      "{'loss': 3.4861, 'grad_norm': 5.1317548751831055, 'learning_rate': 7.333333333333333e-05, 'epoch': 0.09}\n",
      "{'loss': 3.3613, 'grad_norm': 4.819472789764404, 'learning_rate': 7.328e-05, 'epoch': 0.09}\n",
      "{'loss': 3.3629, 'grad_norm': 4.778006553649902, 'learning_rate': 7.322666666666667e-05, 'epoch': 0.09}\n",
      "{'loss': 3.4486, 'grad_norm': 4.664102077484131, 'learning_rate': 7.317333333333333e-05, 'epoch': 0.09}\n",
      "{'loss': 3.5406, 'grad_norm': 4.406762599945068, 'learning_rate': 7.312e-05, 'epoch': 0.09}\n",
      "{'loss': 3.482, 'grad_norm': 4.924619197845459, 'learning_rate': 7.306666666666668e-05, 'epoch': 0.09}\n",
      "{'loss': 3.4459, 'grad_norm': 4.488289833068848, 'learning_rate': 7.301333333333333e-05, 'epoch': 0.09}\n",
      "{'loss': 3.5234, 'grad_norm': 4.756265163421631, 'learning_rate': 7.296e-05, 'epoch': 0.09}\n",
      "{'loss': 3.4373, 'grad_norm': 4.557583808898926, 'learning_rate': 7.290666666666668e-05, 'epoch': 0.09}\n",
      "{'loss': 3.3896, 'grad_norm': 4.531164646148682, 'learning_rate': 7.285333333333334e-05, 'epoch': 0.09}\n",
      "{'loss': 3.5023, 'grad_norm': 4.5709333419799805, 'learning_rate': 7.280000000000001e-05, 'epoch': 0.09}\n",
      "{'loss': 3.399, 'grad_norm': 4.4934892654418945, 'learning_rate': 7.274666666666667e-05, 'epoch': 0.09}\n",
      "{'loss': 3.4441, 'grad_norm': 4.70988655090332, 'learning_rate': 7.269333333333334e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4816, 'grad_norm': 4.769764423370361, 'learning_rate': 7.264000000000001e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3619, 'grad_norm': 4.422647953033447, 'learning_rate': 7.258666666666667e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3072, 'grad_norm': 4.871537685394287, 'learning_rate': 7.253333333333334e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4412, 'grad_norm': 4.6567277908325195, 'learning_rate': 7.248000000000001e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4652, 'grad_norm': 4.637659072875977, 'learning_rate': 7.242666666666667e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4248, 'grad_norm': 5.035904884338379, 'learning_rate': 7.237333333333334e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4883, 'grad_norm': 5.465686321258545, 'learning_rate': 7.232000000000001e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3701, 'grad_norm': 4.720252990722656, 'learning_rate': 7.226666666666667e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3223, 'grad_norm': 4.362887382507324, 'learning_rate': 7.221333333333333e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3611, 'grad_norm': 4.801677703857422, 'learning_rate': 7.216000000000001e-05, 'epoch': 0.1}\n",
      "{'loss': 3.3225, 'grad_norm': 4.718226909637451, 'learning_rate': 7.210666666666667e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4924, 'grad_norm': 5.39077091217041, 'learning_rate': 7.205333333333333e-05, 'epoch': 0.1}\n",
      "{'loss': 3.4617, 'grad_norm': 4.35094690322876, 'learning_rate': 7.2e-05, 'epoch': 0.1}\n",
      " 10%|███▌                                | 1500/15000 [21:50<2:50:17,  1.32it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.90s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:15<00:05,  5.82s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 33.52285, 'eval_rouge-2': 7.89318, 'eval_rouge-l': 25.906483999999995, 'eval_bleu-4': 0.03819313422155009, 'eval_runtime': 25.9916, 'eval_samples_per_second': 1.924, 'eval_steps_per_second': 0.154, 'epoch': 0.1}\n",
      " 10%|███▌                                | 1500/15000 [22:16<2:50:17,  1.32it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:17<00:00,  4.40s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-1500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.4637, 'grad_norm': 5.135016918182373, 'learning_rate': 7.194666666666667e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3484, 'grad_norm': 4.374507904052734, 'learning_rate': 7.189333333333333e-05, 'epoch': 0.11}\n",
      "{'loss': 3.5215, 'grad_norm': 4.876531600952148, 'learning_rate': 7.184e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3498, 'grad_norm': 5.454792022705078, 'learning_rate': 7.178666666666668e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3572, 'grad_norm': 4.343905448913574, 'learning_rate': 7.173333333333333e-05, 'epoch': 0.11}\n",
      "{'loss': 3.382, 'grad_norm': 5.1135430335998535, 'learning_rate': 7.168e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3854, 'grad_norm': 7.1236395835876465, 'learning_rate': 7.162666666666668e-05, 'epoch': 0.11}\n",
      "{'loss': 3.5041, 'grad_norm': 4.745553493499756, 'learning_rate': 7.157333333333335e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3889, 'grad_norm': 4.841073989868164, 'learning_rate': 7.152e-05, 'epoch': 0.11}\n",
      "{'loss': 3.4363, 'grad_norm': 4.635847091674805, 'learning_rate': 7.146666666666666e-05, 'epoch': 0.11}\n",
      "{'loss': 3.4473, 'grad_norm': 5.004560470581055, 'learning_rate': 7.141333333333335e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3826, 'grad_norm': 4.64663028717041, 'learning_rate': 7.136000000000001e-05, 'epoch': 0.11}\n",
      "{'loss': 3.4494, 'grad_norm': 5.2061381340026855, 'learning_rate': 7.130666666666667e-05, 'epoch': 0.11}\n",
      "{'loss': 3.3998, 'grad_norm': 4.340172290802002, 'learning_rate': 7.125333333333334e-05, 'epoch': 0.11}\n",
      "{'loss': 3.4002, 'grad_norm': 4.5513386726379395, 'learning_rate': 7.120000000000001e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3395, 'grad_norm': 4.914803981781006, 'learning_rate': 7.114666666666667e-05, 'epoch': 0.12}\n",
      "{'loss': 3.4273, 'grad_norm': 4.545842170715332, 'learning_rate': 7.109333333333334e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3939, 'grad_norm': 4.555296897888184, 'learning_rate': 7.104000000000001e-05, 'epoch': 0.12}\n",
      "{'loss': 3.4061, 'grad_norm': 4.667812824249268, 'learning_rate': 7.098666666666667e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3615, 'grad_norm': 4.4866557121276855, 'learning_rate': 7.093333333333334e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3055, 'grad_norm': 5.135975360870361, 'learning_rate': 7.088000000000001e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3715, 'grad_norm': 4.668708801269531, 'learning_rate': 7.082666666666667e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3838, 'grad_norm': 5.041476249694824, 'learning_rate': 7.077333333333334e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3031, 'grad_norm': 5.278650283813477, 'learning_rate': 7.072e-05, 'epoch': 0.12}\n",
      "{'loss': 3.4793, 'grad_norm': 5.116074085235596, 'learning_rate': 7.066666666666667e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3998, 'grad_norm': 5.118762969970703, 'learning_rate': 7.061333333333334e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3473, 'grad_norm': 5.069239139556885, 'learning_rate': 7.056e-05, 'epoch': 0.12}\n",
      "{'loss': 3.3225, 'grad_norm': 5.08617639541626, 'learning_rate': 7.050666666666667e-05, 'epoch': 0.12}\n",
      "{'loss': 3.4127, 'grad_norm': 5.4185614585876465, 'learning_rate': 7.045333333333335e-05, 'epoch': 0.12}\n",
      "{'loss': 3.2502, 'grad_norm': 4.905248165130615, 'learning_rate': 7.04e-05, 'epoch': 0.13}\n",
      "{'loss': 3.4588, 'grad_norm': 4.8547139167785645, 'learning_rate': 7.034666666666668e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3584, 'grad_norm': 4.842235565185547, 'learning_rate': 7.029333333333335e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3, 'grad_norm': 4.986169338226318, 'learning_rate': 7.024e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3818, 'grad_norm': 4.838183403015137, 'learning_rate': 7.018666666666666e-05, 'epoch': 0.13}\n",
      "{'loss': 3.4268, 'grad_norm': 5.28480339050293, 'learning_rate': 7.013333333333335e-05, 'epoch': 0.13}\n",
      "{'loss': 3.4186, 'grad_norm': 5.1605963706970215, 'learning_rate': 7.008e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3389, 'grad_norm': 4.952940940856934, 'learning_rate': 7.002666666666666e-05, 'epoch': 0.13}\n",
      "{'loss': 3.2396, 'grad_norm': 5.355233192443848, 'learning_rate': 6.997333333333334e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3084, 'grad_norm': 4.512715816497803, 'learning_rate': 6.992000000000001e-05, 'epoch': 0.13}\n",
      "{'loss': 3.4074, 'grad_norm': 4.923635959625244, 'learning_rate': 6.986666666666667e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3172, 'grad_norm': 4.711386680603027, 'learning_rate': 6.981333333333334e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3969, 'grad_norm': 4.883408069610596, 'learning_rate': 6.976000000000001e-05, 'epoch': 0.13}\n",
      "{'loss': 3.3359, 'grad_norm': 5.154426574707031, 'learning_rate': 6.970666666666667e-05, 'epoch': 0.13}\n",
      "{'loss': 3.5168, 'grad_norm': 4.80844783782959, 'learning_rate': 6.965333333333334e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4695, 'grad_norm': 4.6424431800842285, 'learning_rate': 6.960000000000001e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3551, 'grad_norm': 5.621227741241455, 'learning_rate': 6.954666666666667e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4885, 'grad_norm': 4.954301357269287, 'learning_rate': 6.949333333333334e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3609, 'grad_norm': 4.7164692878723145, 'learning_rate': 6.944e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4439, 'grad_norm': 4.9278645515441895, 'learning_rate': 6.938666666666667e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4539, 'grad_norm': 5.16158390045166, 'learning_rate': 6.933333333333334e-05, 'epoch': 0.14}\n",
      " 13%|████▊                               | 2000/15000 [29:00<2:50:21,  1.27it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.71s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:06<00:02,  2.31s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.441736, 'eval_rouge-2': 7.363194, 'eval_rouge-l': 25.106704, 'eval_bleu-4': 0.0374425168828638, 'eval_runtime': 28.5468, 'eval_samples_per_second': 1.752, 'eval_steps_per_second': 0.14, 'epoch': 0.14}\n",
      " 13%|████▊                               | 2000/15000 [29:29<2:50:21,  1.27it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:08<00:00,  2.22s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-2000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3436, 'grad_norm': 4.963539123535156, 'learning_rate': 6.928e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4379, 'grad_norm': 4.885372161865234, 'learning_rate': 6.922666666666667e-05, 'epoch': 0.14}\n",
      "{'loss': 3.327, 'grad_norm': 5.333014011383057, 'learning_rate': 6.917333333333334e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3238, 'grad_norm': 5.240528583526611, 'learning_rate': 6.912e-05, 'epoch': 0.14}\n",
      "{'loss': 3.4264, 'grad_norm': 5.184511184692383, 'learning_rate': 6.906666666666667e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3906, 'grad_norm': 4.8699212074279785, 'learning_rate': 6.901333333333335e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3828, 'grad_norm': 4.43211555480957, 'learning_rate': 6.896e-05, 'epoch': 0.14}\n",
      "{'loss': 3.3687, 'grad_norm': 5.221590995788574, 'learning_rate': 6.890666666666666e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3238, 'grad_norm': 4.540305137634277, 'learning_rate': 6.885333333333335e-05, 'epoch': 0.15}\n",
      "{'loss': 3.4775, 'grad_norm': 4.842572212219238, 'learning_rate': 6.88e-05, 'epoch': 0.15}\n",
      "{'loss': 3.373, 'grad_norm': 4.96262264251709, 'learning_rate': 6.874666666666666e-05, 'epoch': 0.15}\n",
      "{'loss': 3.4301, 'grad_norm': 4.407184600830078, 'learning_rate': 6.869333333333334e-05, 'epoch': 0.15}\n",
      "{'loss': 3.4248, 'grad_norm': 4.649362087249756, 'learning_rate': 6.864000000000001e-05, 'epoch': 0.15}\n",
      "{'loss': 3.2367, 'grad_norm': 5.012125015258789, 'learning_rate': 6.858666666666667e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3771, 'grad_norm': 5.172879695892334, 'learning_rate': 6.853333333333334e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3229, 'grad_norm': 5.078874588012695, 'learning_rate': 6.848000000000001e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3361, 'grad_norm': 4.905421733856201, 'learning_rate': 6.842666666666667e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3201, 'grad_norm': 4.714696884155273, 'learning_rate': 6.837333333333334e-05, 'epoch': 0.15}\n",
      "{'loss': 3.4045, 'grad_norm': 4.730009078979492, 'learning_rate': 6.832000000000001e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3004, 'grad_norm': 4.678133964538574, 'learning_rate': 6.826666666666668e-05, 'epoch': 0.15}\n",
      "{'loss': 3.343, 'grad_norm': 4.829615116119385, 'learning_rate': 6.821333333333334e-05, 'epoch': 0.15}\n",
      "{'loss': 3.2986, 'grad_norm': 5.146580696105957, 'learning_rate': 6.816e-05, 'epoch': 0.15}\n",
      "{'loss': 3.3408, 'grad_norm': 5.316357135772705, 'learning_rate': 6.810666666666668e-05, 'epoch': 0.16}\n",
      "{'loss': 3.4355, 'grad_norm': 4.695398807525635, 'learning_rate': 6.805333333333334e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3861, 'grad_norm': 4.62492036819458, 'learning_rate': 6.8e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3453, 'grad_norm': 5.142467498779297, 'learning_rate': 6.794666666666667e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3986, 'grad_norm': 4.649212837219238, 'learning_rate': 6.789333333333334e-05, 'epoch': 0.16}\n",
      "{'loss': 3.2902, 'grad_norm': 5.5613861083984375, 'learning_rate': 6.784e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3885, 'grad_norm': 4.4880828857421875, 'learning_rate': 6.778666666666667e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3383, 'grad_norm': 4.713613986968994, 'learning_rate': 6.773333333333334e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3375, 'grad_norm': 4.934491157531738, 'learning_rate': 6.768e-05, 'epoch': 0.16}\n",
      "{'loss': 3.401, 'grad_norm': 5.337859153747559, 'learning_rate': 6.762666666666667e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3752, 'grad_norm': 4.855563640594482, 'learning_rate': 6.757333333333335e-05, 'epoch': 0.16}\n",
      "{'loss': 3.5107, 'grad_norm': 4.9320220947265625, 'learning_rate': 6.752e-05, 'epoch': 0.16}\n",
      "{'loss': 3.4508, 'grad_norm': 5.29586124420166, 'learning_rate': 6.746666666666668e-05, 'epoch': 0.16}\n",
      "{'loss': 3.398, 'grad_norm': 5.270134925842285, 'learning_rate': 6.741333333333333e-05, 'epoch': 0.16}\n",
      "{'loss': 3.3668, 'grad_norm': 4.93515157699585, 'learning_rate': 6.736e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3879, 'grad_norm': 4.8918046951293945, 'learning_rate': 6.730666666666668e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3563, 'grad_norm': 5.025860786437988, 'learning_rate': 6.725333333333334e-05, 'epoch': 0.17}\n",
      "{'loss': 3.4256, 'grad_norm': 5.133913040161133, 'learning_rate': 6.720000000000001e-05, 'epoch': 0.17}\n",
      "{'loss': 3.4703, 'grad_norm': 5.550250053405762, 'learning_rate': 6.714666666666668e-05, 'epoch': 0.17}\n",
      "{'loss': 3.2486, 'grad_norm': 4.754904747009277, 'learning_rate': 6.709333333333334e-05, 'epoch': 0.17}\n",
      "{'loss': 3.2885, 'grad_norm': 5.139050483703613, 'learning_rate': 6.704000000000001e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3668, 'grad_norm': 4.7778472900390625, 'learning_rate': 6.698666666666668e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3881, 'grad_norm': 4.627148151397705, 'learning_rate': 6.693333333333334e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3643, 'grad_norm': 5.496486663818359, 'learning_rate': 6.688e-05, 'epoch': 0.17}\n",
      "{'loss': 3.2758, 'grad_norm': 5.126485824584961, 'learning_rate': 6.682666666666668e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3771, 'grad_norm': 5.137759685516357, 'learning_rate': 6.677333333333334e-05, 'epoch': 0.17}\n",
      "{'loss': 3.3334, 'grad_norm': 4.964709758758545, 'learning_rate': 6.672e-05, 'epoch': 0.17}\n",
      "{'loss': 3.217, 'grad_norm': 5.6735639572143555, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.17}\n",
      " 17%|██████                              | 2500/15000 [36:14<2:42:16,  1.28it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.66s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:37<00:13, 13.39s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 30.842944000000003, 'eval_rouge-2': 7.599040000000001, 'eval_rouge-l': 23.164644000000003, 'eval_bleu-4': 0.032704917382499095, 'eval_runtime': 60.7752, 'eval_samples_per_second': 0.823, 'eval_steps_per_second': 0.066, 'epoch': 0.17}\n",
      " 17%|██████                              | 2500/15000 [37:14<2:42:16,  1.28it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:41<00:00,  9.51s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-2500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.4119, 'grad_norm': 5.199995994567871, 'learning_rate': 6.661333333333334e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3125, 'grad_norm': 5.388370513916016, 'learning_rate': 6.656e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3738, 'grad_norm': 5.251852989196777, 'learning_rate': 6.650666666666667e-05, 'epoch': 0.18}\n",
      "{'loss': 3.4143, 'grad_norm': 5.037926197052002, 'learning_rate': 6.645333333333334e-05, 'epoch': 0.18}\n",
      "{'loss': 3.4012, 'grad_norm': 4.959262371063232, 'learning_rate': 6.64e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3723, 'grad_norm': 4.787723064422607, 'learning_rate': 6.634666666666667e-05, 'epoch': 0.18}\n",
      "{'loss': 3.2838, 'grad_norm': 4.629983901977539, 'learning_rate': 6.629333333333334e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3156, 'grad_norm': 4.906765460968018, 'learning_rate': 6.624e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3434, 'grad_norm': 4.936568260192871, 'learning_rate': 6.618666666666667e-05, 'epoch': 0.18}\n",
      "{'loss': 3.4023, 'grad_norm': 4.897476673126221, 'learning_rate': 6.613333333333333e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3951, 'grad_norm': 4.894589900970459, 'learning_rate': 6.608e-05, 'epoch': 0.18}\n",
      "{'loss': 3.4137, 'grad_norm': 5.160785675048828, 'learning_rate': 6.602666666666668e-05, 'epoch': 0.18}\n",
      "{'loss': 3.2549, 'grad_norm': 5.269944190979004, 'learning_rate': 6.597333333333333e-05, 'epoch': 0.18}\n",
      "{'loss': 3.1629, 'grad_norm': 4.534480571746826, 'learning_rate': 6.592e-05, 'epoch': 0.18}\n",
      "{'loss': 3.2656, 'grad_norm': 4.8737053871154785, 'learning_rate': 6.586666666666668e-05, 'epoch': 0.18}\n",
      "{'loss': 3.3199, 'grad_norm': 4.871804237365723, 'learning_rate': 6.581333333333334e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3045, 'grad_norm': 4.806529998779297, 'learning_rate': 6.576000000000001e-05, 'epoch': 0.19}\n",
      "{'loss': 3.2785, 'grad_norm': 4.513823986053467, 'learning_rate': 6.570666666666668e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3396, 'grad_norm': 5.189514636993408, 'learning_rate': 6.565333333333334e-05, 'epoch': 0.19}\n",
      "{'loss': 3.2807, 'grad_norm': 5.135669231414795, 'learning_rate': 6.56e-05, 'epoch': 0.19}\n",
      "{'loss': 3.4139, 'grad_norm': 5.138576984405518, 'learning_rate': 6.554666666666668e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3031, 'grad_norm': 4.846019268035889, 'learning_rate': 6.549333333333334e-05, 'epoch': 0.19}\n",
      "{'loss': 3.415, 'grad_norm': 5.9901018142700195, 'learning_rate': 6.544e-05, 'epoch': 0.19}\n",
      "{'loss': 3.2625, 'grad_norm': 5.006072998046875, 'learning_rate': 6.538666666666667e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3314, 'grad_norm': 4.916365146636963, 'learning_rate': 6.533333333333334e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3729, 'grad_norm': 4.9258575439453125, 'learning_rate': 6.528e-05, 'epoch': 0.19}\n",
      "{'loss': 3.4383, 'grad_norm': 5.555711269378662, 'learning_rate': 6.522666666666667e-05, 'epoch': 0.19}\n",
      "{'loss': 3.2992, 'grad_norm': 5.450119972229004, 'learning_rate': 6.517333333333334e-05, 'epoch': 0.19}\n",
      "{'loss': 3.2869, 'grad_norm': 5.118287086486816, 'learning_rate': 6.512e-05, 'epoch': 0.19}\n",
      "{'loss': 3.3383, 'grad_norm': 4.939267635345459, 'learning_rate': 6.506666666666667e-05, 'epoch': 0.2}\n",
      "{'loss': 3.3279, 'grad_norm': 5.9087934494018555, 'learning_rate': 6.501333333333334e-05, 'epoch': 0.2}\n",
      "{'loss': 3.3533, 'grad_norm': 5.250077247619629, 'learning_rate': 6.496000000000002e-05, 'epoch': 0.2}\n",
      "{'loss': 3.425, 'grad_norm': 4.838453769683838, 'learning_rate': 6.490666666666667e-05, 'epoch': 0.2}\n",
      "{'loss': 3.3637, 'grad_norm': 5.038388729095459, 'learning_rate': 6.485333333333333e-05, 'epoch': 0.2}\n",
      "{'loss': 3.3463, 'grad_norm': 5.5410261154174805, 'learning_rate': 6.48e-05, 'epoch': 0.2}\n",
      "{'loss': 3.383, 'grad_norm': 5.2353901863098145, 'learning_rate': 6.474666666666667e-05, 'epoch': 0.2}\n",
      "{'loss': 3.3883, 'grad_norm': 4.886659145355225, 'learning_rate': 6.469333333333333e-05, 'epoch': 0.2}\n",
      "{'loss': 3.4094, 'grad_norm': 5.1744561195373535, 'learning_rate': 6.464e-05, 'epoch': 0.2}\n",
      "{'loss': 3.318, 'grad_norm': 5.69080114364624, 'learning_rate': 6.458666666666668e-05, 'epoch': 0.2}\n",
      "{'loss': 3.426, 'grad_norm': 5.266020774841309, 'learning_rate': 6.453333333333333e-05, 'epoch': 0.2}\n",
      "{'loss': 3.2811, 'grad_norm': 5.0117926597595215, 'learning_rate': 6.448e-05, 'epoch': 0.2}\n",
      "{'loss': 3.265, 'grad_norm': 5.649736404418945, 'learning_rate': 6.442666666666668e-05, 'epoch': 0.2}\n",
      "{'loss': 3.252, 'grad_norm': 4.662699222564697, 'learning_rate': 6.437333333333334e-05, 'epoch': 0.2}\n",
      "{'loss': 3.2723, 'grad_norm': 4.829565048217773, 'learning_rate': 6.432000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.2416, 'grad_norm': 4.785861015319824, 'learning_rate': 6.426666666666668e-05, 'epoch': 0.21}\n",
      "{'loss': 3.491, 'grad_norm': 5.054397106170654, 'learning_rate': 6.421333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3803, 'grad_norm': 4.884624481201172, 'learning_rate': 6.416000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.4467, 'grad_norm': 5.677978515625, 'learning_rate': 6.410666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3941, 'grad_norm': 5.060490608215332, 'learning_rate': 6.405333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.4053, 'grad_norm': 5.0317063331604, 'learning_rate': 6.400000000000001e-05, 'epoch': 0.21}\n",
      " 20%|███████▏                            | 3000/15000 [44:03<2:51:56,  1.16it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.97s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:14<00:05,  5.70s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 33.558136, 'eval_rouge-2': 8.022878, 'eval_rouge-l': 25.071704000000004, 'eval_bleu-4': 0.03813909469831085, 'eval_runtime': 37.0223, 'eval_samples_per_second': 1.351, 'eval_steps_per_second': 0.108, 'epoch': 0.21}\n",
      " 20%|███████▏                            | 3000/15000 [44:40<2:51:56,  1.16it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:17<00:00,  4.46s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-3000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.2947, 'grad_norm': 4.899571418762207, 'learning_rate': 6.394666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.2076, 'grad_norm': 5.3400983810424805, 'learning_rate': 6.389333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3502, 'grad_norm': 5.605780124664307, 'learning_rate': 6.384000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3703, 'grad_norm': 5.534975528717041, 'learning_rate': 6.378666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3619, 'grad_norm': 4.93312406539917, 'learning_rate': 6.373333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3027, 'grad_norm': 5.873647212982178, 'learning_rate': 6.368000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3014, 'grad_norm': 4.865786075592041, 'learning_rate': 6.362666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.4262, 'grad_norm': 5.128903865814209, 'learning_rate': 6.357333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3641, 'grad_norm': 5.597672939300537, 'learning_rate': 6.352e-05, 'epoch': 0.22}\n",
      "{'loss': 3.2607, 'grad_norm': 5.034061908721924, 'learning_rate': 6.346666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3215, 'grad_norm': 6.1946516036987305, 'learning_rate': 6.341333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3713, 'grad_norm': 4.791398525238037, 'learning_rate': 6.336e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3488, 'grad_norm': 5.033057689666748, 'learning_rate': 6.330666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.2971, 'grad_norm': 5.046621799468994, 'learning_rate': 6.325333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3221, 'grad_norm': 5.178755283355713, 'learning_rate': 6.32e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4174, 'grad_norm': 5.624519348144531, 'learning_rate': 6.314666666666668e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3652, 'grad_norm': 5.684453010559082, 'learning_rate': 6.309333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4221, 'grad_norm': 5.628515720367432, 'learning_rate': 6.304e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4404, 'grad_norm': 4.81857967376709, 'learning_rate': 6.298666666666668e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3658, 'grad_norm': 4.876471042633057, 'learning_rate': 6.293333333333334e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3439, 'grad_norm': 5.468027114868164, 'learning_rate': 6.288000000000001e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3934, 'grad_norm': 4.947269439697266, 'learning_rate': 6.282666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.307, 'grad_norm': 5.2726593017578125, 'learning_rate': 6.277333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.4389, 'grad_norm': 5.040715217590332, 'learning_rate': 6.272000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3479, 'grad_norm': 4.971253871917725, 'learning_rate': 6.266666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3344, 'grad_norm': 5.3462395668029785, 'learning_rate': 6.261333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.285, 'grad_norm': 4.7990264892578125, 'learning_rate': 6.256000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2951, 'grad_norm': 5.503958702087402, 'learning_rate': 6.250666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3895, 'grad_norm': 5.045105934143066, 'learning_rate': 6.245333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.423, 'grad_norm': 5.264857769012451, 'learning_rate': 6.240000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2869, 'grad_norm': 4.70725679397583, 'learning_rate': 6.234666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2953, 'grad_norm': 5.470070838928223, 'learning_rate': 6.229333333333333e-05, 'epoch': 0.23}\n",
      "{'loss': 3.251, 'grad_norm': 5.4758806228637695, 'learning_rate': 6.224e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2945, 'grad_norm': 4.802947521209717, 'learning_rate': 6.218666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3416, 'grad_norm': 4.8902587890625, 'learning_rate': 6.213333333333333e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3982, 'grad_norm': 5.272087097167969, 'learning_rate': 6.208e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3088, 'grad_norm': 4.653298854827881, 'learning_rate': 6.202666666666667e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3475, 'grad_norm': 5.3598504066467285, 'learning_rate': 6.197333333333333e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3334, 'grad_norm': 5.2053117752075195, 'learning_rate': 6.192e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2854, 'grad_norm': 4.83317232131958, 'learning_rate': 6.186666666666668e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3926, 'grad_norm': 5.748948097229004, 'learning_rate': 6.181333333333333e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3324, 'grad_norm': 5.922445297241211, 'learning_rate': 6.176e-05, 'epoch': 0.24}\n",
      "{'loss': 3.4482, 'grad_norm': 5.0339555740356445, 'learning_rate': 6.170666666666668e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2996, 'grad_norm': 5.902514457702637, 'learning_rate': 6.165333333333335e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2324, 'grad_norm': 5.13586950302124, 'learning_rate': 6.16e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3354, 'grad_norm': 5.230270862579346, 'learning_rate': 6.154666666666666e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3205, 'grad_norm': 5.389515399932861, 'learning_rate': 6.149333333333334e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2945, 'grad_norm': 5.488376617431641, 'learning_rate': 6.144000000000001e-05, 'epoch': 0.24}\n",
      " 23%|████████▎                           | 3489/15000 [51:18<2:37:29,  1.22it/s]"
     ]
    }
   ],
   "source": [
    "!python finetune_hf.py  data/AdvertiseGen_fix THUDM/chatglm3-6b configs/lora-batch8.yaml"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "968bb43c-fa71-44d0-bc81-b47413fcd634",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:04<00:00,  1.65it/s]\n",
      "trainable params: 3,899,392 || all params: 6,247,483,392 || trainable%: 0.06241540401681151\n",
      "--> Model\n",
      "\n",
      "--> model has 3.899392M params\n",
      "\n",
      "train_dataset: Dataset({\n",
      "    features: ['input_ids', 'labels'],\n",
      "    num_rows: 114599\n",
      "})\n",
      "val_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "test_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "--> Sanity check\n",
      "           '[gMASK]': 64790 -> -100\n",
      "               'sop': 64792 -> -100\n",
      "          '<|user|>': 64795 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '\\n': 13 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '类型': 33467 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '版': 55090 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '宽松': 40833 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '风格': 32799 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '性感': 40589 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '图案': 37505 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '线条': 37216 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '阔': 56529 -> -100\n",
      "                 '腿': 56158 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "     '<|assistant|>': 64796 -> -100\n",
      "                  '': 30910 -> 30910\n",
      "                '\\n': 13 -> 13\n",
      "                  '': 30910 -> 30910\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '阔': 56529 -> 56529\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '这': 54551 -> 54551\n",
      "                '两年': 33808 -> 33808\n",
      "                '真的': 32041 -> 32041\n",
      "                 '吸': 55360 -> 55360\n",
      "                 '粉': 55486 -> 55486\n",
      "                '不少': 32138 -> 32138\n",
      "                 '，': 31123 -> 31123\n",
      "                '明星': 32943 -> 32943\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '达': 54880 -> 54880\n",
      "                '人的': 31664 -> 31664\n",
      "                '心头': 46565 -> 46565\n",
      "                 '爱': 54799 -> 54799\n",
      "                 '。': 31155 -> 31155\n",
      "                '毕竟': 33051 -> 33051\n",
      "                 '好': 54591 -> 54591\n",
      "                 '穿': 55432 -> 55432\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '，': 31123 -> 31123\n",
      "                 '谁': 55622 -> 55622\n",
      "                '都能': 32904 -> 32904\n",
      "                 '穿': 55432 -> 55432\n",
      "                 '出': 54557 -> 54557\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '长': 54625 -> 54625\n",
      "                 '2': 30943 -> 30943\n",
      "                 '米': 55055 -> 55055\n",
      "               '的效果': 35590 -> 35590\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '，': 31123 -> 31123\n",
      "               '当然是': 48466 -> 48466\n",
      "                 '遮': 57148 -> 57148\n",
      "                 '肉': 55343 -> 55343\n",
      "                 '小': 54603 -> 54603\n",
      "                '能手': 49355 -> 49355\n",
      "                 '啊': 55674 -> 55674\n",
      "                 '。': 31155 -> 31155\n",
      "                '上身': 51605 -> 51605\n",
      "                 '随': 55119 -> 55119\n",
      "                 '性': 54642 -> 54642\n",
      "                '自然': 31799 -> 31799\n",
      "                 '不': 54535 -> 54535\n",
      "                 '拘': 57036 -> 57036\n",
      "                 '束': 55625 -> 55625\n",
      "                 '，': 31123 -> 31123\n",
      "                '面料': 46839 -> 46839\n",
      "                 '亲': 55113 -> 55113\n",
      "                 '肤': 56089 -> 56089\n",
      "                '舒适': 33894 -> 33894\n",
      "                 '贴': 55778 -> 55778\n",
      "                '身体': 31902 -> 31902\n",
      "                 '验': 55017 -> 55017\n",
      "                 '感': 54706 -> 54706\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '哒': 59230 -> 59230\n",
      "                 '。': 31155 -> 31155\n",
      "                 '系': 54712 -> 54712\n",
      "                 '带': 54882 -> 54882\n",
      "                '部分': 31726 -> 31726\n",
      "                '增加': 31917 -> 31917\n",
      "                '设计': 31735 -> 31735\n",
      "                '看点': 45032 -> 45032\n",
      "                 '，': 31123 -> 31123\n",
      "                 '还': 54656 -> 54656\n",
      "                 '让': 54772 -> 54772\n",
      "                '单品': 46539 -> 46539\n",
      "               '的设计': 34481 -> 34481\n",
      "                 '感': 54706 -> 54706\n",
      "                '更强': 43084 -> 43084\n",
      "                 '。': 31155 -> 31155\n",
      "                '腿部': 46799 -> 46799\n",
      "                '线条': 37216 -> 37216\n",
      "                 '若': 55351 -> 55351\n",
      "                 '隐': 55733 -> 55733\n",
      "                 '若': 55351 -> 55351\n",
      "                 '现': 54600 -> 54600\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                '性感': 40589 -> 40589\n",
      "                 '撩': 58521 -> 58521\n",
      "                 '人': 54533 -> 54533\n",
      "                 '。': 31155 -> 31155\n",
      "                '颜色': 33692 -> 33692\n",
      "                 '敲': 57004 -> 57004\n",
      "                '温柔': 34678 -> 34678\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                 '与': 54619 -> 54619\n",
      "                '裤子': 44722 -> 44722\n",
      "                '本身': 32754 -> 32754\n",
      "                 '所': 54626 -> 54626\n",
      "                '呈现': 33169 -> 33169\n",
      "               '的风格': 48084 -> 48084\n",
      "                '有点': 33149 -> 33149\n",
      "                 '反': 54955 -> 54955\n",
      "                 '差': 55342 -> 55342\n",
      "                 '萌': 56842 -> 56842\n",
      "                 '。': 31155 -> 31155\n",
      "                  '': 2 -> 2\n",
      "/root/miniconda3/lib/python3.12/site-packages/accelerate/accelerator.py:432: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n",
      "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n",
      "  warnings.warn(\n",
      "Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n",
      "max_steps is given, it will override any value given in num_train_epochs\n",
      "resume checkpoint from  checkpoint-3000\n",
      "Loading model from ./output-models/checkpoint-3000.\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running training *****\n",
      "  Num examples = 114,599\n",
      "  Num Epochs = 2\n",
      "  Instantaneous batch size per device = 8\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 8\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 15,000\n",
      "  Number of trainable parameters = 3,899,392\n",
      "  Continuing training from checkpoint, will skip to saved global_step\n",
      "  Continuing training from epoch 0\n",
      "  Continuing training from global step 3000\n",
      "  Will skip the first 0 epochs then the first 3000 batches in the first epoch.\n",
      "  0%|                                                 | 0/15000 [00:00<?, ?it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "{'loss': 3.2949, 'grad_norm': 4.897956371307373, 'learning_rate': 6.394666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.2074, 'grad_norm': 5.32358980178833, 'learning_rate': 6.389333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3504, 'grad_norm': 5.601253032684326, 'learning_rate': 6.384000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3705, 'grad_norm': 5.5244975090026855, 'learning_rate': 6.378666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3621, 'grad_norm': 4.921754837036133, 'learning_rate': 6.373333333333334e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3025, 'grad_norm': 5.9109063148498535, 'learning_rate': 6.368000000000001e-05, 'epoch': 0.21}\n",
      "{'loss': 3.3012, 'grad_norm': 4.866969108581543, 'learning_rate': 6.362666666666667e-05, 'epoch': 0.21}\n",
      "{'loss': 3.4258, 'grad_norm': 5.118508338928223, 'learning_rate': 6.357333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3645, 'grad_norm': 5.595391750335693, 'learning_rate': 6.352e-05, 'epoch': 0.22}\n",
      "{'loss': 3.2609, 'grad_norm': 5.031598091125488, 'learning_rate': 6.346666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3215, 'grad_norm': 6.207012176513672, 'learning_rate': 6.341333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3713, 'grad_norm': 4.796629428863525, 'learning_rate': 6.336e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3492, 'grad_norm': 5.036026477813721, 'learning_rate': 6.330666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.2967, 'grad_norm': 5.069619178771973, 'learning_rate': 6.325333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3217, 'grad_norm': 5.175796985626221, 'learning_rate': 6.32e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4162, 'grad_norm': 5.624761581420898, 'learning_rate': 6.314666666666668e-05, 'epoch': 0.22}\n",
      "{'loss': 3.365, 'grad_norm': 5.680983066558838, 'learning_rate': 6.309333333333333e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4219, 'grad_norm': 5.6177239418029785, 'learning_rate': 6.304e-05, 'epoch': 0.22}\n",
      "{'loss': 3.4402, 'grad_norm': 4.824097633361816, 'learning_rate': 6.298666666666668e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3664, 'grad_norm': 4.8829874992370605, 'learning_rate': 6.293333333333334e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3439, 'grad_norm': 5.459296703338623, 'learning_rate': 6.288000000000001e-05, 'epoch': 0.22}\n",
      "{'loss': 3.3934, 'grad_norm': 4.9453911781311035, 'learning_rate': 6.282666666666667e-05, 'epoch': 0.22}\n",
      "{'loss': 3.307, 'grad_norm': 5.264010906219482, 'learning_rate': 6.277333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.4387, 'grad_norm': 5.069202423095703, 'learning_rate': 6.272000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.348, 'grad_norm': 4.992135524749756, 'learning_rate': 6.266666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.334, 'grad_norm': 5.3393402099609375, 'learning_rate': 6.261333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2857, 'grad_norm': 4.78869104385376, 'learning_rate': 6.256000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2953, 'grad_norm': 5.490837574005127, 'learning_rate': 6.250666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3889, 'grad_norm': 5.010001182556152, 'learning_rate': 6.245333333333334e-05, 'epoch': 0.23}\n",
      "{'loss': 3.4232, 'grad_norm': 5.2562456130981445, 'learning_rate': 6.240000000000001e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2865, 'grad_norm': 4.702332496643066, 'learning_rate': 6.234666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2941, 'grad_norm': 5.461806774139404, 'learning_rate': 6.229333333333333e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2502, 'grad_norm': 5.477420330047607, 'learning_rate': 6.224e-05, 'epoch': 0.23}\n",
      "{'loss': 3.2941, 'grad_norm': 4.798463821411133, 'learning_rate': 6.218666666666667e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3414, 'grad_norm': 4.879148483276367, 'learning_rate': 6.213333333333333e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3979, 'grad_norm': 5.261689186096191, 'learning_rate': 6.208e-05, 'epoch': 0.23}\n",
      "{'loss': 3.3084, 'grad_norm': 4.654191017150879, 'learning_rate': 6.202666666666667e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3475, 'grad_norm': 5.36069393157959, 'learning_rate': 6.197333333333333e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3326, 'grad_norm': 5.288066387176514, 'learning_rate': 6.192e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2854, 'grad_norm': 4.831036567687988, 'learning_rate': 6.186666666666668e-05, 'epoch': 0.24}\n",
      "{'loss': 3.392, 'grad_norm': 5.748779296875, 'learning_rate': 6.181333333333333e-05, 'epoch': 0.24}\n",
      "{'loss': 3.333, 'grad_norm': 5.929157257080078, 'learning_rate': 6.176e-05, 'epoch': 0.24}\n",
      "{'loss': 3.4477, 'grad_norm': 5.025808334350586, 'learning_rate': 6.170666666666668e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2994, 'grad_norm': 5.919805526733398, 'learning_rate': 6.165333333333335e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2324, 'grad_norm': 5.139415264129639, 'learning_rate': 6.16e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3352, 'grad_norm': 5.226100444793701, 'learning_rate': 6.154666666666666e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3201, 'grad_norm': 5.380056381225586, 'learning_rate': 6.149333333333334e-05, 'epoch': 0.24}\n",
      "{'loss': 3.2941, 'grad_norm': 5.467723846435547, 'learning_rate': 6.144000000000001e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3602, 'grad_norm': 5.391326427459717, 'learning_rate': 6.138666666666667e-05, 'epoch': 0.24}\n",
      "{'loss': 3.3436, 'grad_norm': 4.556539058685303, 'learning_rate': 6.133333333333334e-05, 'epoch': 0.24}\n",
      " 23%|████████▍                           | 3500/15000 [06:47<2:33:20,  1.25it/s]***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.65s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:38<00:13, 13.59s/it]\u001b[A\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:48<00:00, 12.19s/it]\u001b[ABuilding prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 0.771 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "                                                                                \n",
      "\u001b[A{'eval_rouge-1': 31.131573999999997, 'eval_rouge-2': 6.806568000000001, 'eval_rouge-l': 22.332908000000003, 'eval_bleu-4': 0.029000807771779106, 'eval_runtime': 69.1157, 'eval_samples_per_second': 0.723, 'eval_steps_per_second': 0.058, 'epoch': 0.24}\n",
      " 23%|████████▍                           | 3500/15000 [07:56<2:33:20,  1.25it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:49<00:00, 12.19s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-3500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.5295, 'grad_norm': 5.228490829467773, 'learning_rate': 6.128000000000001e-05, 'epoch': 0.25}\n",
      "{'loss': 3.1455, 'grad_norm': 4.978667259216309, 'learning_rate': 6.122666666666667e-05, 'epoch': 0.25}\n",
      "{'loss': 3.2984, 'grad_norm': 5.361175060272217, 'learning_rate': 6.117333333333334e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3801, 'grad_norm': 5.886717319488525, 'learning_rate': 6.112000000000001e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3164, 'grad_norm': 5.311455249786377, 'learning_rate': 6.106666666666667e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3354, 'grad_norm': 5.578760147094727, 'learning_rate': 6.101333333333334e-05, 'epoch': 0.25}\n",
      "{'loss': 3.4023, 'grad_norm': 5.340369701385498, 'learning_rate': 6.0960000000000006e-05, 'epoch': 0.25}\n",
      "{'loss': 3.326, 'grad_norm': 5.87563419342041, 'learning_rate': 6.090666666666667e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3561, 'grad_norm': 5.659726142883301, 'learning_rate': 6.085333333333334e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3355, 'grad_norm': 5.37545108795166, 'learning_rate': 6.080000000000001e-05, 'epoch': 0.25}\n",
      "{'loss': 3.2279, 'grad_norm': 6.129094123840332, 'learning_rate': 6.074666666666667e-05, 'epoch': 0.25}\n",
      "{'loss': 3.2137, 'grad_norm': 5.635019779205322, 'learning_rate': 6.0693333333333344e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3844, 'grad_norm': 5.388713836669922, 'learning_rate': 6.064e-05, 'epoch': 0.25}\n",
      "{'loss': 3.324, 'grad_norm': 4.872616291046143, 'learning_rate': 6.058666666666667e-05, 'epoch': 0.25}\n",
      "{'loss': 3.332, 'grad_norm': 5.141757488250732, 'learning_rate': 6.053333333333334e-05, 'epoch': 0.25}\n",
      "{'loss': 3.3838, 'grad_norm': 5.025088787078857, 'learning_rate': 6.0480000000000004e-05, 'epoch': 0.26}\n",
      "{'loss': 3.382, 'grad_norm': 5.649606227874756, 'learning_rate': 6.042666666666667e-05, 'epoch': 0.26}\n",
      "{'loss': 3.2393, 'grad_norm': 5.001243591308594, 'learning_rate': 6.037333333333334e-05, 'epoch': 0.26}\n",
      "{'loss': 3.2811, 'grad_norm': 5.0735859870910645, 'learning_rate': 6.0320000000000005e-05, 'epoch': 0.26}\n",
      "{'loss': 3.2898, 'grad_norm': 5.652776718139648, 'learning_rate': 6.026666666666667e-05, 'epoch': 0.26}\n",
      "{'loss': 3.3898, 'grad_norm': 5.25575590133667, 'learning_rate': 6.021333333333334e-05, 'epoch': 0.26}\n",
      "{'loss': 3.3479, 'grad_norm': 5.213857173919678, 'learning_rate': 6.016000000000001e-05, 'epoch': 0.26}\n",
      "{'loss': 3.2482, 'grad_norm': 5.306223392486572, 'learning_rate': 6.010666666666667e-05, 'epoch': 0.26}\n",
      "{'loss': 3.2934, 'grad_norm': 5.349955081939697, 'learning_rate': 6.005333333333334e-05, 'epoch': 0.26}\n",
      "{'loss': 3.15, 'grad_norm': 5.327282428741455, 'learning_rate': 6.000000000000001e-05, 'epoch': 0.26}\n",
      "{'loss': 3.3232, 'grad_norm': 5.3189873695373535, 'learning_rate': 5.9946666666666666e-05, 'epoch': 0.26}\n",
      "{'loss': 3.3584, 'grad_norm': 5.168078899383545, 'learning_rate': 5.989333333333334e-05, 'epoch': 0.26}\n",
      "{'loss': 3.4109, 'grad_norm': 5.27184534072876, 'learning_rate': 5.984e-05, 'epoch': 0.26}\n",
      "{'loss': 3.3473, 'grad_norm': 6.311079978942871, 'learning_rate': 5.978666666666667e-05, 'epoch': 0.26}\n",
      "{'loss': 3.4064, 'grad_norm': 5.199020862579346, 'learning_rate': 5.973333333333334e-05, 'epoch': 0.27}\n",
      "{'loss': 3.2838, 'grad_norm': 6.088330268859863, 'learning_rate': 5.9680000000000005e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3016, 'grad_norm': 5.490572929382324, 'learning_rate': 5.962666666666667e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3111, 'grad_norm': 5.52147912979126, 'learning_rate': 5.957333333333334e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3734, 'grad_norm': 4.94262170791626, 'learning_rate': 5.9520000000000006e-05, 'epoch': 0.27}\n",
      "{'loss': 3.308, 'grad_norm': 5.465088844299316, 'learning_rate': 5.946666666666667e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3578, 'grad_norm': 5.290957450866699, 'learning_rate': 5.941333333333334e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3189, 'grad_norm': 5.102423667907715, 'learning_rate': 5.936000000000001e-05, 'epoch': 0.27}\n",
      "{'loss': 3.4004, 'grad_norm': 5.408662796020508, 'learning_rate': 5.9306666666666666e-05, 'epoch': 0.27}\n",
      "{'loss': 3.2979, 'grad_norm': 4.894774913787842, 'learning_rate': 5.925333333333334e-05, 'epoch': 0.27}\n",
      "{'loss': 3.2506, 'grad_norm': 4.943631649017334, 'learning_rate': 5.92e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3848, 'grad_norm': 5.302524566650391, 'learning_rate': 5.914666666666667e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3654, 'grad_norm': 5.121675491333008, 'learning_rate': 5.909333333333334e-05, 'epoch': 0.27}\n",
      "{'loss': 3.3375, 'grad_norm': 4.803257465362549, 'learning_rate': 5.9040000000000004e-05, 'epoch': 0.27}\n",
      "{'loss': 3.2512, 'grad_norm': 5.2806525230407715, 'learning_rate': 5.898666666666667e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2527, 'grad_norm': 5.7107834815979, 'learning_rate': 5.893333333333334e-05, 'epoch': 0.28}\n",
      "{'loss': 3.4039, 'grad_norm': 5.092886447906494, 'learning_rate': 5.8880000000000005e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2797, 'grad_norm': 5.539216041564941, 'learning_rate': 5.882666666666667e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2738, 'grad_norm': 4.792675495147705, 'learning_rate': 5.877333333333334e-05, 'epoch': 0.28}\n",
      "{'loss': 3.3732, 'grad_norm': 5.366162300109863, 'learning_rate': 5.872000000000001e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2682, 'grad_norm': 5.2314982414245605, 'learning_rate': 5.8666666666666665e-05, 'epoch': 0.28}\n",
      " 27%|█████████▌                          | 4000/15000 [14:43<2:18:35,  1.32it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:04<00:04,  2.26s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:23<00:09,  9.43s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.986852, 'eval_rouge-2': 7.7644899999999994, 'eval_rouge-l': 24.630391999999997, 'eval_bleu-4': 0.03585513714652547, 'eval_runtime': 46.9937, 'eval_samples_per_second': 1.064, 'eval_steps_per_second': 0.085, 'epoch': 0.28}\n",
      " 27%|█████████▌                          | 4000/15000 [15:30<2:18:35,  1.32it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:27<00:00,  7.08s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-4000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.2852, 'grad_norm': 5.712098121643066, 'learning_rate': 5.861333333333334e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2545, 'grad_norm': 5.778397560119629, 'learning_rate': 5.856e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2764, 'grad_norm': 5.17695426940918, 'learning_rate': 5.850666666666667e-05, 'epoch': 0.28}\n",
      "{'loss': 3.3102, 'grad_norm': 5.526034832000732, 'learning_rate': 5.845333333333334e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2916, 'grad_norm': 5.378983974456787, 'learning_rate': 5.84e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2992, 'grad_norm': 5.150753974914551, 'learning_rate': 5.834666666666667e-05, 'epoch': 0.28}\n",
      "{'loss': 3.3859, 'grad_norm': 5.6347784996032715, 'learning_rate': 5.829333333333334e-05, 'epoch': 0.28}\n",
      "{'loss': 3.2826, 'grad_norm': 5.497135639190674, 'learning_rate': 5.8240000000000005e-05, 'epoch': 0.28}\n",
      "{'loss': 3.3141, 'grad_norm': 5.194784164428711, 'learning_rate': 5.8186666666666677e-05, 'epoch': 0.29}\n",
      "{'loss': 3.4092, 'grad_norm': 5.753146171569824, 'learning_rate': 5.813333333333334e-05, 'epoch': 0.29}\n",
      "{'loss': 3.3004, 'grad_norm': 6.026071071624756, 'learning_rate': 5.8080000000000006e-05, 'epoch': 0.29}\n",
      "{'loss': 3.2934, 'grad_norm': 5.1370978355407715, 'learning_rate': 5.802666666666668e-05, 'epoch': 0.29}\n",
      "{'loss': 3.375, 'grad_norm': 5.743355751037598, 'learning_rate': 5.7973333333333336e-05, 'epoch': 0.29}\n",
      "{'loss': 3.2719, 'grad_norm': 4.765685081481934, 'learning_rate': 5.792e-05, 'epoch': 0.29}\n",
      "{'loss': 3.3318, 'grad_norm': 5.339196681976318, 'learning_rate': 5.786666666666667e-05, 'epoch': 0.29}\n",
      "{'loss': 3.3393, 'grad_norm': 6.0346784591674805, 'learning_rate': 5.781333333333334e-05, 'epoch': 0.29}\n",
      "{'loss': 3.352, 'grad_norm': 5.663724422454834, 'learning_rate': 5.776e-05, 'epoch': 0.29}\n",
      "{'loss': 3.2559, 'grad_norm': 5.637661933898926, 'learning_rate': 5.7706666666666674e-05, 'epoch': 0.29}\n",
      "{'loss': 3.3078, 'grad_norm': 5.618467807769775, 'learning_rate': 5.765333333333334e-05, 'epoch': 0.29}\n",
      "{'loss': 3.2572, 'grad_norm': 5.685177803039551, 'learning_rate': 5.7600000000000004e-05, 'epoch': 0.29}\n",
      "{'loss': 3.2934, 'grad_norm': 4.868959903717041, 'learning_rate': 5.7546666666666676e-05, 'epoch': 0.29}\n",
      "{'loss': 3.3641, 'grad_norm': 5.418812274932861, 'learning_rate': 5.749333333333334e-05, 'epoch': 0.29}\n",
      "{'loss': 3.1889, 'grad_norm': 5.502513885498047, 'learning_rate': 5.7440000000000006e-05, 'epoch': 0.3}\n",
      "{'loss': 3.2996, 'grad_norm': 5.318094253540039, 'learning_rate': 5.738666666666668e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3199, 'grad_norm': 5.19420051574707, 'learning_rate': 5.7333333333333336e-05, 'epoch': 0.3}\n",
      "{'loss': 3.2711, 'grad_norm': 5.437623977661133, 'learning_rate': 5.728e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3572, 'grad_norm': 5.6878156661987305, 'learning_rate': 5.722666666666667e-05, 'epoch': 0.3}\n",
      "{'loss': 3.277, 'grad_norm': 5.510706901550293, 'learning_rate': 5.717333333333334e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3705, 'grad_norm': 5.288455009460449, 'learning_rate': 5.712e-05, 'epoch': 0.3}\n",
      "{'loss': 3.2875, 'grad_norm': 6.134466171264648, 'learning_rate': 5.7066666666666674e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3373, 'grad_norm': 5.551464557647705, 'learning_rate': 5.701333333333334e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3674, 'grad_norm': 5.097568035125732, 'learning_rate': 5.6960000000000004e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3646, 'grad_norm': 5.5473551750183105, 'learning_rate': 5.6906666666666675e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3186, 'grad_norm': 6.03604793548584, 'learning_rate': 5.685333333333334e-05, 'epoch': 0.3}\n",
      "{'loss': 3.2865, 'grad_norm': 5.456296920776367, 'learning_rate': 5.6800000000000005e-05, 'epoch': 0.3}\n",
      "{'loss': 3.3828, 'grad_norm': 5.920198440551758, 'learning_rate': 5.674666666666668e-05, 'epoch': 0.3}\n",
      "{'loss': 3.2201, 'grad_norm': 5.725460052490234, 'learning_rate': 5.6693333333333335e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2951, 'grad_norm': 5.064412593841553, 'learning_rate': 5.664e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2654, 'grad_norm': 5.109148025512695, 'learning_rate': 5.658666666666667e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3523, 'grad_norm': 5.7234721183776855, 'learning_rate': 5.6533333333333336e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3029, 'grad_norm': 5.003806114196777, 'learning_rate': 5.648e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3166, 'grad_norm': 5.811991214752197, 'learning_rate': 5.642666666666667e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2914, 'grad_norm': 5.472377777099609, 'learning_rate': 5.637333333333334e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3674, 'grad_norm': 5.285922527313232, 'learning_rate': 5.632e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2881, 'grad_norm': 5.461345195770264, 'learning_rate': 5.6266666666666675e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2305, 'grad_norm': 5.347692012786865, 'learning_rate': 5.621333333333334e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3914, 'grad_norm': 5.606584548950195, 'learning_rate': 5.6160000000000004e-05, 'epoch': 0.31}\n",
      "{'loss': 3.4051, 'grad_norm': 5.400802135467529, 'learning_rate': 5.6106666666666676e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3553, 'grad_norm': 6.056102275848389, 'learning_rate': 5.6053333333333334e-05, 'epoch': 0.31}\n",
      "{'loss': 3.3225, 'grad_norm': 5.509881019592285, 'learning_rate': 5.6e-05, 'epoch': 0.31}\n",
      " 30%|██████████▊                         | 4500/15000 [22:18<2:34:36,  1.13it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.77s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:22<00:07,  7.01s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.352562, 'eval_rouge-2': 7.360436, 'eval_rouge-l': 25.588725999999998, 'eval_bleu-4': 0.0352452781220976, 'eval_runtime': 28.8002, 'eval_samples_per_second': 1.736, 'eval_steps_per_second': 0.139, 'epoch': 0.31}\n",
      " 30%|██████████▊                         | 4500/15000 [22:47<2:34:36,  1.13it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:25<00:00,  5.31s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-4500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.307, 'grad_norm': 5.632010459899902, 'learning_rate': 5.594666666666667e-05, 'epoch': 0.31}\n",
      "{'loss': 3.2375, 'grad_norm': 5.228156089782715, 'learning_rate': 5.5893333333333336e-05, 'epoch': 0.32}\n",
      "{'loss': 3.2834, 'grad_norm': 5.405455589294434, 'learning_rate': 5.584e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3807, 'grad_norm': 5.499380111694336, 'learning_rate': 5.578666666666667e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3057, 'grad_norm': 6.109163284301758, 'learning_rate': 5.573333333333334e-05, 'epoch': 0.32}\n",
      "{'loss': 3.2686, 'grad_norm': 5.082367897033691, 'learning_rate': 5.568e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3121, 'grad_norm': 5.846036434173584, 'learning_rate': 5.5626666666666674e-05, 'epoch': 0.32}\n",
      "{'loss': 3.126, 'grad_norm': 5.341469764709473, 'learning_rate': 5.557333333333334e-05, 'epoch': 0.32}\n",
      "{'loss': 3.334, 'grad_norm': 5.63256311416626, 'learning_rate': 5.5520000000000004e-05, 'epoch': 0.32}\n",
      "{'loss': 3.2977, 'grad_norm': 5.895234107971191, 'learning_rate': 5.5466666666666675e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3676, 'grad_norm': 5.577454566955566, 'learning_rate': 5.5413333333333334e-05, 'epoch': 0.32}\n",
      "{'loss': 3.2607, 'grad_norm': 5.449033260345459, 'learning_rate': 5.536e-05, 'epoch': 0.32}\n",
      "{'loss': 3.2686, 'grad_norm': 7.7011566162109375, 'learning_rate': 5.530666666666667e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3094, 'grad_norm': 4.822831630706787, 'learning_rate': 5.5253333333333335e-05, 'epoch': 0.32}\n",
      "{'loss': 3.3381, 'grad_norm': 5.471823215484619, 'learning_rate': 5.52e-05, 'epoch': 0.32}\n",
      "{'loss': 3.335, 'grad_norm': 7.039752006530762, 'learning_rate': 5.514666666666667e-05, 'epoch': 0.33}\n",
      "{'loss': 3.284, 'grad_norm': 5.242940425872803, 'learning_rate': 5.509333333333334e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2723, 'grad_norm': 5.927280902862549, 'learning_rate': 5.504e-05, 'epoch': 0.33}\n",
      "{'loss': 3.3146, 'grad_norm': 5.841073036193848, 'learning_rate': 5.498666666666667e-05, 'epoch': 0.33}\n",
      "{'loss': 3.3014, 'grad_norm': 5.804082870483398, 'learning_rate': 5.493333333333334e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2002, 'grad_norm': 5.3993659019470215, 'learning_rate': 5.488000000000001e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2348, 'grad_norm': 5.747361660003662, 'learning_rate': 5.4826666666666675e-05, 'epoch': 0.33}\n",
      "{'loss': 3.3139, 'grad_norm': 6.018914222717285, 'learning_rate': 5.477333333333333e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2889, 'grad_norm': 5.262943267822266, 'learning_rate': 5.472000000000001e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2027, 'grad_norm': 5.347361087799072, 'learning_rate': 5.466666666666667e-05, 'epoch': 0.33}\n",
      "{'loss': 3.327, 'grad_norm': 5.671024322509766, 'learning_rate': 5.4613333333333334e-05, 'epoch': 0.33}\n",
      "{'loss': 3.3033, 'grad_norm': 5.421864032745361, 'learning_rate': 5.4560000000000006e-05, 'epoch': 0.33}\n",
      "{'loss': 3.3047, 'grad_norm': 5.701122760772705, 'learning_rate': 5.450666666666667e-05, 'epoch': 0.33}\n",
      "{'loss': 3.2383, 'grad_norm': 5.293639659881592, 'learning_rate': 5.4453333333333336e-05, 'epoch': 0.33}\n",
      "{'loss': 3.4078, 'grad_norm': 5.460535526275635, 'learning_rate': 5.440000000000001e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3041, 'grad_norm': 5.331636905670166, 'learning_rate': 5.434666666666667e-05, 'epoch': 0.34}\n",
      "{'loss': 3.308, 'grad_norm': 5.364526748657227, 'learning_rate': 5.429333333333334e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3563, 'grad_norm': 5.44814395904541, 'learning_rate': 5.424000000000001e-05, 'epoch': 0.34}\n",
      "{'loss': 3.301, 'grad_norm': 5.3358306884765625, 'learning_rate': 5.4186666666666674e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3443, 'grad_norm': 5.735746383666992, 'learning_rate': 5.413333333333333e-05, 'epoch': 0.34}\n",
      "{'loss': 3.4057, 'grad_norm': 5.619082450866699, 'learning_rate': 5.408000000000001e-05, 'epoch': 0.34}\n",
      "{'loss': 3.4104, 'grad_norm': 5.627418518066406, 'learning_rate': 5.402666666666667e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3768, 'grad_norm': 5.334417343139648, 'learning_rate': 5.3973333333333334e-05, 'epoch': 0.34}\n",
      "{'loss': 3.2924, 'grad_norm': 5.446460723876953, 'learning_rate': 5.3920000000000006e-05, 'epoch': 0.34}\n",
      "{'loss': 3.2408, 'grad_norm': 5.490811824798584, 'learning_rate': 5.386666666666667e-05, 'epoch': 0.34}\n",
      "{'loss': 3.2359, 'grad_norm': 5.344498634338379, 'learning_rate': 5.3813333333333335e-05, 'epoch': 0.34}\n",
      "{'loss': 3.2357, 'grad_norm': 5.387383937835693, 'learning_rate': 5.376000000000001e-05, 'epoch': 0.34}\n",
      "{'loss': 3.0553, 'grad_norm': 6.208102226257324, 'learning_rate': 5.370666666666667e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3604, 'grad_norm': 5.320590019226074, 'learning_rate': 5.365333333333334e-05, 'epoch': 0.34}\n",
      "{'loss': 3.3061, 'grad_norm': 5.9459757804870605, 'learning_rate': 5.360000000000001e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3055, 'grad_norm': 6.297903537750244, 'learning_rate': 5.3546666666666674e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3232, 'grad_norm': 6.55083703994751, 'learning_rate': 5.349333333333333e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3375, 'grad_norm': 5.974088668823242, 'learning_rate': 5.344000000000001e-05, 'epoch': 0.35}\n",
      "{'loss': 3.2875, 'grad_norm': 6.08083963394165, 'learning_rate': 5.338666666666667e-05, 'epoch': 0.35}\n",
      "{'loss': 3.1961, 'grad_norm': 5.381326675415039, 'learning_rate': 5.333333333333333e-05, 'epoch': 0.35}\n",
      " 33%|████████████                        | 5000/15000 [29:31<2:21:22,  1.18it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:19<00:19,  9.63s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:22<00:06,  6.79s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.406324, 'eval_rouge-2': 7.964716000000001, 'eval_rouge-l': 24.243748, 'eval_bleu-4': 0.03567371518212268, 'eval_runtime': 44.2192, 'eval_samples_per_second': 1.131, 'eval_steps_per_second': 0.09, 'epoch': 0.35}\n",
      " 33%|████████████                        | 5000/15000 [30:15<2:21:22,  1.18it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:24<00:00,  5.09s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-5000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3541, 'grad_norm': 5.553667068481445, 'learning_rate': 5.3280000000000005e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3205, 'grad_norm': 5.374237537384033, 'learning_rate': 5.322666666666667e-05, 'epoch': 0.35}\n",
      "{'loss': 3.2736, 'grad_norm': 5.413828372955322, 'learning_rate': 5.3173333333333335e-05, 'epoch': 0.35}\n",
      "{'loss': 3.2408, 'grad_norm': 5.944416522979736, 'learning_rate': 5.3120000000000006e-05, 'epoch': 0.35}\n",
      "{'loss': 3.2529, 'grad_norm': 5.425754547119141, 'learning_rate': 5.306666666666667e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3268, 'grad_norm': 5.583067893981934, 'learning_rate': 5.3013333333333336e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3236, 'grad_norm': 5.379122257232666, 'learning_rate': 5.296000000000001e-05, 'epoch': 0.35}\n",
      "{'loss': 3.3076, 'grad_norm': 6.014839172363281, 'learning_rate': 5.290666666666667e-05, 'epoch': 0.35}\n",
      "{'loss': 3.232, 'grad_norm': 5.573797702789307, 'learning_rate': 5.285333333333333e-05, 'epoch': 0.36}\n",
      "{'loss': 3.3393, 'grad_norm': 5.330376148223877, 'learning_rate': 5.280000000000001e-05, 'epoch': 0.36}\n",
      "{'loss': 3.309, 'grad_norm': 5.016627788543701, 'learning_rate': 5.274666666666667e-05, 'epoch': 0.36}\n",
      "{'loss': 3.3326, 'grad_norm': 5.30073356628418, 'learning_rate': 5.269333333333333e-05, 'epoch': 0.36}\n",
      "{'loss': 3.235, 'grad_norm': 5.08848762512207, 'learning_rate': 5.2640000000000004e-05, 'epoch': 0.36}\n",
      "{'loss': 3.3, 'grad_norm': 5.406621932983398, 'learning_rate': 5.258666666666667e-05, 'epoch': 0.36}\n",
      "{'loss': 3.4422, 'grad_norm': 5.533618927001953, 'learning_rate': 5.2533333333333334e-05, 'epoch': 0.36}\n",
      "{'loss': 3.2633, 'grad_norm': 5.354005336761475, 'learning_rate': 5.2480000000000006e-05, 'epoch': 0.36}\n",
      "{'loss': 3.4086, 'grad_norm': 5.4056901931762695, 'learning_rate': 5.242666666666667e-05, 'epoch': 0.36}\n",
      "{'loss': 3.2695, 'grad_norm': 5.280728816986084, 'learning_rate': 5.2373333333333336e-05, 'epoch': 0.36}\n",
      "{'loss': 3.3316, 'grad_norm': 5.382948398590088, 'learning_rate': 5.232000000000001e-05, 'epoch': 0.36}\n",
      "{'loss': 3.2035, 'grad_norm': 6.033143997192383, 'learning_rate': 5.226666666666667e-05, 'epoch': 0.36}\n",
      "{'loss': 3.2879, 'grad_norm': 6.068592071533203, 'learning_rate': 5.221333333333333e-05, 'epoch': 0.36}\n",
      "{'loss': 3.24, 'grad_norm': 5.226294040679932, 'learning_rate': 5.216000000000001e-05, 'epoch': 0.36}\n",
      "{'loss': 3.2594, 'grad_norm': 6.378615856170654, 'learning_rate': 5.210666666666667e-05, 'epoch': 0.37}\n",
      "{'loss': 3.3439, 'grad_norm': 5.394690036773682, 'learning_rate': 5.205333333333333e-05, 'epoch': 0.37}\n",
      "{'loss': 3.3539, 'grad_norm': 5.578001022338867, 'learning_rate': 5.2000000000000004e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2645, 'grad_norm': 6.5069050788879395, 'learning_rate': 5.194666666666667e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2541, 'grad_norm': 5.917240142822266, 'learning_rate': 5.1893333333333333e-05, 'epoch': 0.37}\n",
      "{'loss': 3.3098, 'grad_norm': 5.53936243057251, 'learning_rate': 5.1840000000000005e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2492, 'grad_norm': 5.550012588500977, 'learning_rate': 5.178666666666667e-05, 'epoch': 0.37}\n",
      "{'loss': 3.3049, 'grad_norm': 5.320505619049072, 'learning_rate': 5.1733333333333335e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2961, 'grad_norm': 5.480510234832764, 'learning_rate': 5.168000000000001e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2857, 'grad_norm': 5.569889068603516, 'learning_rate': 5.162666666666667e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2508, 'grad_norm': 6.177143573760986, 'learning_rate': 5.157333333333334e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2582, 'grad_norm': 5.186578750610352, 'learning_rate': 5.152000000000001e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2846, 'grad_norm': 5.73923397064209, 'learning_rate': 5.1466666666666666e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2197, 'grad_norm': 5.961581707000732, 'learning_rate': 5.1413333333333345e-05, 'epoch': 0.37}\n",
      "{'loss': 3.2156, 'grad_norm': 5.445019245147705, 'learning_rate': 5.136e-05, 'epoch': 0.37}\n",
      "{'loss': 3.3521, 'grad_norm': 5.510255336761475, 'learning_rate': 5.130666666666667e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2582, 'grad_norm': 5.235689640045166, 'learning_rate': 5.125333333333334e-05, 'epoch': 0.38}\n",
      "{'loss': 3.4121, 'grad_norm': 5.788441181182861, 'learning_rate': 5.1200000000000004e-05, 'epoch': 0.38}\n",
      "{'loss': 3.3371, 'grad_norm': 5.621924877166748, 'learning_rate': 5.114666666666667e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2109, 'grad_norm': 5.65928316116333, 'learning_rate': 5.109333333333334e-05, 'epoch': 0.38}\n",
      "{'loss': 3.3551, 'grad_norm': 6.418652057647705, 'learning_rate': 5.1040000000000006e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2609, 'grad_norm': 6.40399694442749, 'learning_rate': 5.098666666666667e-05, 'epoch': 0.38}\n",
      "{'loss': 3.3064, 'grad_norm': 5.792829990386963, 'learning_rate': 5.093333333333334e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2049, 'grad_norm': 5.837188243865967, 'learning_rate': 5.088000000000001e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2848, 'grad_norm': 5.195218563079834, 'learning_rate': 5.0826666666666666e-05, 'epoch': 0.38}\n",
      "{'loss': 3.3393, 'grad_norm': 5.5425519943237305, 'learning_rate': 5.0773333333333344e-05, 'epoch': 0.38}\n",
      "{'loss': 3.2305, 'grad_norm': 5.930248260498047, 'learning_rate': 5.072e-05, 'epoch': 0.38}\n",
      "{'loss': 3.1639, 'grad_norm': 5.716949462890625, 'learning_rate': 5.066666666666667e-05, 'epoch': 0.38}\n",
      " 37%|█████████████▏                      | 5500/15000 [36:59<2:09:56,  1.22it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.69s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:22<00:08,  8.98s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.8896, 'eval_rouge-2': 8.410794, 'eval_rouge-l': 25.596772, 'eval_bleu-4': 0.03832042123781569, 'eval_runtime': 44.6365, 'eval_samples_per_second': 1.12, 'eval_steps_per_second': 0.09, 'epoch': 0.38}\n",
      " 37%|█████████████▏                      | 5500/15000 [37:44<2:09:56,  1.22it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:25<00:00,  6.48s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-5500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.2863, 'grad_norm': 5.857490062713623, 'learning_rate': 5.061333333333334e-05, 'epoch': 0.38}\n",
      "{'loss': 3.252, 'grad_norm': 5.573298931121826, 'learning_rate': 5.0560000000000004e-05, 'epoch': 0.39}\n",
      "{'loss': 3.4049, 'grad_norm': 5.349109649658203, 'learning_rate': 5.050666666666667e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2437, 'grad_norm': 5.550709247589111, 'learning_rate': 5.045333333333334e-05, 'epoch': 0.39}\n",
      "{'loss': 3.3113, 'grad_norm': 5.978433609008789, 'learning_rate': 5.0400000000000005e-05, 'epoch': 0.39}\n",
      "{'loss': 3.3107, 'grad_norm': 5.4560112953186035, 'learning_rate': 5.034666666666667e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2609, 'grad_norm': 5.768387317657471, 'learning_rate': 5.029333333333334e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2508, 'grad_norm': 6.120139122009277, 'learning_rate': 5.024000000000001e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2248, 'grad_norm': 5.452657222747803, 'learning_rate': 5.0186666666666665e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2682, 'grad_norm': 5.968078136444092, 'learning_rate': 5.0133333333333343e-05, 'epoch': 0.39}\n",
      "{'loss': 3.3266, 'grad_norm': 5.730510234832764, 'learning_rate': 5.008e-05, 'epoch': 0.39}\n",
      "{'loss': 3.3084, 'grad_norm': 5.454583644866943, 'learning_rate': 5.0026666666666667e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2863, 'grad_norm': 5.980642318725586, 'learning_rate': 4.997333333333334e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2658, 'grad_norm': 5.710155963897705, 'learning_rate': 4.992e-05, 'epoch': 0.39}\n",
      "{'loss': 3.2959, 'grad_norm': 6.139435291290283, 'learning_rate': 4.986666666666667e-05, 'epoch': 0.39}\n",
      "{'loss': 3.3617, 'grad_norm': 5.5833845138549805, 'learning_rate': 4.981333333333334e-05, 'epoch': 0.4}\n",
      "{'loss': 3.19, 'grad_norm': 5.59699821472168, 'learning_rate': 4.9760000000000005e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2771, 'grad_norm': 5.563565731048584, 'learning_rate': 4.970666666666667e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2561, 'grad_norm': 5.979054927825928, 'learning_rate': 4.965333333333334e-05, 'epoch': 0.4}\n",
      "{'loss': 3.3373, 'grad_norm': 5.533463001251221, 'learning_rate': 4.9600000000000006e-05, 'epoch': 0.4}\n",
      "{'loss': 3.3066, 'grad_norm': 6.093795299530029, 'learning_rate': 4.9546666666666664e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2979, 'grad_norm': 5.552426338195801, 'learning_rate': 4.949333333333334e-05, 'epoch': 0.4}\n",
      "{'loss': 3.1809, 'grad_norm': 5.817966461181641, 'learning_rate': 4.944e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2992, 'grad_norm': 5.490100383758545, 'learning_rate': 4.9386666666666666e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2012, 'grad_norm': 5.781850337982178, 'learning_rate': 4.933333333333334e-05, 'epoch': 0.4}\n",
      "{'loss': 3.4002, 'grad_norm': 5.804357051849365, 'learning_rate': 4.928e-05, 'epoch': 0.4}\n",
      "{'loss': 3.3119, 'grad_norm': 5.361230373382568, 'learning_rate': 4.922666666666667e-05, 'epoch': 0.4}\n",
      "{'loss': 3.4086, 'grad_norm': 5.787779331207275, 'learning_rate': 4.917333333333334e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2875, 'grad_norm': 5.7218170166015625, 'learning_rate': 4.9120000000000004e-05, 'epoch': 0.4}\n",
      "{'loss': 3.3234, 'grad_norm': 5.281899452209473, 'learning_rate': 4.906666666666667e-05, 'epoch': 0.4}\n",
      "{'loss': 3.2729, 'grad_norm': 5.208200931549072, 'learning_rate': 4.901333333333334e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2537, 'grad_norm': 5.994268894195557, 'learning_rate': 4.8960000000000006e-05, 'epoch': 0.41}\n",
      "{'loss': 3.198, 'grad_norm': 6.329117298126221, 'learning_rate': 4.8906666666666664e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2143, 'grad_norm': 5.202082633972168, 'learning_rate': 4.885333333333334e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2674, 'grad_norm': 5.8927741050720215, 'learning_rate': 4.88e-05, 'epoch': 0.41}\n",
      "{'loss': 3.1275, 'grad_norm': 5.727474212646484, 'learning_rate': 4.8746666666666665e-05, 'epoch': 0.41}\n",
      "{'loss': 3.3119, 'grad_norm': 5.6534905433654785, 'learning_rate': 4.869333333333334e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2986, 'grad_norm': 5.96226692199707, 'learning_rate': 4.864e-05, 'epoch': 0.41}\n",
      "{'loss': 3.3506, 'grad_norm': 6.58071756362915, 'learning_rate': 4.858666666666667e-05, 'epoch': 0.41}\n",
      "{'loss': 3.3459, 'grad_norm': 5.494232177734375, 'learning_rate': 4.853333333333334e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2197, 'grad_norm': 6.51775598526001, 'learning_rate': 4.8480000000000003e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2967, 'grad_norm': 6.122310161590576, 'learning_rate': 4.842666666666667e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2412, 'grad_norm': 5.227849960327148, 'learning_rate': 4.837333333333334e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2598, 'grad_norm': 5.58767557144165, 'learning_rate': 4.8320000000000005e-05, 'epoch': 0.41}\n",
      "{'loss': 3.2398, 'grad_norm': 5.883005142211914, 'learning_rate': 4.826666666666668e-05, 'epoch': 0.42}\n",
      "{'loss': 3.1857, 'grad_norm': 5.267283916473389, 'learning_rate': 4.821333333333334e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2949, 'grad_norm': 5.602060794830322, 'learning_rate': 4.816e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2998, 'grad_norm': 5.2154412269592285, 'learning_rate': 4.810666666666668e-05, 'epoch': 0.42}\n",
      "{'loss': 3.1857, 'grad_norm': 6.07392692565918, 'learning_rate': 4.8053333333333336e-05, 'epoch': 0.42}\n",
      "{'loss': 3.324, 'grad_norm': 5.620429992675781, 'learning_rate': 4.8e-05, 'epoch': 0.42}\n",
      " 40%|██████████████▍                     | 6000/15000 [44:28<2:01:41,  1.23it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:04<00:04,  2.15s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:07<00:02,  2.52s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.915288000000004, 'eval_rouge-2': 7.621120000000001, 'eval_rouge-l': 25.362002000000004, 'eval_bleu-4': 0.03551728221773165, 'eval_runtime': 29.0337, 'eval_samples_per_second': 1.722, 'eval_steps_per_second': 0.138, 'epoch': 0.42}\n",
      " 40%|██████████████▍                     | 6000/15000 [44:57<2:01:41,  1.23it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:09<00:00,  2.33s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-6000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.2811, 'grad_norm': 6.243320465087891, 'learning_rate': 4.794666666666667e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2752, 'grad_norm': 5.4658050537109375, 'learning_rate': 4.789333333333334e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2217, 'grad_norm': 6.1733856201171875, 'learning_rate': 4.784e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2656, 'grad_norm': 5.322647571563721, 'learning_rate': 4.7786666666666674e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2346, 'grad_norm': 5.6672563552856445, 'learning_rate': 4.773333333333334e-05, 'epoch': 0.42}\n",
      "{'loss': 3.1992, 'grad_norm': 5.773327350616455, 'learning_rate': 4.7680000000000004e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2117, 'grad_norm': 6.0716776847839355, 'learning_rate': 4.7626666666666676e-05, 'epoch': 0.42}\n",
      "{'loss': 3.2699, 'grad_norm': 5.6708903312683105, 'learning_rate': 4.757333333333334e-05, 'epoch': 0.42}\n",
      "{'loss': 3.1783, 'grad_norm': 5.378319263458252, 'learning_rate': 4.752e-05, 'epoch': 0.43}\n",
      "{'loss': 3.3191, 'grad_norm': 5.810442924499512, 'learning_rate': 4.746666666666668e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2207, 'grad_norm': 5.816871166229248, 'learning_rate': 4.7413333333333336e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2885, 'grad_norm': 5.560327529907227, 'learning_rate': 4.736e-05, 'epoch': 0.43}\n",
      "{'loss': 3.226, 'grad_norm': 5.826242446899414, 'learning_rate': 4.730666666666667e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2439, 'grad_norm': 5.718124866485596, 'learning_rate': 4.725333333333334e-05, 'epoch': 0.43}\n",
      "{'loss': 3.3525, 'grad_norm': 5.6531171798706055, 'learning_rate': 4.72e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2873, 'grad_norm': 5.882595062255859, 'learning_rate': 4.7146666666666674e-05, 'epoch': 0.43}\n",
      "{'loss': 3.3064, 'grad_norm': 5.496496200561523, 'learning_rate': 4.709333333333334e-05, 'epoch': 0.43}\n",
      "{'loss': 3.3287, 'grad_norm': 5.717897891998291, 'learning_rate': 4.7040000000000004e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2104, 'grad_norm': 5.485508441925049, 'learning_rate': 4.6986666666666675e-05, 'epoch': 0.43}\n",
      "{'loss': 3.1949, 'grad_norm': 5.572957515716553, 'learning_rate': 4.693333333333334e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2291, 'grad_norm': 5.922454833984375, 'learning_rate': 4.688e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2689, 'grad_norm': 5.634580612182617, 'learning_rate': 4.682666666666668e-05, 'epoch': 0.43}\n",
      "{'loss': 3.2687, 'grad_norm': 6.724945545196533, 'learning_rate': 4.6773333333333335e-05, 'epoch': 0.43}\n",
      "{'loss': 3.3246, 'grad_norm': 6.187289237976074, 'learning_rate': 4.672e-05, 'epoch': 0.44}\n",
      "{'loss': 3.3645, 'grad_norm': 6.667413711547852, 'learning_rate': 4.666666666666667e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2271, 'grad_norm': 5.732685565948486, 'learning_rate': 4.6613333333333337e-05, 'epoch': 0.44}\n",
      "{'loss': 3.3283, 'grad_norm': 5.902423858642578, 'learning_rate': 4.656e-05, 'epoch': 0.44}\n",
      "{'loss': 3.3148, 'grad_norm': 6.202871322631836, 'learning_rate': 4.650666666666667e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2145, 'grad_norm': 5.883023738861084, 'learning_rate': 4.645333333333334e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2451, 'grad_norm': 5.43007230758667, 'learning_rate': 4.64e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2393, 'grad_norm': 6.099133491516113, 'learning_rate': 4.6346666666666675e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2328, 'grad_norm': 5.620474338531494, 'learning_rate': 4.629333333333334e-05, 'epoch': 0.44}\n",
      "{'loss': 3.3979, 'grad_norm': 5.471859931945801, 'learning_rate': 4.624e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2162, 'grad_norm': 5.554949760437012, 'learning_rate': 4.6186666666666676e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2377, 'grad_norm': 5.942038536071777, 'learning_rate': 4.6133333333333334e-05, 'epoch': 0.44}\n",
      "{'loss': 3.1771, 'grad_norm': 5.624863624572754, 'learning_rate': 4.608e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2861, 'grad_norm': 5.7202534675598145, 'learning_rate': 4.602666666666667e-05, 'epoch': 0.44}\n",
      "{'loss': 3.2764, 'grad_norm': 5.990339279174805, 'learning_rate': 4.5973333333333336e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2736, 'grad_norm': 6.262556552886963, 'learning_rate': 4.592e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2703, 'grad_norm': 5.701816082000732, 'learning_rate': 4.586666666666667e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2699, 'grad_norm': 6.632707595825195, 'learning_rate': 4.581333333333334e-05, 'epoch': 0.45}\n",
      "{'loss': 3.24, 'grad_norm': 5.922563076019287, 'learning_rate': 4.576e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2914, 'grad_norm': 5.947498798370361, 'learning_rate': 4.5706666666666674e-05, 'epoch': 0.45}\n",
      "{'loss': 3.1555, 'grad_norm': 7.7879533767700195, 'learning_rate': 4.565333333333334e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2645, 'grad_norm': 6.062169075012207, 'learning_rate': 4.56e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2799, 'grad_norm': 6.009004592895508, 'learning_rate': 4.5546666666666676e-05, 'epoch': 0.45}\n",
      "{'loss': 3.1803, 'grad_norm': 6.19952392578125, 'learning_rate': 4.5493333333333334e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2223, 'grad_norm': 5.417486667633057, 'learning_rate': 4.544e-05, 'epoch': 0.45}\n",
      "{'loss': 3.3268, 'grad_norm': 5.601931095123291, 'learning_rate': 4.538666666666667e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2107, 'grad_norm': 5.7229180335998535, 'learning_rate': 4.5333333333333335e-05, 'epoch': 0.45}\n",
      " 43%|███████████████▌                    | 6500/15000 [51:45<1:58:21,  1.20it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:04<00:04,  2.04s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:23<00:09,  9.20s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.630120000000005, 'eval_rouge-2': 7.789558, 'eval_rouge-l': 25.78761, 'eval_bleu-4': 0.03799622035500735, 'eval_runtime': 29.8719, 'eval_samples_per_second': 1.674, 'eval_steps_per_second': 0.134, 'epoch': 0.45}\n",
      " 43%|███████████████▌                    | 6500/15000 [52:15<1:58:21,  1.20it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:26<00:00,  6.83s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-6500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.2684, 'grad_norm': 5.995885848999023, 'learning_rate': 4.528e-05, 'epoch': 0.45}\n",
      "{'loss': 3.2873, 'grad_norm': 5.962080478668213, 'learning_rate': 4.522666666666667e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2602, 'grad_norm': 5.676670551300049, 'learning_rate': 4.517333333333334e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2002, 'grad_norm': 5.8636627197265625, 'learning_rate': 4.512e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2555, 'grad_norm': 6.316717147827148, 'learning_rate': 4.506666666666667e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2014, 'grad_norm': 5.541333198547363, 'learning_rate': 4.501333333333334e-05, 'epoch': 0.46}\n",
      "{'loss': 3.352, 'grad_norm': 6.286227226257324, 'learning_rate': 4.496000000000001e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2613, 'grad_norm': 5.85775899887085, 'learning_rate': 4.4906666666666675e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2166, 'grad_norm': 6.541617393493652, 'learning_rate': 4.485333333333333e-05, 'epoch': 0.46}\n",
      "{'loss': 3.3729, 'grad_norm': 6.382185459136963, 'learning_rate': 4.4800000000000005e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2311, 'grad_norm': 5.867575168609619, 'learning_rate': 4.474666666666667e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2469, 'grad_norm': 5.9456000328063965, 'learning_rate': 4.4693333333333335e-05, 'epoch': 0.46}\n",
      "{'loss': 3.3025, 'grad_norm': 6.298067569732666, 'learning_rate': 4.4640000000000006e-05, 'epoch': 0.46}\n",
      "{'loss': 3.2643, 'grad_norm': 6.183221817016602, 'learning_rate': 4.458666666666667e-05, 'epoch': 0.46}\n",
      "{'loss': 3.1709, 'grad_norm': 5.6027350425720215, 'learning_rate': 4.4533333333333336e-05, 'epoch': 0.46}\n",
      "{'loss': 3.3023, 'grad_norm': 5.178913593292236, 'learning_rate': 4.448000000000001e-05, 'epoch': 0.46}\n",
      "{'loss': 3.342, 'grad_norm': 5.931337356567383, 'learning_rate': 4.442666666666667e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2928, 'grad_norm': 6.200148582458496, 'learning_rate': 4.437333333333334e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2084, 'grad_norm': 5.677065372467041, 'learning_rate': 4.432000000000001e-05, 'epoch': 0.47}\n",
      "{'loss': 3.3199, 'grad_norm': 6.0048508644104, 'learning_rate': 4.4266666666666674e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2998, 'grad_norm': 5.671111106872559, 'learning_rate': 4.421333333333333e-05, 'epoch': 0.47}\n",
      "{'loss': 3.276, 'grad_norm': 5.7437005043029785, 'learning_rate': 4.4160000000000004e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2439, 'grad_norm': 6.4255828857421875, 'learning_rate': 4.410666666666667e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2836, 'grad_norm': 5.682497024536133, 'learning_rate': 4.4053333333333334e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2736, 'grad_norm': 5.7651143074035645, 'learning_rate': 4.4000000000000006e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2625, 'grad_norm': 6.366697788238525, 'learning_rate': 4.394666666666667e-05, 'epoch': 0.47}\n",
      "{'loss': 3.1576, 'grad_norm': 5.794517517089844, 'learning_rate': 4.3893333333333335e-05, 'epoch': 0.47}\n",
      "{'loss': 3.4613, 'grad_norm': 5.458343505859375, 'learning_rate': 4.384000000000001e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2592, 'grad_norm': 7.105029106140137, 'learning_rate': 4.378666666666667e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2391, 'grad_norm': 5.883274078369141, 'learning_rate': 4.373333333333334e-05, 'epoch': 0.47}\n",
      "{'loss': 3.2473, 'grad_norm': 5.876780033111572, 'learning_rate': 4.368000000000001e-05, 'epoch': 0.48}\n",
      "{'loss': 3.1404, 'grad_norm': 6.140675067901611, 'learning_rate': 4.3626666666666674e-05, 'epoch': 0.48}\n",
      "{'loss': 3.3314, 'grad_norm': 6.0520100593566895, 'learning_rate': 4.357333333333333e-05, 'epoch': 0.48}\n",
      "{'loss': 3.3441, 'grad_norm': 5.571782112121582, 'learning_rate': 4.3520000000000003e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2652, 'grad_norm': 6.246925354003906, 'learning_rate': 4.346666666666667e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2848, 'grad_norm': 5.945408821105957, 'learning_rate': 4.341333333333333e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2365, 'grad_norm': 5.7486066818237305, 'learning_rate': 4.3360000000000005e-05, 'epoch': 0.48}\n",
      "{'loss': 3.1797, 'grad_norm': 5.745181560516357, 'learning_rate': 4.330666666666667e-05, 'epoch': 0.48}\n",
      "{'loss': 3.3408, 'grad_norm': 5.564694881439209, 'learning_rate': 4.3253333333333335e-05, 'epoch': 0.48}\n",
      "{'loss': 3.1881, 'grad_norm': 6.276146411895752, 'learning_rate': 4.3200000000000007e-05, 'epoch': 0.48}\n",
      "{'loss': 3.3041, 'grad_norm': 5.80435848236084, 'learning_rate': 4.314666666666667e-05, 'epoch': 0.48}\n",
      "{'loss': 3.258, 'grad_norm': 5.972243785858154, 'learning_rate': 4.3093333333333336e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2357, 'grad_norm': 5.543959140777588, 'learning_rate': 4.304000000000001e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2316, 'grad_norm': 5.543025016784668, 'learning_rate': 4.298666666666667e-05, 'epoch': 0.48}\n",
      "{'loss': 3.2668, 'grad_norm': 5.817123889923096, 'learning_rate': 4.293333333333333e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3115, 'grad_norm': 6.077864646911621, 'learning_rate': 4.288e-05, 'epoch': 0.49}\n",
      "{'loss': 3.2014, 'grad_norm': 6.088281154632568, 'learning_rate': 4.282666666666667e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3281, 'grad_norm': 5.744490146636963, 'learning_rate': 4.277333333333333e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3324, 'grad_norm': 5.319087028503418, 'learning_rate': 4.2720000000000004e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3344, 'grad_norm': 5.684542179107666, 'learning_rate': 4.266666666666667e-05, 'epoch': 0.49}\n",
      " 47%|████████████████▊                   | 7000/15000 [59:03<1:42:57,  1.29it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:07<00:07,  3.64s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:10<00:03,  3.34s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 33.105964, 'eval_rouge-2': 8.055708, 'eval_rouge-l': 25.197688000000003, 'eval_bleu-4': 0.037354378968237026, 'eval_runtime': 32.6413, 'eval_samples_per_second': 1.532, 'eval_steps_per_second': 0.123, 'epoch': 0.49}\n",
      " 47%|████████████████▊                   | 7000/15000 [59:36<1:42:57,  1.29it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:12<00:00,  2.95s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-7000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.1512, 'grad_norm': 6.6296892166137695, 'learning_rate': 4.2613333333333334e-05, 'epoch': 0.49}\n",
      "{'loss': 3.2111, 'grad_norm': 6.534343719482422, 'learning_rate': 4.2560000000000006e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3482, 'grad_norm': 6.104092121124268, 'learning_rate': 4.250666666666667e-05, 'epoch': 0.49}\n",
      "{'loss': 3.1828, 'grad_norm': 6.028448581695557, 'learning_rate': 4.2453333333333336e-05, 'epoch': 0.49}\n",
      "{'loss': 3.1803, 'grad_norm': 5.884028434753418, 'learning_rate': 4.240000000000001e-05, 'epoch': 0.49}\n",
      "{'loss': 3.2553, 'grad_norm': 5.826660633087158, 'learning_rate': 4.234666666666667e-05, 'epoch': 0.49}\n",
      "{'loss': 3.309, 'grad_norm': 6.847716331481934, 'learning_rate': 4.229333333333333e-05, 'epoch': 0.49}\n",
      "{'loss': 3.2127, 'grad_norm': 6.288567543029785, 'learning_rate': 4.224e-05, 'epoch': 0.49}\n",
      "{'loss': 3.173, 'grad_norm': 5.805058002471924, 'learning_rate': 4.218666666666667e-05, 'epoch': 0.49}\n",
      "{'loss': 3.3139, 'grad_norm': 6.287442207336426, 'learning_rate': 4.213333333333333e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2258, 'grad_norm': 5.769672870635986, 'learning_rate': 4.2080000000000004e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2281, 'grad_norm': 6.2737908363342285, 'learning_rate': 4.202666666666667e-05, 'epoch': 0.5}\n",
      "{'loss': 3.1531, 'grad_norm': 5.706296443939209, 'learning_rate': 4.1973333333333334e-05, 'epoch': 0.5}\n",
      "{'loss': 3.3959, 'grad_norm': 5.8759894371032715, 'learning_rate': 4.1920000000000005e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2832, 'grad_norm': 6.3797926902771, 'learning_rate': 4.186666666666667e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2873, 'grad_norm': 5.758255481719971, 'learning_rate': 4.1813333333333335e-05, 'epoch': 0.5}\n",
      "{'loss': 3.1777, 'grad_norm': 5.9594550132751465, 'learning_rate': 4.176000000000001e-05, 'epoch': 0.5}\n",
      "{'loss': 3.233, 'grad_norm': 5.554365634918213, 'learning_rate': 4.170666666666667e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2418, 'grad_norm': 6.000166416168213, 'learning_rate': 4.165333333333334e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2469, 'grad_norm': 6.384584903717041, 'learning_rate': 4.16e-05, 'epoch': 0.5}\n",
      "{'loss': 3.3404, 'grad_norm': 6.202101230621338, 'learning_rate': 4.1546666666666666e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2084, 'grad_norm': 6.695379734039307, 'learning_rate': 4.149333333333334e-05, 'epoch': 0.5}\n",
      "{'loss': 3.2467, 'grad_norm': 5.888087749481201, 'learning_rate': 4.144e-05, 'epoch': 0.5}\n",
      "{'loss': 3.3164, 'grad_norm': 6.884426593780518, 'learning_rate': 4.138666666666667e-05, 'epoch': 0.51}\n",
      "{'loss': 3.252, 'grad_norm': 5.723911762237549, 'learning_rate': 4.133333333333334e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2295, 'grad_norm': 5.790980339050293, 'learning_rate': 4.1280000000000005e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2295, 'grad_norm': 5.859818458557129, 'learning_rate': 4.122666666666667e-05, 'epoch': 0.51}\n",
      "{'loss': 3.3068, 'grad_norm': 5.728257179260254, 'learning_rate': 4.117333333333334e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2518, 'grad_norm': 6.178432941436768, 'learning_rate': 4.1120000000000006e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2174, 'grad_norm': 5.748872756958008, 'learning_rate': 4.106666666666667e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2053, 'grad_norm': 5.5951032638549805, 'learning_rate': 4.101333333333334e-05, 'epoch': 0.51}\n",
      "{'loss': 3.1148, 'grad_norm': 6.493244647979736, 'learning_rate': 4.096e-05, 'epoch': 0.51}\n",
      "{'loss': 3.1971, 'grad_norm': 5.79777717590332, 'learning_rate': 4.0906666666666666e-05, 'epoch': 0.51}\n",
      "{'loss': 3.3594, 'grad_norm': 7.337646961212158, 'learning_rate': 4.085333333333334e-05, 'epoch': 0.51}\n",
      "{'loss': 3.1506, 'grad_norm': 5.657313346862793, 'learning_rate': 4.08e-05, 'epoch': 0.51}\n",
      "{'loss': 3.3141, 'grad_norm': 6.116547107696533, 'learning_rate': 4.074666666666667e-05, 'epoch': 0.51}\n",
      "{'loss': 3.3766, 'grad_norm': 6.215630531311035, 'learning_rate': 4.069333333333334e-05, 'epoch': 0.51}\n",
      "{'loss': 3.2576, 'grad_norm': 6.336133003234863, 'learning_rate': 4.0640000000000004e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2473, 'grad_norm': 6.347008228302002, 'learning_rate': 4.058666666666667e-05, 'epoch': 0.52}\n",
      "{'loss': 3.3871, 'grad_norm': 5.928265571594238, 'learning_rate': 4.053333333333334e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2963, 'grad_norm': 5.788377285003662, 'learning_rate': 4.0480000000000005e-05, 'epoch': 0.52}\n",
      "{'loss': 3.3043, 'grad_norm': 5.621262073516846, 'learning_rate': 4.042666666666667e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2182, 'grad_norm': 6.166128158569336, 'learning_rate': 4.037333333333334e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2822, 'grad_norm': 6.383925437927246, 'learning_rate': 4.032e-05, 'epoch': 0.52}\n",
      "{'loss': 3.1902, 'grad_norm': 6.083869934082031, 'learning_rate': 4.0266666666666665e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2787, 'grad_norm': 5.944474697113037, 'learning_rate': 4.021333333333334e-05, 'epoch': 0.52}\n",
      "{'loss': 3.3037, 'grad_norm': 6.220727443695068, 'learning_rate': 4.016e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2498, 'grad_norm': 6.172818660736084, 'learning_rate': 4.010666666666667e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2566, 'grad_norm': 6.419987201690674, 'learning_rate': 4.005333333333334e-05, 'epoch': 0.52}\n",
      "{'loss': 3.3598, 'grad_norm': 5.869343280792236, 'learning_rate': 4e-05, 'epoch': 0.52}\n",
      " 50%|█████████████████                 | 7500/15000 [1:06:19<1:37:23,  1.28it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.93s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:07<00:02,  2.70s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 32.687152, 'eval_rouge-2': 7.031362000000001, 'eval_rouge-l': 25.227360000000004, 'eval_bleu-4': 0.034962267047416715, 'eval_runtime': 15.2174, 'eval_samples_per_second': 3.286, 'eval_steps_per_second': 0.263, 'epoch': 0.52}\n",
      " 50%|█████████████████                 | 7500/15000 [1:06:34<1:37:23,  1.28it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:11<00:00,  3.00s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-7500\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.3404, 'grad_norm': 6.884066104888916, 'learning_rate': 3.994666666666667e-05, 'epoch': 0.52}\n",
      "{'loss': 3.133, 'grad_norm': 6.044402122497559, 'learning_rate': 3.989333333333333e-05, 'epoch': 0.52}\n",
      "{'loss': 3.2803, 'grad_norm': 6.592453956604004, 'learning_rate': 3.9840000000000005e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2545, 'grad_norm': 6.205479145050049, 'learning_rate': 3.978666666666667e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2475, 'grad_norm': 6.423736095428467, 'learning_rate': 3.9733333333333335e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2219, 'grad_norm': 6.566736221313477, 'learning_rate': 3.9680000000000006e-05, 'epoch': 0.53}\n",
      "{'loss': 3.0945, 'grad_norm': 5.977275371551514, 'learning_rate': 3.962666666666667e-05, 'epoch': 0.53}\n",
      "{'loss': 3.274, 'grad_norm': 5.8976731300354, 'learning_rate': 3.9573333333333336e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2082, 'grad_norm': 6.322536468505859, 'learning_rate': 3.952e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2387, 'grad_norm': 6.999849796295166, 'learning_rate': 3.946666666666667e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2486, 'grad_norm': 5.478900909423828, 'learning_rate': 3.941333333333334e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2869, 'grad_norm': 5.982212543487549, 'learning_rate': 3.936e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2295, 'grad_norm': 6.333637714385986, 'learning_rate': 3.930666666666667e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2635, 'grad_norm': 6.050021648406982, 'learning_rate': 3.925333333333333e-05, 'epoch': 0.53}\n",
      "{'loss': 3.1885, 'grad_norm': 6.280707836151123, 'learning_rate': 3.9200000000000004e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2832, 'grad_norm': 5.793548107147217, 'learning_rate': 3.914666666666667e-05, 'epoch': 0.53}\n",
      "{'loss': 3.2963, 'grad_norm': 6.701138973236084, 'learning_rate': 3.909333333333334e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2518, 'grad_norm': 6.7323079109191895, 'learning_rate': 3.9040000000000006e-05, 'epoch': 0.54}\n",
      "{'loss': 3.1523, 'grad_norm': 5.920531749725342, 'learning_rate': 3.898666666666667e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2629, 'grad_norm': 7.632730960845947, 'learning_rate': 3.8933333333333336e-05, 'epoch': 0.54}\n",
      "{'loss': 3.3879, 'grad_norm': 6.133767604827881, 'learning_rate': 3.888e-05, 'epoch': 0.54}\n",
      "{'loss': 3.31, 'grad_norm': 6.580808639526367, 'learning_rate': 3.882666666666667e-05, 'epoch': 0.54}\n",
      "{'loss': 3.3957, 'grad_norm': 6.813842296600342, 'learning_rate': 3.877333333333334e-05, 'epoch': 0.54}\n",
      "{'loss': 3.1885, 'grad_norm': 5.758731842041016, 'learning_rate': 3.872e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2617, 'grad_norm': 5.689703464508057, 'learning_rate': 3.866666666666667e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2256, 'grad_norm': 6.282038688659668, 'learning_rate': 3.861333333333334e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2291, 'grad_norm': 6.049738883972168, 'learning_rate': 3.8560000000000004e-05, 'epoch': 0.54}\n",
      "{'loss': 3.1945, 'grad_norm': 6.440904140472412, 'learning_rate': 3.850666666666667e-05, 'epoch': 0.54}\n",
      "{'loss': 3.2621, 'grad_norm': 6.053099632263184, 'learning_rate': 3.845333333333334e-05, 'epoch': 0.54}\n",
      "{'loss': 3.1551, 'grad_norm': 6.372476100921631, 'learning_rate': 3.8400000000000005e-05, 'epoch': 0.54}\n",
      "{'loss': 3.3828, 'grad_norm': 6.618485450744629, 'learning_rate': 3.834666666666667e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2473, 'grad_norm': 6.668698310852051, 'learning_rate': 3.8293333333333335e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2961, 'grad_norm': 6.5473313331604, 'learning_rate': 3.824e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2375, 'grad_norm': 6.517063140869141, 'learning_rate': 3.818666666666667e-05, 'epoch': 0.55}\n",
      "{'loss': 3.1947, 'grad_norm': 6.718963623046875, 'learning_rate': 3.8133333333333336e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2859, 'grad_norm': 5.877703666687012, 'learning_rate': 3.808e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2822, 'grad_norm': 6.190352916717529, 'learning_rate': 3.8026666666666666e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2686, 'grad_norm': 6.245283126831055, 'learning_rate': 3.797333333333334e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2963, 'grad_norm': 6.490174770355225, 'learning_rate': 3.792e-05, 'epoch': 0.55}\n",
      "{'loss': 3.216, 'grad_norm': 6.232293128967285, 'learning_rate': 3.786666666666667e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2322, 'grad_norm': 5.583700656890869, 'learning_rate': 3.781333333333334e-05, 'epoch': 0.55}\n",
      "{'loss': 3.1854, 'grad_norm': 6.095006465911865, 'learning_rate': 3.7760000000000004e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2676, 'grad_norm': 5.82967472076416, 'learning_rate': 3.770666666666667e-05, 'epoch': 0.55}\n",
      "{'loss': 3.3145, 'grad_norm': 6.022271633148193, 'learning_rate': 3.7653333333333334e-05, 'epoch': 0.55}\n",
      "{'loss': 3.3959, 'grad_norm': 6.701834201812744, 'learning_rate': 3.76e-05, 'epoch': 0.55}\n",
      "{'loss': 3.2059, 'grad_norm': 6.225270748138428, 'learning_rate': 3.754666666666667e-05, 'epoch': 0.56}\n",
      "{'loss': 3.3127, 'grad_norm': 7.500659465789795, 'learning_rate': 3.7493333333333336e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2641, 'grad_norm': 6.894225597381592, 'learning_rate': 3.744000000000001e-05, 'epoch': 0.56}\n",
      "{'loss': 3.0846, 'grad_norm': 5.910096168518066, 'learning_rate': 3.7386666666666666e-05, 'epoch': 0.56}\n",
      "{'loss': 3.3248, 'grad_norm': 8.281294822692871, 'learning_rate': 3.733333333333334e-05, 'epoch': 0.56}\n",
      " 53%|██████████████████▏               | 8000/15000 [1:13:19<1:31:38,  1.27it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:04<00:04,  2.04s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:07<00:02,  2.69s/it]\u001b[A\n",
      "                                                                                \u001b[A\n",
      "\u001b[A{'eval_rouge-1': 33.167256, 'eval_rouge-2': 7.927701999999999, 'eval_rouge-l': 25.712538, 'eval_bleu-4': 0.0386889623750173, 'eval_runtime': 13.9211, 'eval_samples_per_second': 3.592, 'eval_steps_per_second': 0.287, 'epoch': 0.56}\n",
      " 53%|██████████████████▏               | 8000/15000 [1:13:33<1:31:38,  1.27it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:10<00:00,  2.63s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-8000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "{'loss': 3.1246, 'grad_norm': 6.17376184463501, 'learning_rate': 3.728e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2002, 'grad_norm': 6.100371837615967, 'learning_rate': 3.722666666666667e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2699, 'grad_norm': 5.743324279785156, 'learning_rate': 3.717333333333334e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2748, 'grad_norm': 7.019000053405762, 'learning_rate': 3.7120000000000004e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2164, 'grad_norm': 6.087615966796875, 'learning_rate': 3.706666666666667e-05, 'epoch': 0.56}\n",
      "{'loss': 3.2096, 'grad_norm': 6.176745891571045, 'learning_rate': 3.7013333333333334e-05, 'epoch': 0.56}\n",
      "{'loss': 3.167, 'grad_norm': 6.125838756561279, 'learning_rate': 3.6960000000000005e-05, 'epoch': 0.56}\n",
      "{'loss': 3.1363, 'grad_norm': 5.94995641708374, 'learning_rate': 3.690666666666667e-05, 'epoch': 0.56}\n",
      "{'loss': 3.1639, 'grad_norm': 6.157218933105469, 'learning_rate': 3.6853333333333335e-05, 'epoch': 0.56}\n",
      " 54%|██████████████████▎               | 8093/15000 [1:14:50<1:31:16,  1.26it/s]"
     ]
    }
   ],
   "source": [
    "#从checkpoint-3000保存点进行微调\n",
    "!python finetune_hf.py data/AdvertiseGen_fix THUDM/chatglm3-6b configs/lora-batch8.yaml yes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "48b03a08-b438-4e62-a558-7909ad594f8c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "15000\n"
     ]
    }
   ],
   "source": [
    "# Find the highest-numbered checkpoint directory under ./output-models,\n",
    "# so the next run can resume from the latest save point.\n",
    "# Matching on the \"checkpoint-\" prefix already excludes both the\n",
    "# .ipynb_checkpoints folder and tmp-checkpoint-* entries.\n",
    "\n",
    "import os\n",
    "dirlist = os.listdir(\"./output-models\")\n",
    "\n",
    "checkpoint_sn = 0\n",
    "for entry in dirlist:\n",
    "    if entry.startswith(\"checkpoint-\"):\n",
    "        try:\n",
    "            num = int(entry[len(\"checkpoint-\"):])\n",
    "        except ValueError:\n",
    "            continue  # ignore non-numeric suffixes (e.g. checkpoint-best)\n",
    "        if num > checkpoint_sn:\n",
    "            checkpoint_sn = num\n",
    "\n",
    "print(checkpoint_sn)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f497b578-2b57-4a18-8be4-5c4802cd4351",
   "metadata": {},
   "source": [
    "#### GPU显存峰值\n",
    "```bash\n",
    "Every 2.0s: nvidia-smi                                                                                                                                                          Thu Apr  4 10:55:38 2024\n",
    "\n",
    "Thu Apr  4 10:55:38 2024\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "| NVIDIA-SMI 550.54.15              Driver Version: 550.54.15\t   CUDA Version: 12.4     |\n",
    "|-----------------------------------------+------------------------+----------------------+\n",
    "| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |\n",
    "| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |\n",
    "|                                         |                        |               MIG M. |\n",
    "|=========================================+========================+======================|\n",
    "|   0  Tesla V100S-PCIE-32GB          Off |   00000000:00:03.0 Off |                    0 |\n",
    "| N/A   51C    P0            150W /  250W |   31891MiB /  32768MiB |     60%\t  Default |\n",
    "|                                         |                        |                  N/A |\n",
    "+-----------------------------------------+------------------------+----------------------+\n",
    "\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "| Processes:                                                                              |\n",
    "|  GPU   GI   CI        PID   Type   Process name                              GPU Memory |\n",
    "|        ID   ID                                                               Usage\t  |\n",
    "|=========================================================================================|\n",
    "|    0   N/A  N/A      5116\t C   python                                      31888MiB |\n",
    "+-----------------------------------------------------------------------------------------+\n",
    "\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "fbc329a3-d591-4dee-a3d6-71929ce066cc",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:04<00:00,  1.70it/s]\n",
      "trainable params: 3,899,392 || all params: 6,247,483,392 || trainable%: 0.06241540401681151\n",
      "--> Model\n",
      "\n",
      "--> model has 3.899392M params\n",
      "\n",
      "train_dataset: Dataset({\n",
      "    features: ['input_ids', 'labels'],\n",
      "    num_rows: 114599\n",
      "})\n",
      "val_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "test_dataset: Dataset({\n",
      "    features: ['input_ids', 'output_ids'],\n",
      "    num_rows: 1070\n",
      "})\n",
      "--> Sanity check\n",
      "           '[gMASK]': 64790 -> -100\n",
      "               'sop': 64792 -> -100\n",
      "          '<|user|>': 64795 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '\\n': 13 -> -100\n",
      "                  '': 30910 -> -100\n",
      "                '类型': 33467 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '版': 55090 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '宽松': 40833 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '风格': 32799 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '性感': 40589 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                '图案': 37505 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                '线条': 37216 -> -100\n",
      "                 '*': 30998 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "                 '型': 54888 -> -100\n",
      "                 '#': 31010 -> -100\n",
      "                 '阔': 56529 -> -100\n",
      "                 '腿': 56158 -> -100\n",
      "                 '裤': 56532 -> -100\n",
      "     '<|assistant|>': 64796 -> -100\n",
      "                  '': 30910 -> 30910\n",
      "                '\\n': 13 -> 13\n",
      "                  '': 30910 -> 30910\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '阔': 56529 -> 56529\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '这': 54551 -> 54551\n",
      "                '两年': 33808 -> 33808\n",
      "                '真的': 32041 -> 32041\n",
      "                 '吸': 55360 -> 55360\n",
      "                 '粉': 55486 -> 55486\n",
      "                '不少': 32138 -> 32138\n",
      "                 '，': 31123 -> 31123\n",
      "                '明星': 32943 -> 32943\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '达': 54880 -> 54880\n",
      "                '人的': 31664 -> 31664\n",
      "                '心头': 46565 -> 46565\n",
      "                 '爱': 54799 -> 54799\n",
      "                 '。': 31155 -> 31155\n",
      "                '毕竟': 33051 -> 33051\n",
      "                 '好': 54591 -> 54591\n",
      "                 '穿': 55432 -> 55432\n",
      "                '时尚': 33481 -> 33481\n",
      "                 '，': 31123 -> 31123\n",
      "                 '谁': 55622 -> 55622\n",
      "                '都能': 32904 -> 32904\n",
      "                 '穿': 55432 -> 55432\n",
      "                 '出': 54557 -> 54557\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '长': 54625 -> 54625\n",
      "                 '2': 30943 -> 30943\n",
      "                 '米': 55055 -> 55055\n",
      "               '的效果': 35590 -> 35590\n",
      "                '宽松': 40833 -> 40833\n",
      "                 '的': 54530 -> 54530\n",
      "                 '裤': 56532 -> 56532\n",
      "                 '腿': 56158 -> 56158\n",
      "                 '，': 31123 -> 31123\n",
      "               '当然是': 48466 -> 48466\n",
      "                 '遮': 57148 -> 57148\n",
      "                 '肉': 55343 -> 55343\n",
      "                 '小': 54603 -> 54603\n",
      "                '能手': 49355 -> 49355\n",
      "                 '啊': 55674 -> 55674\n",
      "                 '。': 31155 -> 31155\n",
      "                '上身': 51605 -> 51605\n",
      "                 '随': 55119 -> 55119\n",
      "                 '性': 54642 -> 54642\n",
      "                '自然': 31799 -> 31799\n",
      "                 '不': 54535 -> 54535\n",
      "                 '拘': 57036 -> 57036\n",
      "                 '束': 55625 -> 55625\n",
      "                 '，': 31123 -> 31123\n",
      "                '面料': 46839 -> 46839\n",
      "                 '亲': 55113 -> 55113\n",
      "                 '肤': 56089 -> 56089\n",
      "                '舒适': 33894 -> 33894\n",
      "                 '贴': 55778 -> 55778\n",
      "                '身体': 31902 -> 31902\n",
      "                 '验': 55017 -> 55017\n",
      "                 '感': 54706 -> 54706\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '棒': 56382 -> 56382\n",
      "                 '哒': 59230 -> 59230\n",
      "                 '。': 31155 -> 31155\n",
      "                 '系': 54712 -> 54712\n",
      "                 '带': 54882 -> 54882\n",
      "                '部分': 31726 -> 31726\n",
      "                '增加': 31917 -> 31917\n",
      "                '设计': 31735 -> 31735\n",
      "                '看点': 45032 -> 45032\n",
      "                 '，': 31123 -> 31123\n",
      "                 '还': 54656 -> 54656\n",
      "                 '让': 54772 -> 54772\n",
      "                '单品': 46539 -> 46539\n",
      "               '的设计': 34481 -> 34481\n",
      "                 '感': 54706 -> 54706\n",
      "                '更强': 43084 -> 43084\n",
      "                 '。': 31155 -> 31155\n",
      "                '腿部': 46799 -> 46799\n",
      "                '线条': 37216 -> 37216\n",
      "                 '若': 55351 -> 55351\n",
      "                 '隐': 55733 -> 55733\n",
      "                 '若': 55351 -> 55351\n",
      "                 '现': 54600 -> 54600\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                '性感': 40589 -> 40589\n",
      "                 '撩': 58521 -> 58521\n",
      "                 '人': 54533 -> 54533\n",
      "                 '。': 31155 -> 31155\n",
      "                '颜色': 33692 -> 33692\n",
      "                 '敲': 57004 -> 57004\n",
      "                '温柔': 34678 -> 34678\n",
      "                 '的': 54530 -> 54530\n",
      "                 '，': 31123 -> 31123\n",
      "                 '与': 54619 -> 54619\n",
      "                '裤子': 44722 -> 44722\n",
      "                '本身': 32754 -> 32754\n",
      "                 '所': 54626 -> 54626\n",
      "                '呈现': 33169 -> 33169\n",
      "               '的风格': 48084 -> 48084\n",
      "                '有点': 33149 -> 33149\n",
      "                 '反': 54955 -> 54955\n",
      "                 '差': 55342 -> 55342\n",
      "                 '萌': 56842 -> 56842\n",
      "                 '。': 31155 -> 31155\n",
      "                  '': 2 -> 2\n",
      "/root/miniconda3/lib/python3.12/site-packages/accelerate/accelerator.py:432: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n",
      "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n",
      "  warnings.warn(\n",
      "Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n",
      "max_steps is given, it will override any value given in num_train_epochs\n",
      "resume checkpoint from  checkpoint-14500\n",
      "Loading model from ./output-models/checkpoint-14500.\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running training *****\n",
      "  Num examples = 114,599\n",
      "  Num Epochs = 2\n",
      "  Instantaneous batch size per device = 8\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 8\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 15,000\n",
      "  Number of trainable parameters = 3,899,392\n",
      "  Continuing training from checkpoint, will skip to saved global_step\n",
      "  Continuing training from epoch 1\n",
      "  Continuing training from global step 14500\n",
      "  Will skip the first 1 epochs then the first 175 batches in the first epoch.\n",
      "  0%|                                                 | 0/15000 [00:00<?, ?it/s]/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "{'loss': 3.1293, 'grad_norm': 7.605799674987793, 'learning_rate': 2.6133333333333334e-06, 'epoch': 1.01}\n",
      "{'loss': 3.2359, 'grad_norm': 6.535445690155029, 'learning_rate': 2.56e-06, 'epoch': 1.01}\n",
      "{'loss': 3.3072, 'grad_norm': 7.2573747634887695, 'learning_rate': 2.5066666666666667e-06, 'epoch': 1.01}\n",
      "{'loss': 3.1254, 'grad_norm': 7.534631252288818, 'learning_rate': 2.4533333333333333e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1992, 'grad_norm': 7.4089274406433105, 'learning_rate': 2.4000000000000003e-06, 'epoch': 1.02}\n",
      "{'loss': 3.3191, 'grad_norm': 8.351901054382324, 'learning_rate': 2.346666666666667e-06, 'epoch': 1.02}\n",
      "{'loss': 3.0662, 'grad_norm': 7.661194324493408, 'learning_rate': 2.2933333333333335e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1986, 'grad_norm': 7.151738166809082, 'learning_rate': 2.24e-06, 'epoch': 1.02}\n",
      "{'loss': 3.0996, 'grad_norm': 8.23842716217041, 'learning_rate': 2.1866666666666668e-06, 'epoch': 1.02}\n",
      "{'loss': 3.2051, 'grad_norm': 8.034001350402832, 'learning_rate': 2.133333333333334e-06, 'epoch': 1.02}\n",
      "{'loss': 3.273, 'grad_norm': 7.7353973388671875, 'learning_rate': 2.08e-06, 'epoch': 1.02}\n",
      "{'loss': 3.0488, 'grad_norm': 7.110805511474609, 'learning_rate': 2.0266666666666666e-06, 'epoch': 1.02}\n",
      "{'loss': 3.201, 'grad_norm': 7.585733413696289, 'learning_rate': 1.9733333333333336e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1703, 'grad_norm': 8.289440155029297, 'learning_rate': 1.9200000000000003e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1059, 'grad_norm': 8.109725952148438, 'learning_rate': 1.8666666666666669e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1574, 'grad_norm': 7.159052848815918, 'learning_rate': 1.8133333333333337e-06, 'epoch': 1.02}\n",
      "{'loss': 3.19, 'grad_norm': 7.994387149810791, 'learning_rate': 1.76e-06, 'epoch': 1.02}\n",
      "{'loss': 3.1334, 'grad_norm': 8.100252151489258, 'learning_rate': 1.7066666666666667e-06, 'epoch': 1.02}\n",
      "{'loss': 3.2314, 'grad_norm': 7.646480083465576, 'learning_rate': 1.6533333333333335e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2062, 'grad_norm': 7.797060489654541, 'learning_rate': 1.6000000000000001e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2791, 'grad_norm': 8.472546577453613, 'learning_rate': 1.546666666666667e-06, 'epoch': 1.03}\n",
      "{'loss': 3.1855, 'grad_norm': 7.529377460479736, 'learning_rate': 1.4933333333333336e-06, 'epoch': 1.03}\n",
      "{'loss': 3.1746, 'grad_norm': 7.4977946281433105, 'learning_rate': 1.44e-06, 'epoch': 1.03}\n",
      "{'loss': 3.1986, 'grad_norm': 7.639484882354736, 'learning_rate': 1.3866666666666668e-06, 'epoch': 1.03}\n",
      "{'loss': 3.127, 'grad_norm': 7.4465556144714355, 'learning_rate': 1.3333333333333334e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2338, 'grad_norm': 7.595545291900635, 'learning_rate': 1.28e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2807, 'grad_norm': 8.539836883544922, 'learning_rate': 1.2266666666666666e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2057, 'grad_norm': 7.989388942718506, 'learning_rate': 1.1733333333333335e-06, 'epoch': 1.03}\n",
      "{'loss': 3.1896, 'grad_norm': 7.47167444229126, 'learning_rate': 1.12e-06, 'epoch': 1.03}\n",
      "{'loss': 3.2018, 'grad_norm': 7.7078704833984375, 'learning_rate': 1.066666666666667e-06, 'epoch': 1.03}\n",
      "{'loss': 3.1568, 'grad_norm': 8.43995189666748, 'learning_rate': 1.0133333333333333e-06, 'epoch': 1.03}\n",
      "{'loss': 3.0752, 'grad_norm': 7.812742710113525, 'learning_rate': 9.600000000000001e-07, 'epoch': 1.03}\n",
      "{'loss': 3.184, 'grad_norm': 7.610111713409424, 'learning_rate': 9.066666666666668e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1996, 'grad_norm': 8.297478675842285, 'learning_rate': 8.533333333333334e-07, 'epoch': 1.04}\n",
      "{'loss': 3.0699, 'grad_norm': 7.701662540435791, 'learning_rate': 8.000000000000001e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1695, 'grad_norm': 8.79282283782959, 'learning_rate': 7.466666666666668e-07, 'epoch': 1.04}\n",
      "{'loss': 3.198, 'grad_norm': 8.108217239379883, 'learning_rate': 6.933333333333334e-07, 'epoch': 1.04}\n",
      "{'loss': 3.2213, 'grad_norm': 7.836912631988525, 'learning_rate': 6.4e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1777, 'grad_norm': 7.6488423347473145, 'learning_rate': 5.866666666666667e-07, 'epoch': 1.04}\n",
      "{'loss': 3.2504, 'grad_norm': 7.50275993347168, 'learning_rate': 5.333333333333335e-07, 'epoch': 1.04}\n",
      "{'loss': 3.292, 'grad_norm': 7.93825101852417, 'learning_rate': 4.800000000000001e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1236, 'grad_norm': 7.449670791625977, 'learning_rate': 4.266666666666667e-07, 'epoch': 1.04}\n",
      "{'loss': 3.152, 'grad_norm': 10.196556091308594, 'learning_rate': 3.733333333333334e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1971, 'grad_norm': 8.241493225097656, 'learning_rate': 3.2e-07, 'epoch': 1.04}\n",
      "{'loss': 3.2314, 'grad_norm': 8.494584083557129, 'learning_rate': 2.666666666666667e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1611, 'grad_norm': 7.3158979415893555, 'learning_rate': 2.1333333333333334e-07, 'epoch': 1.04}\n",
      "{'loss': 3.1434, 'grad_norm': 8.041438102722168, 'learning_rate': 1.6e-07, 'epoch': 1.05}\n",
      "{'loss': 3.1152, 'grad_norm': 9.186230659484863, 'learning_rate': 1.0666666666666667e-07, 'epoch': 1.05}\n",
      "{'loss': 3.2334, 'grad_norm': 7.350133419036865, 'learning_rate': 5.3333333333333334e-08, 'epoch': 1.05}\n",
      "{'loss': 3.1404, 'grad_norm': 7.87338399887085, 'learning_rate': 0.0, 'epoch': 1.05}\n",
      "100%|█████████████████████████████████████| 15000/15000 [06:48<00:00,  1.25it/s]***** Running Evaluation *****\n",
      "  Num examples = 50\n",
      "  Batch size = 16\n",
      "\n",
      "  0%|                                                     | 0/4 [00:00<?, ?it/s]\u001b[A\n",
      " 50%|██████████████████████▌                      | 2/4 [00:03<00:03,  1.86s/it]\u001b[A\n",
      " 75%|█████████████████████████████████▊           | 3/4 [00:06<00:02,  2.20s/it]\u001b[A\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:08<00:00,  2.05s/it]\u001b[ABuilding prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n",
      "Loading model cost 0.711 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "                                                                                \n",
      "\u001b[A{'eval_rouge-1': 32.804346, 'eval_rouge-2': 7.396215999999999, 'eval_rouge-l': 26.104, 'eval_bleu-4': 0.035137374978255205, 'eval_runtime': 11.9484, 'eval_samples_per_second': 4.185, 'eval_steps_per_second': 0.335, 'epoch': 1.05}\n",
      "100%|█████████████████████████████████████| 15000/15000 [07:00<00:00,  1.25it/s]\n",
      "100%|█████████████████████████████████████████████| 4/4 [00:09<00:00,  2.05s/it]\u001b[A\n",
      "                                                                                \u001b[ASaving model checkpoint to ./output-models/checkpoint-15000\n",
      "loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--THUDM--chatglm3-6b/snapshots/a5ba5501eb873d40d48bd0983bd2a8dd006bb838/config.json\n",
      "Model config ChatGLMConfig {\n",
      "  \"_name_or_path\": \"THUDM/chatglm3-6b\",\n",
      "  \"add_bias_linear\": false,\n",
      "  \"add_qkv_bias\": true,\n",
      "  \"apply_query_key_layer_scaling\": true,\n",
      "  \"apply_residual_connection_post_layernorm\": false,\n",
      "  \"architectures\": [\n",
      "    \"ChatGLMModel\"\n",
      "  ],\n",
      "  \"attention_dropout\": 0.0,\n",
      "  \"attention_softmax_in_fp32\": true,\n",
      "  \"auto_map\": {\n",
      "    \"AutoConfig\": \"THUDM/chatglm3-6b--configuration_chatglm.ChatGLMConfig\",\n",
      "    \"AutoModel\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForCausalLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSeq2SeqLM\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForConditionalGeneration\",\n",
      "    \"AutoModelForSequenceClassification\": \"THUDM/chatglm3-6b--modeling_chatglm.ChatGLMForSequenceClassification\"\n",
      "  },\n",
      "  \"bias_dropout_fusion\": true,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"eos_token_id\": 2,\n",
      "  \"ffn_hidden_size\": 13696,\n",
      "  \"fp32_residual_connection\": false,\n",
      "  \"hidden_dropout\": 0.0,\n",
      "  \"hidden_size\": 4096,\n",
      "  \"kv_channels\": 128,\n",
      "  \"layernorm_epsilon\": 1e-05,\n",
      "  \"model_type\": \"chatglm\",\n",
      "  \"multi_query_attention\": true,\n",
      "  \"multi_query_group_num\": 2,\n",
      "  \"num_attention_heads\": 32,\n",
      "  \"num_layers\": 28,\n",
      "  \"original_rope\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"padded_vocab_size\": 65024,\n",
      "  \"post_layer_norm\": true,\n",
      "  \"pre_seq_len\": null,\n",
      "  \"prefix_projection\": false,\n",
      "  \"quantization_bit\": 0,\n",
      "  \"rmsnorm\": true,\n",
      "  \"seq_length\": 8192,\n",
      "  \"tie_word_embeddings\": false,\n",
      "  \"torch_dtype\": \"float16\",\n",
      "  \"transformers_version\": \"4.39.3\",\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 65024\n",
      "}\n",
      "\n",
      "\n",
      "\n",
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
      "\n",
      "\n",
      "{'train_runtime': 421.9556, 'train_samples_per_second': 284.39, 'train_steps_per_second': 35.549, 'train_loss': 0.10611458333333333, 'epoch': 1.05}\n",
      "100%|█████████████████████████████████████| 15000/15000 [07:01<00:00, 35.55it/s]\n",
      "/root/miniconda3/lib/python3.12/site-packages/torch/utils/data/dataloader.py:558: UserWarning: This DataLoader will create 16 worker processes in total. Our suggested max number of worker in current system is 10, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n",
      "***** Running Prediction *****\n",
      "  Num examples = 1070\n",
      "  Batch size = 16\n",
      "100%|███████████████████████████████████████████| 67/67 [05:14<00:00,  4.69s/it]\n"
     ]
    }
   ],
   "source": [
    "#从checkpoint-14500保存点进行微调\n",
    "!python finetune_hf.py data/AdvertiseGen_fix THUDM/chatglm3-6b configs/lora-batch8.yaml yes"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "155fc916-6e92-444d-ad74-dee93bac95bc",
   "metadata": {},
   "source": [
    "## 2使用微调后的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "7a473d55-9d17-43b9-bdb5-89c86d4857a3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:07<00:00,  1.14s/it]\n",
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "这款来自BRAND的牛仔外套，以简约的版型，彰显出时尚的气息。衣身白色破洞的元素，尽显出活力与个性。胸口处刺绣的图案，让整体看起来更加有层次感，搭配上破洞的元素，显得更加帅气。\n"
     ]
    }
   ],
   "source": [
    "\n",
    "!python inference_hf.py output-models/checkpoint-15000 --prompt 类型#上衣*材质#牛仔布*颜色#白色*风格#简约*图案#刺绣*衣样式#外套*衣款式#破洞"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "fc1fe3cf-90e0-466d-afb6-b8b59e822574",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:07<00:00,  1.11s/it]\n",
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "这款中国风外套，采用了经典的双排扣设计，带来干练利落的视觉效果。而胸前采用精美的绣花装饰，则给外套增添了潮流的气息。\n"
     ]
    }
   ],
   "source": [
    "!python inference_hf.py output-models/checkpoint-15000 --prompt 类型#上衣*风格#潮*风格#中国风*图案#刺绣*衣样式#外套"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "2e7c0ed5-f22c-4ba0-b773-163aa7ad58b3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:07<00:00,  1.12s/it]\n",
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "茶香四溢的茉莉花茶，是茶香四溢的春日里，最好的陪伴。这款茶香茉莉花茶，选用精选茉莉花，搭配上精选的茶叶，让茶香更加浓郁。加上茶杯上印制的茉莉花图案，更是增添了几分雅致。而茶杯上还有简单的花纹，让这款茉莉花茶更加有格调。\n"
     ]
    }
   ],
   "source": [
    "!python inference_hf.py output-models/checkpoint-15000 --prompt 类型#饮料*口味#茉莉花茶茶"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "59e96d58-b78f-4072-92b9-65653321e125",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading checkpoint shards: 100%|██████████████████| 7/7 [00:07<00:00,  1.04s/it]\n",
      "Setting eos_token is not supported, use the default one.\n",
      "Setting pad_token is not supported, use the default one.\n",
      "Setting unk_token is not supported, use the default one.\n",
      "欢迎使用 ChatGLM3-6B 模型，输入内容即可进行对话，clear 清空对话历史，stop 终止程序\n",
      "\n",
      "用户：^C\n",
      "Traceback (most recent call last):\n",
      "  File \"/root/ChatGLM/lora_finetune_cli_demo.py\", line 95, in <module>\n",
      "    main()\n",
      "  File \"/root/ChatGLM/lora_finetune_cli_demo.py\", line 71, in main\n",
      "    query = input(\"\\n用户：\")\n",
      "            ^^^^^^^^^^^^^^^^^\n",
      "KeyboardInterrupt\n"
     ]
    }
   ],
   "source": [
    "!python lora_finetune_cli_demo.py"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
