{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 搭配环境"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "%pip install paddlex -i https://mirror.baidu.com/pypi/simple"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 准备数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "!mkdir -p ~/fire\r\n",
    "%cd ~/fire\r\n",
    "!unzip -oq /home/aistudio/data/data104155/fire.zip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "%cd ~\r\n",
    "!paddlex --split_dataset --format VOC --dataset_dir fire --test_value 0.1 --val_value 0.2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/__init__.py:107: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import MutableMapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Iterable, Mapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Sized\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:24:37 [INFO]\tStarting to read file list from dataset...\n",
      "2021-08-15 16:24:37 [INFO]\t345 samples in file fire/train_list.txt\n",
      "creating index...\n",
      "index created!\n",
      "2021-08-15 16:24:37 [INFO]\tStarting to read file list from dataset...\n",
      "2021-08-15 16:24:38 [INFO]\t98 samples in file fire/val_list.txt\n",
      "creating index...\n",
      "index created!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py:706: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  elif dtype == np.bool:\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py:2043: UserWarning: The Attr(force_cpu) of Op(fill_constant) will be deprecated in the future, please use 'device_guard' instead. 'device_guard' has higher priority when they are used at the same time.\n",
      "  \"used at the same time.\" % type)\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/ops.py:131\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:155\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:172\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:172\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:174\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:174\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:178\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:178\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:180\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:180\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:216\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:217\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:218\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:219\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:97\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:97\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:99\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:101\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:101\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:101\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:102\n",
      "The behavior of expression A / B has been unified with elementwise_div(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_div(X, Y, axis=0) instead of A / B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/iou_loss.py:79\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:186\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:194\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:349\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:350\n",
      "The behavior of expression A - B has been unified with elementwise_sub(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_sub(X, Y, axis=0) instead of A - B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:351\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:352\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:383\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:385\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:209\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:210\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/loss/yolo_loss.py:212\n",
      "The behavior of expression A + B has been unified with elementwise_add(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_add(X, Y, axis=0) instead of A + B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/iou_aware.py:64\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/iou_aware.py:40\n",
      "The behavior of expression A / B has been unified with elementwise_div(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_div(X, Y, axis=0) instead of A / B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:24:51 [INFO]\tDownloading ResNet50_vd_ssld_pretrained.tar from https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_ssld_pretrained.tar\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 92837/92837 [00:01<00:00, 52461.14KB/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:24:53 [INFO]\tDecompressing output/ppyolo/pretrain/ResNet50_vd_ssld_pretrained.tar...\n",
      "2021-08-15 16:25:00 [INFO]\tLoad pretrain weights from output/ppyolo/pretrain/ResNet50_vd_ssld_pretrained.\n",
      "2021-08-15 16:25:00 [INFO]\tThere are 275 varaibles in output/ppyolo/pretrain/ResNet50_vd_ssld_pretrained are loaded.\n",
      "2021-08-15 16:25:10 [INFO]\t[TRAIN] Epoch=1/100, Step=2/21, loss=15346.645508, lr=0.0, time_each_step=4.6s, eta=2:43:25\n",
      "2021-08-15 16:25:11 [INFO]\t[TRAIN] Epoch=1/100, Step=4/21, loss=14921.394531, lr=0.0, time_each_step=2.6s, eta=1:32:19\n",
      "2021-08-15 16:25:13 [INFO]\t[TRAIN] Epoch=1/100, Step=6/21, loss=16948.974609, lr=1e-06, time_each_step=1.97s, eta=1:9:45\n",
      "2021-08-15 16:25:14 [INFO]\t[TRAIN] Epoch=1/100, Step=8/21, loss=4168.104004, lr=1e-06, time_each_step=1.59s, eta=0:56:15\n",
      "2021-08-15 16:25:15 [INFO]\t[TRAIN] Epoch=1/100, Step=10/21, loss=8688.293945, lr=1e-06, time_each_step=1.39s, eta=0:49:12\n",
      "2021-08-15 16:25:16 [INFO]\t[TRAIN] Epoch=1/100, Step=12/21, loss=6004.327148, lr=1e-06, time_each_step=1.27s, eta=0:45:3\n",
      "2021-08-15 16:25:17 [INFO]\t[TRAIN] Epoch=1/100, Step=14/21, loss=1698.398438, lr=2e-06, time_each_step=1.16s, eta=0:41:6\n",
      "2021-08-15 16:25:18 [INFO]\t[TRAIN] Epoch=1/100, Step=16/21, loss=1919.04187, lr=2e-06, time_each_step=1.09s, eta=0:38:23\n",
      "2021-08-15 16:25:19 [INFO]\t[TRAIN] Epoch=1/100, Step=18/21, loss=921.080627, lr=2e-06, time_each_step=1.02s, eta=0:36:4\n",
      "2021-08-15 16:25:20 [INFO]\t[TRAIN] Epoch=1/100, Step=20/21, loss=285.026398, lr=2e-06, time_each_step=0.96s, eta=0:33:44\n",
      "2021-08-15 16:25:20 [INFO]\t[TRAIN] Epoch 1 finished, loss=6104.087891, lr=1e-06 .\n",
      "2021-08-15 16:25:24 [INFO]\t[TRAIN] Epoch=2/100, Step=1/21, loss=116.547646, lr=3e-06, time_each_step=0.69s, eta=0:32:25\n",
      "2021-08-15 16:25:25 [INFO]\t[TRAIN] Epoch=2/100, Step=3/21, loss=219.158264, lr=3e-06, time_each_step=0.7s, eta=0:32:24\n",
      "2021-08-15 16:25:26 [INFO]\t[TRAIN] Epoch=2/100, Step=5/21, loss=132.329987, lr=3e-06, time_each_step=0.69s, eta=0:32:22\n",
      "2021-08-15 16:25:27 [INFO]\t[TRAIN] Epoch=2/100, Step=7/21, loss=105.436859, lr=3e-06, time_each_step=0.7s, eta=0:32:21\n",
      "2021-08-15 16:25:29 [INFO]\t[TRAIN] Epoch=2/100, Step=9/21, loss=80.694603, lr=4e-06, time_each_step=0.71s, eta=0:32:21\n",
      "2021-08-15 16:25:30 [INFO]\t[TRAIN] Epoch=2/100, Step=11/21, loss=42.693245, lr=4e-06, time_each_step=0.68s, eta=0:32:18\n",
      "2021-08-15 16:25:31 [INFO]\t[TRAIN] Epoch=2/100, Step=13/21, loss=73.256683, lr=4e-06, time_each_step=0.68s, eta=0:32:16\n",
      "2021-08-15 16:25:31 [INFO]\t[TRAIN] Epoch=2/100, Step=15/21, loss=38.249596, lr=4e-06, time_each_step=0.66s, eta=0:32:14\n",
      "2021-08-15 16:25:33 [INFO]\t[TRAIN] Epoch=2/100, Step=17/21, loss=59.058304, lr=5e-06, time_each_step=0.67s, eta=0:32:13\n",
      "2021-08-15 16:25:33 [INFO]\t[TRAIN] Epoch=2/100, Step=19/21, loss=56.701366, lr=5e-06, time_each_step=0.67s, eta=0:32:12\n",
      "2021-08-15 16:25:34 [INFO]\t[TRAIN] Epoch=2/100, Step=21/21, loss=54.856308, lr=5e-06, time_each_step=0.53s, eta=0:32:6\n",
      "2021-08-15 16:25:34 [INFO]\t[TRAIN] Epoch 2 finished, loss=81.382706, lr=4e-06 .\n",
      "2021-08-15 16:25:41 [INFO]\t[TRAIN] Epoch=3/100, Step=2/21, loss=51.486473, lr=5e-06, time_each_step=0.77s, eta=0:23:35\n",
      "2021-08-15 16:25:42 [INFO]\t[TRAIN] Epoch=3/100, Step=4/21, loss=49.511169, lr=6e-06, time_each_step=0.77s, eta=0:23:33\n",
      "2021-08-15 16:25:43 [INFO]\t[TRAIN] Epoch=3/100, Step=6/21, loss=32.522598, lr=6e-06, time_each_step=0.77s, eta=0:23:32\n",
      "2021-08-15 16:25:44 [INFO]\t[TRAIN] Epoch=3/100, Step=8/21, loss=34.615768, lr=6e-06, time_each_step=0.75s, eta=0:23:29\n",
      "2021-08-15 16:25:45 [INFO]\t[TRAIN] Epoch=3/100, Step=10/21, loss=32.403919, lr=6e-06, time_each_step=0.76s, eta=0:23:28\n",
      "2021-08-15 16:25:46 [INFO]\t[TRAIN] Epoch=3/100, Step=12/21, loss=26.414862, lr=7e-06, time_each_step=0.76s, eta=0:23:27\n",
      "2021-08-15 16:25:47 [INFO]\t[TRAIN] Epoch=3/100, Step=14/21, loss=36.327484, lr=7e-06, time_each_step=0.76s, eta=0:23:25\n",
      "2021-08-15 16:25:48 [INFO]\t[TRAIN] Epoch=3/100, Step=16/21, loss=43.902901, lr=7e-06, time_each_step=0.76s, eta=0:23:24\n",
      "2021-08-15 16:25:49 [INFO]\t[TRAIN] Epoch=3/100, Step=18/21, loss=37.972061, lr=7e-06, time_each_step=0.77s, eta=0:23:23\n",
      "2021-08-15 16:25:50 [INFO]\t[TRAIN] Epoch=3/100, Step=20/21, loss=47.377468, lr=8e-06, time_each_step=0.76s, eta=0:23:21\n",
      "2021-08-15 16:25:50 [INFO]\t[TRAIN] Epoch 3 finished, loss=39.306206, lr=7e-06 .\n",
      "2021-08-15 16:25:55 [INFO]\t[TRAIN] Epoch=4/100, Step=1/21, loss=37.935757, lr=8e-06, time_each_step=0.74s, eta=0:25:39\n",
      "2021-08-15 16:25:57 [INFO]\t[TRAIN] Epoch=4/100, Step=3/21, loss=39.826492, lr=8e-06, time_each_step=0.73s, eta=0:25:37\n",
      "2021-08-15 16:25:58 [INFO]\t[TRAIN] Epoch=4/100, Step=5/21, loss=38.376026, lr=8e-06, time_each_step=0.78s, eta=0:25:38\n",
      "2021-08-15 16:25:59 [INFO]\t[TRAIN] Epoch=4/100, Step=7/21, loss=30.176128, lr=9e-06, time_each_step=0.77s, eta=0:25:36\n",
      "2021-08-15 16:26:01 [INFO]\t[TRAIN] Epoch=4/100, Step=9/21, loss=30.926619, lr=9e-06, time_each_step=0.77s, eta=0:25:34\n",
      "2021-08-15 16:26:01 [INFO]\t[TRAIN] Epoch=4/100, Step=11/21, loss=39.563931, lr=9e-06, time_each_step=0.78s, eta=0:25:33\n",
      "2021-08-15 16:26:03 [INFO]\t[TRAIN] Epoch=4/100, Step=13/21, loss=42.511864, lr=9e-06, time_each_step=0.8s, eta=0:25:33\n",
      "2021-08-15 16:26:04 [INFO]\t[TRAIN] Epoch=4/100, Step=15/21, loss=36.306332, lr=1e-05, time_each_step=0.8s, eta=0:25:31\n",
      "2021-08-15 16:26:06 [INFO]\t[TRAIN] Epoch=4/100, Step=17/21, loss=44.630745, lr=1e-05, time_each_step=0.84s, eta=0:25:31\n",
      "2021-08-15 16:26:07 [INFO]\t[TRAIN] Epoch=4/100, Step=19/21, loss=45.890747, lr=1e-05, time_each_step=0.86s, eta=0:25:30\n",
      "2021-08-15 16:26:08 [INFO]\t[TRAIN] Epoch=4/100, Step=21/21, loss=39.00581, lr=1e-05, time_each_step=0.62s, eta=0:25:20\n",
      "2021-08-15 16:26:08 [INFO]\t[TRAIN] Epoch 4 finished, loss=38.896637, lr=9e-06 .\n",
      "2021-08-15 16:26:15 [INFO]\t[TRAIN] Epoch=5/100, Step=2/21, loss=23.596802, lr=1.1e-05, time_each_step=0.89s, eta=0:29:2\n",
      "2021-08-15 16:26:16 [INFO]\t[TRAIN] Epoch=5/100, Step=4/21, loss=45.060856, lr=1.1e-05, time_each_step=0.9s, eta=0:29:0\n",
      "2021-08-15 16:26:17 [INFO]\t[TRAIN] Epoch=5/100, Step=6/21, loss=41.503555, lr=1.1e-05, time_each_step=0.9s, eta=0:28:59\n",
      "2021-08-15 16:26:19 [INFO]\t[TRAIN] Epoch=5/100, Step=8/21, loss=33.63356, lr=1.1e-05, time_each_step=0.91s, eta=0:28:58\n",
      "2021-08-15 16:26:20 [INFO]\t[TRAIN] Epoch=5/100, Step=10/21, loss=26.704277, lr=1.2e-05, time_each_step=0.93s, eta=0:28:56\n",
      "2021-08-15 16:26:21 [INFO]\t[TRAIN] Epoch=5/100, Step=12/21, loss=25.2061, lr=1.2e-05, time_each_step=0.91s, eta=0:28:54\n",
      "2021-08-15 16:26:22 [INFO]\t[TRAIN] Epoch=5/100, Step=14/21, loss=45.856743, lr=1.2e-05, time_each_step=0.91s, eta=0:28:52\n",
      "2021-08-15 16:26:23 [INFO]\t[TRAIN] Epoch=5/100, Step=16/21, loss=27.744232, lr=1.2e-05, time_each_step=0.87s, eta=0:28:48\n",
      "2021-08-15 16:26:24 [INFO]\t[TRAIN] Epoch=5/100, Step=18/21, loss=25.499886, lr=1.3e-05, time_each_step=0.84s, eta=0:28:46\n",
      "2021-08-15 16:26:24 [INFO]\t[TRAIN] Epoch=5/100, Step=20/21, loss=31.142822, lr=1.3e-05, time_each_step=0.83s, eta=0:28:44\n",
      "2021-08-15 16:26:25 [INFO]\t[TRAIN] Epoch 5 finished, loss=35.976402, lr=1.2e-05 .\n",
      "2021-08-15 16:26:33 [INFO]\t[TRAIN] Epoch=6/100, Step=1/21, loss=28.509623, lr=1.3e-05, time_each_step=0.9s, eta=0:27:24\n",
      "2021-08-15 16:26:34 [INFO]\t[TRAIN] Epoch=6/100, Step=3/21, loss=25.528414, lr=1.3e-05, time_each_step=0.88s, eta=0:27:21\n",
      "2021-08-15 16:26:35 [INFO]\t[TRAIN] Epoch=6/100, Step=5/21, loss=27.8354, lr=1.4e-05, time_each_step=0.87s, eta=0:27:19\n",
      "2021-08-15 16:26:36 [INFO]\t[TRAIN] Epoch=6/100, Step=7/21, loss=25.17075, lr=1.4e-05, time_each_step=0.86s, eta=0:27:17\n",
      "2021-08-15 16:26:37 [INFO]\t[TRAIN] Epoch=6/100, Step=9/21, loss=27.950249, lr=1.4e-05, time_each_step=0.87s, eta=0:27:15\n",
      "2021-08-15 16:26:39 [INFO]\t[TRAIN] Epoch=6/100, Step=11/21, loss=50.310207, lr=1.4e-05, time_each_step=0.9s, eta=0:27:15\n",
      "2021-08-15 16:26:40 [INFO]\t[TRAIN] Epoch=6/100, Step=13/21, loss=48.228844, lr=1.5e-05, time_each_step=0.9s, eta=0:27:13\n",
      "2021-08-15 16:26:41 [INFO]\t[TRAIN] Epoch=6/100, Step=15/21, loss=33.799461, lr=1.5e-05, time_each_step=0.9s, eta=0:27:11\n",
      "2021-08-15 16:26:42 [INFO]\t[TRAIN] Epoch=6/100, Step=17/21, loss=38.314323, lr=1.5e-05, time_each_step=0.9s, eta=0:27:9\n",
      "2021-08-15 16:26:43 [INFO]\t[TRAIN] Epoch=6/100, Step=19/21, loss=32.72916, lr=1.5e-05, time_each_step=0.91s, eta=0:27:8\n",
      "2021-08-15 16:26:43 [INFO]\t[TRAIN] Epoch=6/100, Step=21/21, loss=39.39069, lr=1.6e-05, time_each_step=0.53s, eta=0:26:53\n",
      "2021-08-15 16:26:43 [INFO]\t[TRAIN] Epoch 6 finished, loss=34.316082, lr=1.4e-05 .\n",
      "2021-08-15 16:26:51 [INFO]\t[TRAIN] Epoch=7/100, Step=2/21, loss=34.577869, lr=1.6e-05, time_each_step=0.85s, eta=0:29:14\n",
      "2021-08-15 16:26:52 [INFO]\t[TRAIN] Epoch=7/100, Step=4/21, loss=31.231388, lr=1.6e-05, time_each_step=0.85s, eta=0:29:13\n",
      "2021-08-15 16:26:54 [INFO]\t[TRAIN] Epoch=7/100, Step=6/21, loss=45.722649, lr=1.6e-05, time_each_step=0.88s, eta=0:29:12\n",
      "2021-08-15 16:26:54 [INFO]\t[TRAIN] Epoch=7/100, Step=8/21, loss=33.6409, lr=1.7e-05, time_each_step=0.86s, eta=0:29:10\n",
      "2021-08-15 16:26:55 [INFO]\t[TRAIN] Epoch=7/100, Step=10/21, loss=33.87674, lr=1.7e-05, time_each_step=0.83s, eta=0:29:6\n",
      "2021-08-15 16:26:57 [INFO]\t[TRAIN] Epoch=7/100, Step=12/21, loss=34.743313, lr=1.7e-05, time_each_step=0.83s, eta=0:29:5\n",
      "2021-08-15 16:26:58 [INFO]\t[TRAIN] Epoch=7/100, Step=14/21, loss=40.709019, lr=1.7e-05, time_each_step=0.84s, eta=0:29:4\n",
      "2021-08-15 16:26:59 [INFO]\t[TRAIN] Epoch=7/100, Step=16/21, loss=36.429604, lr=1.8e-05, time_each_step=0.85s, eta=0:29:2\n",
      "2021-08-15 16:27:00 [INFO]\t[TRAIN] Epoch=7/100, Step=18/21, loss=35.76577, lr=1.8e-05, time_each_step=0.86s, eta=0:29:1\n",
      "2021-08-15 16:27:01 [INFO]\t[TRAIN] Epoch=7/100, Step=20/21, loss=26.387802, lr=1.8e-05, time_each_step=0.87s, eta=0:29:0\n",
      "2021-08-15 16:27:01 [INFO]\t[TRAIN] Epoch 7 finished, loss=34.644707, lr=1.7e-05 .\n",
      "2021-08-15 16:27:07 [INFO]\t[TRAIN] Epoch=8/100, Step=1/21, loss=25.089867, lr=1.8e-05, time_each_step=0.78s, eta=0:27:59\n",
      "2021-08-15 16:27:08 [INFO]\t[TRAIN] Epoch=8/100, Step=3/21, loss=25.445391, lr=1.9e-05, time_each_step=0.79s, eta=0:27:58\n",
      "2021-08-15 16:27:09 [INFO]\t[TRAIN] Epoch=8/100, Step=5/21, loss=30.470804, lr=1.9e-05, time_each_step=0.77s, eta=0:27:55\n",
      "2021-08-15 16:27:10 [INFO]\t[TRAIN] Epoch=8/100, Step=7/21, loss=27.851891, lr=1.9e-05, time_each_step=0.79s, eta=0:27:55\n",
      "2021-08-15 16:27:12 [INFO]\t[TRAIN] Epoch=8/100, Step=9/21, loss=43.146923, lr=1.9e-05, time_each_step=0.83s, eta=0:27:55\n",
      "2021-08-15 16:27:13 [INFO]\t[TRAIN] Epoch=8/100, Step=11/21, loss=38.166943, lr=2e-05, time_each_step=0.82s, eta=0:27:53\n",
      "2021-08-15 16:27:14 [INFO]\t[TRAIN] Epoch=8/100, Step=13/21, loss=23.308205, lr=2e-05, time_each_step=0.81s, eta=0:27:51\n",
      "2021-08-15 16:27:15 [INFO]\t[TRAIN] Epoch=8/100, Step=15/21, loss=32.045963, lr=2e-05, time_each_step=0.8s, eta=0:27:49\n",
      "2021-08-15 16:27:16 [INFO]\t[TRAIN] Epoch=8/100, Step=17/21, loss=32.049088, lr=2e-05, time_each_step=0.79s, eta=0:27:47\n",
      "2021-08-15 16:27:17 [INFO]\t[TRAIN] Epoch=8/100, Step=19/21, loss=33.568363, lr=2.1e-05, time_each_step=0.81s, eta=0:27:46\n",
      "2021-08-15 16:27:17 [INFO]\t[TRAIN] Epoch=8/100, Step=21/21, loss=22.687029, lr=2.1e-05, time_each_step=0.53s, eta=0:27:35\n",
      "2021-08-15 16:27:17 [INFO]\t[TRAIN] Epoch 8 finished, loss=31.056992, lr=2e-05 .\n",
      "2021-08-15 16:27:23 [INFO]\t[TRAIN] Epoch=9/100, Step=2/21, loss=29.18454, lr=2.1e-05, time_each_step=0.79s, eta=0:25:25\n",
      "2021-08-15 16:27:24 [INFO]\t[TRAIN] Epoch=9/100, Step=4/21, loss=29.978683, lr=2.1e-05, time_each_step=0.77s, eta=0:25:23\n",
      "2021-08-15 16:27:26 [INFO]\t[TRAIN] Epoch=9/100, Step=6/21, loss=35.59697, lr=2.2e-05, time_each_step=0.77s, eta=0:25:21\n",
      "2021-08-15 16:27:27 [INFO]\t[TRAIN] Epoch=9/100, Step=8/21, loss=29.669764, lr=2.2e-05, time_each_step=0.76s, eta=0:25:20\n",
      "2021-08-15 16:27:29 [INFO]\t[TRAIN] Epoch=9/100, Step=10/21, loss=30.052841, lr=2.2e-05, time_each_step=0.77s, eta=0:25:18\n",
      "2021-08-15 16:27:29 [INFO]\t[TRAIN] Epoch=9/100, Step=12/21, loss=27.189606, lr=2.2e-05, time_each_step=0.77s, eta=0:25:17\n",
      "2021-08-15 16:27:30 [INFO]\t[TRAIN] Epoch=9/100, Step=14/21, loss=30.192093, lr=2.3e-05, time_each_step=0.77s, eta=0:25:15\n",
      "2021-08-15 16:27:31 [INFO]\t[TRAIN] Epoch=9/100, Step=16/21, loss=28.199261, lr=2.3e-05, time_each_step=0.79s, eta=0:25:14\n",
      "2021-08-15 16:27:32 [INFO]\t[TRAIN] Epoch=9/100, Step=18/21, loss=37.180111, lr=2.3e-05, time_each_step=0.76s, eta=0:25:12\n",
      "2021-08-15 16:27:33 [INFO]\t[TRAIN] Epoch=9/100, Step=20/21, loss=20.447094, lr=2.3e-05, time_each_step=0.79s, eta=0:25:11\n",
      "2021-08-15 16:27:33 [INFO]\t[TRAIN] Epoch 9 finished, loss=30.728998, lr=2.2e-05 .\n",
      "2021-08-15 16:27:38 [INFO]\t[TRAIN] Epoch=10/100, Step=1/21, loss=40.177547, lr=2.4e-05, time_each_step=0.75s, eta=0:24:48\n",
      "2021-08-15 16:27:40 [INFO]\t[TRAIN] Epoch=10/100, Step=3/21, loss=35.032169, lr=2.4e-05, time_each_step=0.77s, eta=0:24:48\n",
      "2021-08-15 16:27:41 [INFO]\t[TRAIN] Epoch=10/100, Step=5/21, loss=33.433914, lr=2.4e-05, time_each_step=0.79s, eta=0:24:47\n",
      "2021-08-15 16:27:43 [INFO]\t[TRAIN] Epoch=10/100, Step=7/21, loss=44.92218, lr=2.4e-05, time_each_step=0.78s, eta=0:24:45\n",
      "2021-08-15 16:27:44 [INFO]\t[TRAIN] Epoch=10/100, Step=9/21, loss=40.911453, lr=2.5e-05, time_each_step=0.79s, eta=0:24:44\n",
      "2021-08-15 16:27:46 [INFO]\t[TRAIN] Epoch=10/100, Step=11/21, loss=30.673603, lr=2.5e-05, time_each_step=0.81s, eta=0:24:43\n",
      "2021-08-15 16:27:46 [INFO]\t[TRAIN] Epoch=10/100, Step=13/21, loss=27.741446, lr=2.5e-05, time_each_step=0.8s, eta=0:24:41\n",
      "2021-08-15 16:27:47 [INFO]\t[TRAIN] Epoch=10/100, Step=15/21, loss=27.696363, lr=2.5e-05, time_each_step=0.79s, eta=0:24:39\n",
      "2021-08-15 16:27:48 [INFO]\t[TRAIN] Epoch=10/100, Step=17/21, loss=34.2015, lr=2.6e-05, time_each_step=0.79s, eta=0:24:38\n",
      "2021-08-15 16:27:49 [INFO]\t[TRAIN] Epoch=10/100, Step=19/21, loss=31.585171, lr=2.6e-05, time_each_step=0.78s, eta=0:24:36\n",
      "2021-08-15 16:27:49 [INFO]\t[TRAIN] Epoch=10/100, Step=21/21, loss=27.559149, lr=2.6e-05, time_each_step=0.54s, eta=0:24:26\n",
      "2021-08-15 16:27:49 [INFO]\t[TRAIN] Epoch 10 finished, loss=34.636566, lr=2.5e-05 .\n",
      "2021-08-15 16:27:58 [INFO]\t[TRAIN] Epoch=11/100, Step=2/21, loss=22.525215, lr=2.6e-05, time_each_step=0.89s, eta=0:24:11\n",
      "2021-08-15 16:27:59 [INFO]\t[TRAIN] Epoch=11/100, Step=4/21, loss=24.335308, lr=2.7e-05, time_each_step=0.89s, eta=0:24:10\n",
      "2021-08-15 16:28:01 [INFO]\t[TRAIN] Epoch=11/100, Step=6/21, loss=30.64044, lr=2.7e-05, time_each_step=0.89s, eta=0:24:8\n",
      "2021-08-15 16:28:02 [INFO]\t[TRAIN] Epoch=11/100, Step=8/21, loss=31.282425, lr=2.7e-05, time_each_step=0.87s, eta=0:24:5\n",
      "2021-08-15 16:28:03 [INFO]\t[TRAIN] Epoch=11/100, Step=10/21, loss=30.627733, lr=2.7e-05, time_each_step=0.84s, eta=0:24:2\n",
      "2021-08-15 16:28:04 [INFO]\t[TRAIN] Epoch=11/100, Step=12/21, loss=25.064114, lr=2.8e-05, time_each_step=0.87s, eta=0:24:2\n",
      "2021-08-15 16:28:05 [INFO]\t[TRAIN] Epoch=11/100, Step=14/21, loss=26.210831, lr=2.8e-05, time_each_step=0.88s, eta=0:24:0\n",
      "2021-08-15 16:28:06 [INFO]\t[TRAIN] Epoch=11/100, Step=16/21, loss=31.567966, lr=2.8e-05, time_each_step=0.89s, eta=0:23:59\n",
      "2021-08-15 16:28:06 [INFO]\t[TRAIN] Epoch=11/100, Step=18/21, loss=26.592102, lr=2.8e-05, time_each_step=0.89s, eta=0:23:57\n",
      "2021-08-15 16:28:07 [INFO]\t[TRAIN] Epoch=11/100, Step=20/21, loss=23.37302, lr=2.9e-05, time_each_step=0.91s, eta=0:23:56\n",
      "2021-08-15 16:28:08 [INFO]\t[TRAIN] Epoch 11 finished, loss=29.224272, lr=2.8e-05 .\n",
      "2021-08-15 16:28:16 [INFO]\t[TRAIN] Epoch=12/100, Step=1/21, loss=25.215374, lr=2.9e-05, time_each_step=0.9s, eta=0:27:52\n",
      "2021-08-15 16:28:17 [INFO]\t[TRAIN] Epoch=12/100, Step=3/21, loss=24.380575, lr=2.9e-05, time_each_step=0.88s, eta=0:27:49\n",
      "2021-08-15 16:28:18 [INFO]\t[TRAIN] Epoch=12/100, Step=5/21, loss=38.074104, lr=2.9e-05, time_each_step=0.88s, eta=0:27:47\n",
      "2021-08-15 16:28:19 [INFO]\t[TRAIN] Epoch=12/100, Step=7/21, loss=29.015059, lr=3e-05, time_each_step=0.88s, eta=0:27:45\n",
      "2021-08-15 16:28:20 [INFO]\t[TRAIN] Epoch=12/100, Step=9/21, loss=24.869711, lr=3e-05, time_each_step=0.89s, eta=0:27:44\n",
      "2021-08-15 16:28:22 [INFO]\t[TRAIN] Epoch=12/100, Step=11/21, loss=29.664753, lr=3e-05, time_each_step=0.91s, eta=0:27:43\n",
      "2021-08-15 16:28:23 [INFO]\t[TRAIN] Epoch=12/100, Step=13/21, loss=26.85424, lr=3e-05, time_each_step=0.9s, eta=0:27:41\n",
      "2021-08-15 16:28:24 [INFO]\t[TRAIN] Epoch=12/100, Step=15/21, loss=24.837803, lr=3.1e-05, time_each_step=0.91s, eta=0:27:40\n",
      "2021-08-15 16:28:25 [INFO]\t[TRAIN] Epoch=12/100, Step=17/21, loss=33.19545, lr=3.1e-05, time_each_step=0.93s, eta=0:27:38\n",
      "2021-08-15 16:28:26 [INFO]\t[TRAIN] Epoch=12/100, Step=19/21, loss=27.291723, lr=3.1e-05, time_each_step=0.93s, eta=0:27:36\n",
      "2021-08-15 16:28:27 [INFO]\t[TRAIN] Epoch=12/100, Step=21/21, loss=25.479965, lr=3.1e-05, time_each_step=0.56s, eta=0:27:22\n",
      "2021-08-15 16:28:27 [INFO]\t[TRAIN] Epoch 12 finished, loss=29.446951, lr=3e-05 .\n",
      "2021-08-15 16:28:36 [INFO]\t[TRAIN] Epoch=13/100, Step=2/21, loss=34.106365, lr=3.2e-05, time_each_step=0.96s, eta=0:28:44\n",
      "2021-08-15 16:28:38 [INFO]\t[TRAIN] Epoch=13/100, Step=4/21, loss=31.325287, lr=3.2e-05, time_each_step=0.96s, eta=0:28:42\n",
      "2021-08-15 16:28:39 [INFO]\t[TRAIN] Epoch=13/100, Step=6/21, loss=23.061802, lr=3.2e-05, time_each_step=1.0s, eta=0:28:42\n",
      "2021-08-15 16:28:41 [INFO]\t[TRAIN] Epoch=13/100, Step=8/21, loss=32.356457, lr=3.2e-05, time_each_step=1.01s, eta=0:28:40\n",
      "2021-08-15 16:28:42 [INFO]\t[TRAIN] Epoch=13/100, Step=10/21, loss=29.876423, lr=3.3e-05, time_each_step=1.0s, eta=0:28:38\n",
      "2021-08-15 16:28:43 [INFO]\t[TRAIN] Epoch=13/100, Step=12/21, loss=26.902676, lr=3.3e-05, time_each_step=0.99s, eta=0:28:36\n",
      "2021-08-15 16:28:44 [INFO]\t[TRAIN] Epoch=13/100, Step=14/21, loss=26.844698, lr=3.3e-05, time_each_step=0.98s, eta=0:28:33\n",
      "2021-08-15 16:28:45 [INFO]\t[TRAIN] Epoch=13/100, Step=16/21, loss=32.35144, lr=3.3e-05, time_each_step=0.98s, eta=0:28:31\n",
      "2021-08-15 16:28:46 [INFO]\t[TRAIN] Epoch=13/100, Step=18/21, loss=28.049816, lr=3.4e-05, time_each_step=1.0s, eta=0:28:30\n",
      "2021-08-15 16:28:47 [INFO]\t[TRAIN] Epoch=13/100, Step=20/21, loss=25.188957, lr=3.4e-05, time_each_step=0.99s, eta=0:28:28\n",
      "2021-08-15 16:28:47 [INFO]\t[TRAIN] Epoch 13 finished, loss=27.531651, lr=3.3e-05 .\n",
      "2021-08-15 16:28:54 [INFO]\t[TRAIN] Epoch=14/100, Step=1/21, loss=21.63722, lr=3.4e-05, time_each_step=0.87s, eta=0:29:37\n",
      "2021-08-15 16:28:55 [INFO]\t[TRAIN] Epoch=14/100, Step=3/21, loss=22.125761, lr=3.4e-05, time_each_step=0.88s, eta=0:29:36\n",
      "2021-08-15 16:28:57 [INFO]\t[TRAIN] Epoch=14/100, Step=5/21, loss=30.856205, lr=3.5e-05, time_each_step=0.86s, eta=0:29:33\n",
      "2021-08-15 16:28:58 [INFO]\t[TRAIN] Epoch=14/100, Step=7/21, loss=26.95253, lr=3.5e-05, time_each_step=0.86s, eta=0:29:31\n",
      "2021-08-15 16:28:59 [INFO]\t[TRAIN] Epoch=14/100, Step=9/21, loss=23.82272, lr=3.5e-05, time_each_step=0.85s, eta=0:29:29\n",
      "2021-08-15 16:29:00 [INFO]\t[TRAIN] Epoch=14/100, Step=11/21, loss=30.995405, lr=3.5e-05, time_each_step=0.88s, eta=0:29:29\n",
      "2021-08-15 16:29:01 [INFO]\t[TRAIN] Epoch=14/100, Step=13/21, loss=25.533892, lr=3.6e-05, time_each_step=0.89s, eta=0:29:28\n",
      "2021-08-15 16:29:02 [INFO]\t[TRAIN] Epoch=14/100, Step=15/21, loss=27.620903, lr=3.6e-05, time_each_step=0.88s, eta=0:29:26\n",
      "2021-08-15 16:29:03 [INFO]\t[TRAIN] Epoch=14/100, Step=17/21, loss=25.712566, lr=3.6e-05, time_each_step=0.88s, eta=0:29:23\n",
      "2021-08-15 16:29:05 [INFO]\t[TRAIN] Epoch=14/100, Step=19/21, loss=20.38739, lr=3.6e-05, time_each_step=0.91s, eta=0:29:23\n",
      "2021-08-15 16:29:05 [INFO]\t[TRAIN] Epoch=14/100, Step=21/21, loss=28.554203, lr=3.7e-05, time_each_step=0.6s, eta=0:29:10\n",
      "2021-08-15 16:29:05 [INFO]\t[TRAIN] Epoch 14 finished, loss=26.961313, lr=3.5e-05 .\n",
      "2021-08-15 16:29:12 [INFO]\t[TRAIN] Epoch=15/100, Step=2/21, loss=37.828293, lr=3.7e-05, time_each_step=0.82s, eta=0:26:59\n",
      "2021-08-15 16:29:13 [INFO]\t[TRAIN] Epoch=15/100, Step=4/21, loss=25.397305, lr=3.7e-05, time_each_step=0.79s, eta=0:26:55\n",
      "2021-08-15 16:29:14 [INFO]\t[TRAIN] Epoch=15/100, Step=6/21, loss=26.709593, lr=3.7e-05, time_each_step=0.8s, eta=0:26:54\n",
      "2021-08-15 16:29:15 [INFO]\t[TRAIN] Epoch=15/100, Step=8/21, loss=26.318588, lr=3.8e-05, time_each_step=0.79s, eta=0:26:52\n",
      "2021-08-15 16:29:16 [INFO]\t[TRAIN] Epoch=15/100, Step=10/21, loss=24.411135, lr=3.8e-05, time_each_step=0.79s, eta=0:26:51\n",
      "2021-08-15 16:29:17 [INFO]\t[TRAIN] Epoch=15/100, Step=12/21, loss=30.668005, lr=3.8e-05, time_each_step=0.79s, eta=0:26:49\n",
      "2021-08-15 16:29:18 [INFO]\t[TRAIN] Epoch=15/100, Step=14/21, loss=26.403553, lr=3.8e-05, time_each_step=0.78s, eta=0:26:47\n",
      "2021-08-15 16:29:19 [INFO]\t[TRAIN] Epoch=15/100, Step=16/21, loss=31.850033, lr=3.9e-05, time_each_step=0.79s, eta=0:26:46\n",
      "2021-08-15 16:29:20 [INFO]\t[TRAIN] Epoch=15/100, Step=18/21, loss=19.054918, lr=3.9e-05, time_each_step=0.79s, eta=0:26:44\n",
      "2021-08-15 16:29:21 [INFO]\t[TRAIN] Epoch=15/100, Step=20/21, loss=35.30212, lr=3.9e-05, time_each_step=0.79s, eta=0:26:42\n",
      "2021-08-15 16:29:22 [INFO]\t[TRAIN] Epoch 15 finished, loss=26.522537, lr=3.8e-05 .\n",
      "2021-08-15 16:29:29 [INFO]\t[TRAIN] Epoch=16/100, Step=1/21, loss=26.373371, lr=3.9e-05, time_each_step=0.85s, eta=0:23:35\n",
      "2021-08-15 16:29:30 [INFO]\t[TRAIN] Epoch=16/100, Step=3/21, loss=26.171429, lr=4e-05, time_each_step=0.87s, eta=0:23:34\n",
      "2021-08-15 16:29:31 [INFO]\t[TRAIN] Epoch=16/100, Step=5/21, loss=25.432774, lr=4e-05, time_each_step=0.86s, eta=0:23:32\n",
      "2021-08-15 16:29:32 [INFO]\t[TRAIN] Epoch=16/100, Step=7/21, loss=22.594954, lr=4e-05, time_each_step=0.87s, eta=0:23:31\n",
      "2021-08-15 16:29:34 [INFO]\t[TRAIN] Epoch=16/100, Step=9/21, loss=30.241121, lr=4e-05, time_each_step=0.87s, eta=0:23:29\n",
      "2021-08-15 16:29:35 [INFO]\t[TRAIN] Epoch=16/100, Step=11/21, loss=28.01952, lr=4.1e-05, time_each_step=0.87s, eta=0:23:27\n",
      "2021-08-15 16:29:36 [INFO]\t[TRAIN] Epoch=16/100, Step=13/21, loss=25.521463, lr=4.1e-05, time_each_step=0.88s, eta=0:23:26\n",
      "2021-08-15 16:29:36 [INFO]\t[TRAIN] Epoch=16/100, Step=15/21, loss=26.044357, lr=4.1e-05, time_each_step=0.86s, eta=0:23:24\n",
      "2021-08-15 16:29:37 [INFO]\t[TRAIN] Epoch=16/100, Step=17/21, loss=31.71524, lr=4.1e-05, time_each_step=0.85s, eta=0:23:21\n",
      "2021-08-15 16:29:38 [INFO]\t[TRAIN] Epoch=16/100, Step=19/21, loss=28.47636, lr=4.2e-05, time_each_step=0.85s, eta=0:23:20\n",
      "2021-08-15 16:29:39 [INFO]\t[TRAIN] Epoch=16/100, Step=21/21, loss=21.294724, lr=4.2e-05, time_each_step=0.52s, eta=0:23:6\n",
      "2021-08-15 16:29:39 [INFO]\t[TRAIN] Epoch 16 finished, loss=26.879854, lr=4.1e-05 .\n",
      "2021-08-15 16:29:44 [INFO]\t[TRAIN] Epoch=17/100, Step=2/21, loss=22.694193, lr=4.2e-05, time_each_step=0.7s, eta=0:24:26\n",
      "2021-08-15 16:29:45 [INFO]\t[TRAIN] Epoch=17/100, Step=4/21, loss=23.321568, lr=4.2e-05, time_each_step=0.71s, eta=0:24:25\n",
      "2021-08-15 16:29:46 [INFO]\t[TRAIN] Epoch=17/100, Step=6/21, loss=25.033792, lr=4.3e-05, time_each_step=0.71s, eta=0:24:23\n",
      "2021-08-15 16:29:48 [INFO]\t[TRAIN] Epoch=17/100, Step=8/21, loss=27.378273, lr=4.3e-05, time_each_step=0.69s, eta=0:24:21\n",
      "2021-08-15 16:29:49 [INFO]\t[TRAIN] Epoch=17/100, Step=10/21, loss=30.890083, lr=4.3e-05, time_each_step=0.7s, eta=0:24:20\n",
      "2021-08-15 16:29:50 [INFO]\t[TRAIN] Epoch=17/100, Step=12/21, loss=21.602005, lr=4.3e-05, time_each_step=0.72s, eta=0:24:19\n",
      "2021-08-15 16:29:51 [INFO]\t[TRAIN] Epoch=17/100, Step=14/21, loss=22.863461, lr=4.4e-05, time_each_step=0.72s, eta=0:24:18\n",
      "2021-08-15 16:29:52 [INFO]\t[TRAIN] Epoch=17/100, Step=16/21, loss=26.748219, lr=4.4e-05, time_each_step=0.71s, eta=0:24:16\n",
      "2021-08-15 16:29:52 [INFO]\t[TRAIN] Epoch=17/100, Step=18/21, loss=24.525921, lr=4.4e-05, time_each_step=0.7s, eta=0:24:14\n",
      "2021-08-15 16:29:53 [INFO]\t[TRAIN] Epoch=17/100, Step=20/21, loss=27.481834, lr=4.4e-05, time_each_step=0.7s, eta=0:24:13\n",
      "2021-08-15 16:29:54 [INFO]\t[TRAIN] Epoch 17 finished, loss=25.621355, lr=4.3e-05 .\n",
      "2021-08-15 16:29:58 [INFO]\t[TRAIN] Epoch=18/100, Step=1/21, loss=23.127674, lr=4.5e-05, time_each_step=0.71s, eta=0:20:32\n",
      "2021-08-15 16:29:59 [INFO]\t[TRAIN] Epoch=18/100, Step=3/21, loss=26.805344, lr=4.5e-05, time_each_step=0.71s, eta=0:20:30\n",
      "2021-08-15 16:30:01 [INFO]\t[TRAIN] Epoch=18/100, Step=5/21, loss=26.772913, lr=4.5e-05, time_each_step=0.71s, eta=0:20:28\n",
      "2021-08-15 16:30:02 [INFO]\t[TRAIN] Epoch=18/100, Step=7/21, loss=19.35791, lr=4.5e-05, time_each_step=0.73s, eta=0:20:28\n",
      "2021-08-15 16:30:03 [INFO]\t[TRAIN] Epoch=18/100, Step=9/21, loss=29.959568, lr=4.6e-05, time_each_step=0.72s, eta=0:20:26\n",
      "2021-08-15 16:30:04 [INFO]\t[TRAIN] Epoch=18/100, Step=11/21, loss=22.334991, lr=4.6e-05, time_each_step=0.72s, eta=0:20:25\n",
      "2021-08-15 16:30:06 [INFO]\t[TRAIN] Epoch=18/100, Step=13/21, loss=21.22315, lr=4.6e-05, time_each_step=0.74s, eta=0:20:24\n",
      "2021-08-15 16:30:07 [INFO]\t[TRAIN] Epoch=18/100, Step=15/21, loss=22.173168, lr=4.6e-05, time_each_step=0.76s, eta=0:20:23\n",
      "2021-08-15 16:30:08 [INFO]\t[TRAIN] Epoch=18/100, Step=17/21, loss=21.199322, lr=4.7e-05, time_each_step=0.77s, eta=0:20:22\n",
      "2021-08-15 16:30:09 [INFO]\t[TRAIN] Epoch=18/100, Step=19/21, loss=21.435333, lr=4.7e-05, time_each_step=0.79s, eta=0:20:21\n",
      "2021-08-15 16:30:10 [INFO]\t[TRAIN] Epoch=18/100, Step=21/21, loss=20.307459, lr=4.7e-05, time_each_step=0.57s, eta=0:20:12\n",
      "2021-08-15 16:30:10 [INFO]\t[TRAIN] Epoch 18 finished, loss=24.216511, lr=4.6e-05 .\n",
      "2021-08-15 16:30:15 [INFO]\t[TRAIN] Epoch=19/100, Step=2/21, loss=19.02515, lr=4.7e-05, time_each_step=0.79s, eta=0:22:21\n",
      "2021-08-15 16:30:16 [INFO]\t[TRAIN] Epoch=19/100, Step=4/21, loss=22.674051, lr=4.8e-05, time_each_step=0.78s, eta=0:22:19\n",
      "2021-08-15 16:30:17 [INFO]\t[TRAIN] Epoch=19/100, Step=6/21, loss=34.689278, lr=4.8e-05, time_each_step=0.76s, eta=0:22:16\n",
      "2021-08-15 16:30:19 [INFO]\t[TRAIN] Epoch=19/100, Step=8/21, loss=32.935043, lr=4.8e-05, time_each_step=0.78s, eta=0:22:16\n",
      "2021-08-15 16:30:20 [INFO]\t[TRAIN] Epoch=19/100, Step=10/21, loss=23.246685, lr=4.8e-05, time_each_step=0.78s, eta=0:22:14\n",
      "2021-08-15 16:30:21 [INFO]\t[TRAIN] Epoch=19/100, Step=12/21, loss=19.097979, lr=4.9e-05, time_each_step=0.75s, eta=0:22:12\n",
      "2021-08-15 16:30:22 [INFO]\t[TRAIN] Epoch=19/100, Step=14/21, loss=23.50877, lr=4.9e-05, time_each_step=0.77s, eta=0:22:11\n",
      "2021-08-15 16:30:23 [INFO]\t[TRAIN] Epoch=19/100, Step=16/21, loss=24.932533, lr=4.9e-05, time_each_step=0.76s, eta=0:22:9\n",
      "2021-08-15 16:30:24 [INFO]\t[TRAIN] Epoch=19/100, Step=18/21, loss=25.498104, lr=4.9e-05, time_each_step=0.73s, eta=0:22:6\n",
      "2021-08-15 16:30:25 [INFO]\t[TRAIN] Epoch=19/100, Step=20/21, loss=27.209333, lr=5e-05, time_each_step=0.75s, eta=0:22:5\n",
      "2021-08-15 16:30:25 [INFO]\t[TRAIN] Epoch 19 finished, loss=24.765959, lr=4.9e-05 .\n",
      "2021-08-15 16:30:31 [INFO]\t[TRAIN] Epoch=20/100, Step=1/21, loss=17.110479, lr=5e-05, time_each_step=0.78s, eta=0:21:8\n",
      "2021-08-15 16:30:32 [INFO]\t[TRAIN] Epoch=20/100, Step=3/21, loss=21.108955, lr=5e-05, time_each_step=0.79s, eta=0:21:7\n",
      "2021-08-15 16:30:34 [INFO]\t[TRAIN] Epoch=20/100, Step=5/21, loss=20.922529, lr=5e-05, time_each_step=0.81s, eta=0:21:7\n",
      "2021-08-15 16:30:35 [INFO]\t[TRAIN] Epoch=20/100, Step=7/21, loss=25.341093, lr=5.1e-05, time_each_step=0.81s, eta=0:21:5\n",
      "2021-08-15 16:30:36 [INFO]\t[TRAIN] Epoch=20/100, Step=9/21, loss=39.198071, lr=5.1e-05, time_each_step=0.8s, eta=0:21:3\n",
      "2021-08-15 16:30:37 [INFO]\t[TRAIN] Epoch=20/100, Step=11/21, loss=29.284279, lr=5.1e-05, time_each_step=0.81s, eta=0:21:2\n",
      "2021-08-15 16:30:38 [INFO]\t[TRAIN] Epoch=20/100, Step=13/21, loss=26.538664, lr=5.1e-05, time_each_step=0.79s, eta=0:20:59\n",
      "2021-08-15 16:30:39 [INFO]\t[TRAIN] Epoch=20/100, Step=15/21, loss=28.681648, lr=5.2e-05, time_each_step=0.79s, eta=0:20:58\n",
      "2021-08-15 16:30:40 [INFO]\t[TRAIN] Epoch=20/100, Step=17/21, loss=29.176111, lr=5.2e-05, time_each_step=0.81s, eta=0:20:57\n",
      "2021-08-15 16:30:41 [INFO]\t[TRAIN] Epoch=20/100, Step=19/21, loss=25.596436, lr=5.2e-05, time_each_step=0.82s, eta=0:20:56\n",
      "2021-08-15 16:30:42 [INFO]\t[TRAIN] Epoch=20/100, Step=21/21, loss=21.868093, lr=5.2e-05, time_each_step=0.57s, eta=0:20:45\n",
      "2021-08-15 16:30:42 [INFO]\t[TRAIN] Epoch 20 finished, loss=25.226929, lr=5.1e-05 .\n",
      "2021-08-15 16:30:42 [INFO]\tStart to evaluating(total_samples=98, total_steps=7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 7/7 [00:06<00:00,  1.09it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:30:49 [INFO]\t[EVAL] Finished, Epoch=20, bbox_map=0.74148 .\n",
      "2021-08-15 16:30:53 [INFO]\tModel saved in output/ppyolo/best_model.\n",
      "2021-08-15 16:30:55 [INFO]\tModel saved in output/ppyolo/epoch_20.\n",
      "2021-08-15 16:30:55 [INFO]\tCurrent evaluated best model in eval_dataset is epoch_20, bbox_map=0.7414798688612267\n",
      "2021-08-15 16:31:00 [INFO]\t[TRAIN] Epoch=21/100, Step=2/21, loss=19.419821, lr=5.3e-05, time_each_step=0.75s, eta=0:23:32\n",
      "2021-08-15 16:31:02 [INFO]\t[TRAIN] Epoch=21/100, Step=4/21, loss=20.205702, lr=5.3e-05, time_each_step=0.75s, eta=0:23:31\n",
      "2021-08-15 16:31:03 [INFO]\t[TRAIN] Epoch=21/100, Step=6/21, loss=22.129246, lr=5.3e-05, time_each_step=0.77s, eta=0:23:29\n",
      "2021-08-15 16:31:05 [INFO]\t[TRAIN] Epoch=21/100, Step=8/21, loss=30.621241, lr=5.3e-05, time_each_step=0.79s, eta=0:23:28\n",
      "2021-08-15 16:31:06 [INFO]\t[TRAIN] Epoch=21/100, Step=10/21, loss=42.015862, lr=5.4e-05, time_each_step=0.78s, eta=0:23:26\n",
      "2021-08-15 16:31:07 [INFO]\t[TRAIN] Epoch=21/100, Step=12/21, loss=23.48694, lr=5.4e-05, time_each_step=0.78s, eta=0:23:25\n",
      "2021-08-15 16:31:08 [INFO]\t[TRAIN] Epoch=21/100, Step=14/21, loss=26.198685, lr=5.4e-05, time_each_step=0.78s, eta=0:23:23\n",
      "2021-08-15 16:31:08 [INFO]\t[TRAIN] Epoch=21/100, Step=16/21, loss=23.005604, lr=5.4e-05, time_each_step=0.77s, eta=0:23:22\n",
      "2021-08-15 16:31:09 [INFO]\t[TRAIN] Epoch=21/100, Step=18/21, loss=19.227613, lr=5.5e-05, time_each_step=0.75s, eta=0:23:20\n",
      "2021-08-15 16:31:10 [INFO]\t[TRAIN] Epoch=21/100, Step=20/21, loss=21.823423, lr=5.5e-05, time_each_step=0.73s, eta=0:23:18\n",
      "2021-08-15 16:31:10 [INFO]\t[TRAIN] Epoch 21 finished, loss=23.967041, lr=5.4e-05 .\n",
      "2021-08-15 16:31:17 [INFO]\t[TRAIN] Epoch=22/100, Step=1/21, loss=28.976915, lr=5.5e-05, time_each_step=0.84s, eta=0:20:35\n",
      "2021-08-15 16:31:19 [INFO]\t[TRAIN] Epoch=22/100, Step=3/21, loss=25.543558, lr=5.5e-05, time_each_step=0.86s, eta=0:20:34\n",
      "2021-08-15 16:31:20 [INFO]\t[TRAIN] Epoch=22/100, Step=5/21, loss=26.206059, lr=5.6e-05, time_each_step=0.85s, eta=0:20:32\n",
      "2021-08-15 16:31:21 [INFO]\t[TRAIN] Epoch=22/100, Step=7/21, loss=17.069277, lr=5.6e-05, time_each_step=0.82s, eta=0:20:30\n",
      "2021-08-15 16:31:23 [INFO]\t[TRAIN] Epoch=22/100, Step=9/21, loss=18.104591, lr=5.6e-05, time_each_step=0.83s, eta=0:20:28\n",
      "2021-08-15 16:31:24 [INFO]\t[TRAIN] Epoch=22/100, Step=11/21, loss=25.262644, lr=5.6e-05, time_each_step=0.86s, eta=0:20:27\n",
      "2021-08-15 16:31:25 [INFO]\t[TRAIN] Epoch=22/100, Step=13/21, loss=25.216915, lr=5.7e-05, time_each_step=0.86s, eta=0:20:25\n",
      "2021-08-15 16:31:25 [INFO]\t[TRAIN] Epoch=22/100, Step=15/21, loss=22.837494, lr=5.7e-05, time_each_step=0.86s, eta=0:20:23\n",
      "2021-08-15 16:31:26 [INFO]\t[TRAIN] Epoch=22/100, Step=17/21, loss=27.346003, lr=5.7e-05, time_each_step=0.85s, eta=0:20:22\n",
      "2021-08-15 16:31:27 [INFO]\t[TRAIN] Epoch=22/100, Step=19/21, loss=21.797905, lr=5.7e-05, time_each_step=0.85s, eta=0:20:20\n",
      "2021-08-15 16:31:28 [INFO]\t[TRAIN] Epoch=22/100, Step=21/21, loss=24.400276, lr=5.8e-05, time_each_step=0.53s, eta=0:20:18\n",
      "2021-08-15 16:31:28 [INFO]\t[TRAIN] Epoch 22 finished, loss=24.263773, lr=5.6e-05 .\n",
      "2021-08-15 16:31:35 [INFO]\t[TRAIN] Epoch=23/100, Step=2/21, loss=24.212828, lr=5.8e-05, time_each_step=0.78s, eta=0:23:54\n",
      "2021-08-15 16:31:36 [INFO]\t[TRAIN] Epoch=23/100, Step=4/21, loss=20.841061, lr=5.8e-05, time_each_step=0.8s, eta=0:23:53\n",
      "2021-08-15 16:31:38 [INFO]\t[TRAIN] Epoch=23/100, Step=6/21, loss=25.719856, lr=5.8e-05, time_each_step=0.81s, eta=0:23:51\n",
      "2021-08-15 16:31:39 [INFO]\t[TRAIN] Epoch=23/100, Step=8/21, loss=25.837055, lr=5.9e-05, time_each_step=0.8s, eta=0:23:49\n",
      "2021-08-15 16:31:40 [INFO]\t[TRAIN] Epoch=23/100, Step=10/21, loss=23.250099, lr=5.9e-05, time_each_step=0.79s, eta=0:23:48\n",
      "2021-08-15 16:31:41 [INFO]\t[TRAIN] Epoch=23/100, Step=12/21, loss=29.991024, lr=5.9e-05, time_each_step=0.81s, eta=0:23:46\n",
      "2021-08-15 16:31:42 [INFO]\t[TRAIN] Epoch=23/100, Step=14/21, loss=21.482742, lr=5.9e-05, time_each_step=0.83s, eta=0:23:45\n",
      "2021-08-15 16:31:43 [INFO]\t[TRAIN] Epoch=23/100, Step=16/21, loss=20.960714, lr=6e-05, time_each_step=0.83s, eta=0:23:43\n",
      "2021-08-15 16:31:43 [INFO]\t[TRAIN] Epoch=23/100, Step=18/21, loss=31.903288, lr=6e-05, time_each_step=0.83s, eta=0:23:42\n",
      "2021-08-15 16:31:44 [INFO]\t[TRAIN] Epoch=23/100, Step=20/21, loss=23.90313, lr=6e-05, time_each_step=0.83s, eta=0:23:40\n",
      "2021-08-15 16:31:45 [INFO]\t[TRAIN] Epoch 23 finished, loss=24.087351, lr=5.9e-05 .\n",
      "2021-08-15 16:31:52 [INFO]\t[TRAIN] Epoch=24/100, Step=1/21, loss=21.964809, lr=6e-05, time_each_step=0.87s, eta=0:22:51\n",
      "2021-08-15 16:31:53 [INFO]\t[TRAIN] Epoch=24/100, Step=3/21, loss=26.685614, lr=6.1e-05, time_each_step=0.84s, eta=0:22:48\n",
      "2021-08-15 16:31:54 [INFO]\t[TRAIN] Epoch=24/100, Step=5/21, loss=18.669069, lr=6.1e-05, time_each_step=0.83s, eta=0:22:46\n",
      "2021-08-15 16:31:55 [INFO]\t[TRAIN] Epoch=24/100, Step=7/21, loss=22.068932, lr=6.1e-05, time_each_step=0.83s, eta=0:22:45\n",
      "2021-08-15 16:31:57 [INFO]\t[TRAIN] Epoch=24/100, Step=9/21, loss=23.543465, lr=6.1e-05, time_each_step=0.85s, eta=0:22:43\n",
      "2021-08-15 16:31:58 [INFO]\t[TRAIN] Epoch=24/100, Step=11/21, loss=26.81529, lr=6.2e-05, time_each_step=0.85s, eta=0:22:42\n",
      "2021-08-15 16:31:59 [INFO]\t[TRAIN] Epoch=24/100, Step=13/21, loss=24.678339, lr=6.2e-05, time_each_step=0.84s, eta=0:22:40\n",
      "2021-08-15 16:32:00 [INFO]\t[TRAIN] Epoch=24/100, Step=15/21, loss=19.935654, lr=6.2e-05, time_each_step=0.86s, eta=0:22:38\n",
      "2021-08-15 16:32:01 [INFO]\t[TRAIN] Epoch=24/100, Step=17/21, loss=23.504519, lr=6.2e-05, time_each_step=0.86s, eta=0:22:37\n",
      "2021-08-15 16:32:01 [INFO]\t[TRAIN] Epoch=24/100, Step=19/21, loss=29.880503, lr=6.3e-05, time_each_step=0.85s, eta=0:22:35\n",
      "2021-08-15 16:32:02 [INFO]\t[TRAIN] Epoch=24/100, Step=21/21, loss=20.420935, lr=6.3e-05, time_each_step=0.5s, eta=0:22:33\n",
      "2021-08-15 16:32:02 [INFO]\t[TRAIN] Epoch 24 finished, loss=23.936281, lr=6.2e-05 .\n",
      "2021-08-15 16:32:09 [INFO]\t[TRAIN] Epoch=25/100, Step=2/21, loss=18.540386, lr=6.3e-05, time_each_step=0.81s, eta=0:22:32\n",
      "2021-08-15 16:32:10 [INFO]\t[TRAIN] Epoch=25/100, Step=4/21, loss=22.756636, lr=6.3e-05, time_each_step=0.82s, eta=0:22:30\n",
      "2021-08-15 16:32:11 [INFO]\t[TRAIN] Epoch=25/100, Step=6/21, loss=27.87126, lr=6.4e-05, time_each_step=0.81s, eta=0:22:29\n",
      "2021-08-15 16:32:13 [INFO]\t[TRAIN] Epoch=25/100, Step=8/21, loss=22.308317, lr=6.4e-05, time_each_step=0.79s, eta=0:22:27\n",
      "2021-08-15 16:32:14 [INFO]\t[TRAIN] Epoch=25/100, Step=10/21, loss=25.75853, lr=6.4e-05, time_each_step=0.82s, eta=0:22:26\n",
      "2021-08-15 16:32:15 [INFO]\t[TRAIN] Epoch=25/100, Step=12/21, loss=21.701252, lr=6.4e-05, time_each_step=0.82s, eta=0:22:24\n",
      "2021-08-15 16:32:16 [INFO]\t[TRAIN] Epoch=25/100, Step=14/21, loss=17.449495, lr=6.5e-05, time_each_step=0.82s, eta=0:22:22\n",
      "2021-08-15 16:32:17 [INFO]\t[TRAIN] Epoch=25/100, Step=16/21, loss=25.80385, lr=6.5e-05, time_each_step=0.83s, eta=0:22:21\n",
      "2021-08-15 16:32:18 [INFO]\t[TRAIN] Epoch=25/100, Step=18/21, loss=16.369368, lr=6.5e-05, time_each_step=0.84s, eta=0:22:19\n",
      "2021-08-15 16:32:19 [INFO]\t[TRAIN] Epoch=25/100, Step=20/21, loss=23.753103, lr=6.5e-05, time_each_step=0.86s, eta=0:22:17\n",
      "2021-08-15 16:32:20 [INFO]\t[TRAIN] Epoch 25 finished, loss=23.746883, lr=6.4e-05 .\n",
      "2021-08-15 16:32:25 [INFO]\t[TRAIN] Epoch=26/100, Step=1/21, loss=22.421968, lr=6.6e-05, time_each_step=0.77s, eta=0:22:52\n",
      "2021-08-15 16:32:26 [INFO]\t[TRAIN] Epoch=26/100, Step=3/21, loss=26.361645, lr=6.6e-05, time_each_step=0.79s, eta=0:22:50\n",
      "2021-08-15 16:32:28 [INFO]\t[TRAIN] Epoch=26/100, Step=5/21, loss=30.510294, lr=6.6e-05, time_each_step=0.81s, eta=0:22:49\n",
      "2021-08-15 16:32:29 [INFO]\t[TRAIN] Epoch=26/100, Step=7/21, loss=19.152727, lr=6.6e-05, time_each_step=0.81s, eta=0:22:47\n",
      "2021-08-15 16:32:30 [INFO]\t[TRAIN] Epoch=26/100, Step=9/21, loss=25.535257, lr=6.7e-05, time_each_step=0.77s, eta=0:22:45\n",
      "2021-08-15 16:32:31 [INFO]\t[TRAIN] Epoch=26/100, Step=11/21, loss=20.091425, lr=6.7e-05, time_each_step=0.78s, eta=0:22:44\n",
      "2021-08-15 16:32:32 [INFO]\t[TRAIN] Epoch=26/100, Step=13/21, loss=23.164783, lr=6.7e-05, time_each_step=0.79s, eta=0:22:42\n",
      "2021-08-15 16:32:33 [INFO]\t[TRAIN] Epoch=26/100, Step=15/21, loss=24.450853, lr=6.7e-05, time_each_step=0.79s, eta=0:22:41\n",
      "2021-08-15 16:32:34 [INFO]\t[TRAIN] Epoch=26/100, Step=17/21, loss=27.303993, lr=6.8e-05, time_each_step=0.79s, eta=0:22:39\n",
      "2021-08-15 16:32:35 [INFO]\t[TRAIN] Epoch=26/100, Step=19/21, loss=17.313934, lr=6.8e-05, time_each_step=0.8s, eta=0:22:38\n",
      "2021-08-15 16:32:36 [INFO]\t[TRAIN] Epoch=26/100, Step=21/21, loss=25.051325, lr=6.8e-05, time_each_step=0.57s, eta=0:22:36\n",
      "2021-08-15 16:32:36 [INFO]\t[TRAIN] Epoch 26 finished, loss=24.066774, lr=6.7e-05 .\n",
      "2021-08-15 16:32:47 [INFO]\t[TRAIN] Epoch=27/100, Step=2/21, loss=25.565496, lr=6.8e-05, time_each_step=1.02s, eta=0:21:4\n",
      "2021-08-15 16:32:48 [INFO]\t[TRAIN] Epoch=27/100, Step=4/21, loss=21.906055, lr=6.9e-05, time_each_step=1.0s, eta=0:21:2\n",
      "2021-08-15 16:32:49 [INFO]\t[TRAIN] Epoch=27/100, Step=6/21, loss=17.802059, lr=6.9e-05, time_each_step=1.02s, eta=0:21:0\n",
      "2021-08-15 16:32:51 [INFO]\t[TRAIN] Epoch=27/100, Step=8/21, loss=19.753807, lr=6.9e-05, time_each_step=1.04s, eta=0:20:59\n",
      "2021-08-15 16:32:52 [INFO]\t[TRAIN] Epoch=27/100, Step=10/21, loss=17.001986, lr=6.9e-05, time_each_step=1.04s, eta=0:20:57\n",
      "2021-08-15 16:32:53 [INFO]\t[TRAIN] Epoch=27/100, Step=12/21, loss=24.066694, lr=7e-05, time_each_step=1.05s, eta=0:20:55\n",
      "2021-08-15 16:32:54 [INFO]\t[TRAIN] Epoch=27/100, Step=14/21, loss=17.82078, lr=7e-05, time_each_step=1.06s, eta=0:20:52\n",
      "2021-08-15 16:32:55 [INFO]\t[TRAIN] Epoch=27/100, Step=16/21, loss=19.964579, lr=7e-05, time_each_step=1.07s, eta=0:20:50\n",
      "2021-08-15 16:32:56 [INFO]\t[TRAIN] Epoch=27/100, Step=18/21, loss=20.355522, lr=7e-05, time_each_step=1.04s, eta=0:20:48\n",
      "2021-08-15 16:32:57 [INFO]\t[TRAIN] Epoch=27/100, Step=20/21, loss=30.71484, lr=7.1e-05, time_each_step=1.03s, eta=0:20:46\n",
      "2021-08-15 16:32:57 [INFO]\t[TRAIN] Epoch 27 finished, loss=22.851156, lr=7e-05 .\n",
      "2021-08-15 16:33:02 [INFO]\t[TRAIN] Epoch=28/100, Step=1/21, loss=25.059948, lr=7.1e-05, time_each_step=0.77s, eta=0:26:31\n",
      "2021-08-15 16:33:04 [INFO]\t[TRAIN] Epoch=28/100, Step=3/21, loss=25.52989, lr=7.1e-05, time_each_step=0.81s, eta=0:26:30\n",
      "2021-08-15 16:33:05 [INFO]\t[TRAIN] Epoch=28/100, Step=5/21, loss=19.806892, lr=7.1e-05, time_each_step=0.81s, eta=0:26:28\n",
      "2021-08-15 16:33:06 [INFO]\t[TRAIN] Epoch=28/100, Step=7/21, loss=15.139178, lr=7.2e-05, time_each_step=0.79s, eta=0:26:26\n",
      "2021-08-15 16:33:08 [INFO]\t[TRAIN] Epoch=28/100, Step=9/21, loss=21.203089, lr=7.2e-05, time_each_step=0.81s, eta=0:26:25\n",
      "2021-08-15 16:33:09 [INFO]\t[TRAIN] Epoch=28/100, Step=11/21, loss=20.044254, lr=7.2e-05, time_each_step=0.8s, eta=0:26:23\n",
      "2021-08-15 16:33:10 [INFO]\t[TRAIN] Epoch=28/100, Step=13/21, loss=24.410904, lr=7.2e-05, time_each_step=0.8s, eta=0:26:22\n",
      "2021-08-15 16:33:11 [INFO]\t[TRAIN] Epoch=28/100, Step=15/21, loss=26.172007, lr=7.3e-05, time_each_step=0.77s, eta=0:26:20\n",
      "2021-08-15 16:33:12 [INFO]\t[TRAIN] Epoch=28/100, Step=17/21, loss=15.832289, lr=7.3e-05, time_each_step=0.78s, eta=0:26:18\n",
      "2021-08-15 16:33:12 [INFO]\t[TRAIN] Epoch=28/100, Step=19/21, loss=21.347069, lr=7.3e-05, time_each_step=0.79s, eta=0:26:17\n",
      "2021-08-15 16:33:14 [INFO]\t[TRAIN] Epoch=28/100, Step=21/21, loss=22.330976, lr=7.3e-05, time_each_step=0.57s, eta=0:26:15\n",
      "2021-08-15 16:33:14 [INFO]\t[TRAIN] Epoch 28 finished, loss=22.357794, lr=7.2e-05 .\n",
      "2021-08-15 16:33:21 [INFO]\t[TRAIN] Epoch=29/100, Step=2/21, loss=20.035305, lr=7.4e-05, time_each_step=0.84s, eta=0:20:31\n",
      "2021-08-15 16:33:22 [INFO]\t[TRAIN] Epoch=29/100, Step=4/21, loss=17.260536, lr=7.4e-05, time_each_step=0.81s, eta=0:20:29\n",
      "2021-08-15 16:33:23 [INFO]\t[TRAIN] Epoch=29/100, Step=6/21, loss=22.288496, lr=7.4e-05, time_each_step=0.82s, eta=0:20:28\n",
      "2021-08-15 16:33:24 [INFO]\t[TRAIN] Epoch=29/100, Step=8/21, loss=31.048492, lr=7.4e-05, time_each_step=0.81s, eta=0:20:26\n",
      "2021-08-15 16:33:26 [INFO]\t[TRAIN] Epoch=29/100, Step=10/21, loss=23.91185, lr=7.5e-05, time_each_step=0.83s, eta=0:20:25\n",
      "2021-08-15 16:33:27 [INFO]\t[TRAIN] Epoch=29/100, Step=12/21, loss=21.597126, lr=7.5e-05, time_each_step=0.83s, eta=0:20:23\n",
      "2021-08-15 16:33:28 [INFO]\t[TRAIN] Epoch=29/100, Step=14/21, loss=21.944405, lr=7.5e-05, time_each_step=0.84s, eta=0:20:21\n",
      "2021-08-15 16:33:28 [INFO]\t[TRAIN] Epoch=29/100, Step=16/21, loss=24.480255, lr=7.5e-05, time_each_step=0.83s, eta=0:20:20\n",
      "2021-08-15 16:33:29 [INFO]\t[TRAIN] Epoch=29/100, Step=18/21, loss=18.316469, lr=7.6e-05, time_each_step=0.83s, eta=0:20:18\n",
      "2021-08-15 16:33:30 [INFO]\t[TRAIN] Epoch=29/100, Step=20/21, loss=18.133829, lr=7.6e-05, time_each_step=0.81s, eta=0:20:16\n",
      "2021-08-15 16:33:30 [INFO]\t[TRAIN] Epoch 29 finished, loss=22.10952, lr=7.5e-05 .\n",
      "2021-08-15 16:33:35 [INFO]\t[TRAIN] Epoch=30/100, Step=1/21, loss=15.781222, lr=7.6e-05, time_each_step=0.71s, eta=0:20:21\n",
      "2021-08-15 16:33:36 [INFO]\t[TRAIN] Epoch=30/100, Step=3/21, loss=29.176756, lr=7.6e-05, time_each_step=0.72s, eta=0:20:20\n",
      "2021-08-15 16:33:37 [INFO]\t[TRAIN] Epoch=30/100, Step=5/21, loss=21.852745, lr=7.7e-05, time_each_step=0.71s, eta=0:20:18\n",
      "2021-08-15 16:33:39 [INFO]\t[TRAIN] Epoch=30/100, Step=7/21, loss=23.460316, lr=7.7e-05, time_each_step=0.72s, eta=0:20:17\n",
      "2021-08-15 16:33:40 [INFO]\t[TRAIN] Epoch=30/100, Step=9/21, loss=23.789495, lr=7.7e-05, time_each_step=0.72s, eta=0:20:16\n",
      "2021-08-15 16:33:41 [INFO]\t[TRAIN] Epoch=30/100, Step=11/21, loss=28.479828, lr=7.7e-05, time_each_step=0.73s, eta=0:20:14\n",
      "2021-08-15 16:33:42 [INFO]\t[TRAIN] Epoch=30/100, Step=13/21, loss=23.797354, lr=7.8e-05, time_each_step=0.74s, eta=0:20:13\n",
      "2021-08-15 16:33:44 [INFO]\t[TRAIN] Epoch=30/100, Step=15/21, loss=22.142609, lr=7.8e-05, time_each_step=0.76s, eta=0:20:12\n",
      "2021-08-15 16:33:44 [INFO]\t[TRAIN] Epoch=30/100, Step=17/21, loss=19.691158, lr=7.8e-05, time_each_step=0.77s, eta=0:20:10\n",
      "2021-08-15 16:33:45 [INFO]\t[TRAIN] Epoch=30/100, Step=19/21, loss=20.769621, lr=7.8e-05, time_each_step=0.77s, eta=0:20:9\n",
      "2021-08-15 16:33:46 [INFO]\t[TRAIN] Epoch=30/100, Step=21/21, loss=20.291996, lr=7.9e-05, time_each_step=0.56s, eta=0:20:7\n",
      "2021-08-15 16:33:46 [INFO]\t[TRAIN] Epoch 30 finished, loss=22.66177, lr=7.7e-05 .\n",
      "2021-08-15 16:33:56 [INFO]\t[TRAIN] Epoch=31/100, Step=2/21, loss=24.95154, lr=7.9e-05, time_each_step=1.01s, eta=0:19:34\n",
      "2021-08-15 16:33:57 [INFO]\t[TRAIN] Epoch=31/100, Step=4/21, loss=21.199121, lr=7.9e-05, time_each_step=1.01s, eta=0:19:32\n",
      "2021-08-15 16:33:59 [INFO]\t[TRAIN] Epoch=31/100, Step=6/21, loss=21.903114, lr=7.9e-05, time_each_step=1.02s, eta=0:19:30\n",
      "2021-08-15 16:34:00 [INFO]\t[TRAIN] Epoch=31/100, Step=8/21, loss=20.994005, lr=8e-05, time_each_step=1.0s, eta=0:19:28\n",
      "2021-08-15 16:34:01 [INFO]\t[TRAIN] Epoch=31/100, Step=10/21, loss=26.4519, lr=8e-05, time_each_step=1.0s, eta=0:19:26\n",
      "2021-08-15 16:34:03 [INFO]\t[TRAIN] Epoch=31/100, Step=12/21, loss=18.859987, lr=8e-05, time_each_step=1.01s, eta=0:19:24\n",
      "2021-08-15 16:34:03 [INFO]\t[TRAIN] Epoch=31/100, Step=14/21, loss=19.961205, lr=8e-05, time_each_step=0.99s, eta=0:19:22\n",
      "2021-08-15 16:34:04 [INFO]\t[TRAIN] Epoch=31/100, Step=16/21, loss=22.098532, lr=8.1e-05, time_each_step=0.98s, eta=0:19:20\n",
      "2021-08-15 16:34:05 [INFO]\t[TRAIN] Epoch=31/100, Step=18/21, loss=21.52561, lr=8.1e-05, time_each_step=0.99s, eta=0:19:18\n",
      "2021-08-15 16:34:06 [INFO]\t[TRAIN] Epoch=31/100, Step=20/21, loss=19.969028, lr=8.1e-05, time_each_step=0.99s, eta=0:19:16\n",
      "2021-08-15 16:34:06 [INFO]\t[TRAIN] Epoch 31 finished, loss=20.924084, lr=8e-05 .\n",
      "2021-08-15 16:34:12 [INFO]\t[TRAIN] Epoch=32/100, Step=1/21, loss=23.870075, lr=8.1e-05, time_each_step=0.76s, eta=0:24:0\n",
      "2021-08-15 16:34:13 [INFO]\t[TRAIN] Epoch=32/100, Step=3/21, loss=17.7374, lr=8.2e-05, time_each_step=0.76s, eta=0:23:59\n",
      "2021-08-15 16:34:14 [INFO]\t[TRAIN] Epoch=32/100, Step=5/21, loss=18.047321, lr=8.2e-05, time_each_step=0.73s, eta=0:23:56\n",
      "2021-08-15 16:34:14 [INFO]\t[TRAIN] Epoch=32/100, Step=7/21, loss=25.092991, lr=8.2e-05, time_each_step=0.72s, eta=0:23:55\n",
      "2021-08-15 16:34:16 [INFO]\t[TRAIN] Epoch=32/100, Step=9/21, loss=17.793346, lr=8.2e-05, time_each_step=0.72s, eta=0:23:54\n",
      "2021-08-15 16:34:17 [INFO]\t[TRAIN] Epoch=32/100, Step=11/21, loss=18.603237, lr=8.3e-05, time_each_step=0.73s, eta=0:23:52\n",
      "2021-08-15 16:34:18 [INFO]\t[TRAIN] Epoch=32/100, Step=13/21, loss=26.2647, lr=8.3e-05, time_each_step=0.75s, eta=0:23:51\n",
      "2021-08-15 16:34:19 [INFO]\t[TRAIN] Epoch=32/100, Step=15/21, loss=22.34853, lr=8.3e-05, time_each_step=0.75s, eta=0:23:49\n",
      "2021-08-15 16:34:20 [INFO]\t[TRAIN] Epoch=32/100, Step=17/21, loss=18.111578, lr=8.3e-05, time_each_step=0.74s, eta=0:23:48\n",
      "2021-08-15 16:34:21 [INFO]\t[TRAIN] Epoch=32/100, Step=19/21, loss=19.995472, lr=8.4e-05, time_each_step=0.73s, eta=0:23:46\n",
      "2021-08-15 16:34:22 [INFO]\t[TRAIN] Epoch=32/100, Step=21/21, loss=17.342617, lr=8.4e-05, time_each_step=0.49s, eta=0:23:45\n",
      "2021-08-15 16:34:22 [INFO]\t[TRAIN] Epoch 32 finished, loss=20.748215, lr=8.3e-05 .\n",
      "2021-08-15 16:34:27 [INFO]\t[TRAIN] Epoch=33/100, Step=2/21, loss=19.636843, lr=8.4e-05, time_each_step=0.72s, eta=0:18:11\n",
      "2021-08-15 16:34:29 [INFO]\t[TRAIN] Epoch=33/100, Step=4/21, loss=19.345713, lr=8.4e-05, time_each_step=0.75s, eta=0:18:10\n",
      "2021-08-15 16:34:30 [INFO]\t[TRAIN] Epoch=33/100, Step=6/21, loss=15.444921, lr=8.5e-05, time_each_step=0.76s, eta=0:18:9\n",
      "2021-08-15 16:34:31 [INFO]\t[TRAIN] Epoch=33/100, Step=8/21, loss=21.69355, lr=8.5e-05, time_each_step=0.75s, eta=0:18:7\n",
      "2021-08-15 16:34:32 [INFO]\t[TRAIN] Epoch=33/100, Step=10/21, loss=22.645287, lr=8.5e-05, time_each_step=0.74s, eta=0:18:6\n",
      "2021-08-15 16:34:33 [INFO]\t[TRAIN] Epoch=33/100, Step=12/21, loss=22.481964, lr=8.5e-05, time_each_step=0.74s, eta=0:18:4\n",
      "2021-08-15 16:34:34 [INFO]\t[TRAIN] Epoch=33/100, Step=14/21, loss=27.932537, lr=8.6e-05, time_each_step=0.73s, eta=0:18:3\n",
      "2021-08-15 16:34:35 [INFO]\t[TRAIN] Epoch=33/100, Step=16/21, loss=18.395851, lr=8.6e-05, time_each_step=0.73s, eta=0:18:1\n",
      "2021-08-15 16:34:35 [INFO]\t[TRAIN] Epoch=33/100, Step=18/21, loss=21.200981, lr=8.6e-05, time_each_step=0.74s, eta=0:18:0\n",
      "2021-08-15 16:34:36 [INFO]\t[TRAIN] Epoch=33/100, Step=20/21, loss=22.089764, lr=8.6e-05, time_each_step=0.73s, eta=0:17:58\n",
      "2021-08-15 16:34:37 [INFO]\t[TRAIN] Epoch 33 finished, loss=20.326294, lr=8.5e-05 .\n",
      "2021-08-15 16:34:47 [INFO]\t[TRAIN] Epoch=34/100, Step=1/21, loss=20.165411, lr=8.7e-05, time_each_step=1.01s, eta=0:18:3\n",
      "2021-08-15 16:34:50 [INFO]\t[TRAIN] Epoch=34/100, Step=3/21, loss=25.876562, lr=8.7e-05, time_each_step=1.05s, eta=0:18:1\n",
      "2021-08-15 16:34:51 [INFO]\t[TRAIN] Epoch=34/100, Step=5/21, loss=20.299557, lr=8.7e-05, time_each_step=1.07s, eta=0:18:0\n",
      "2021-08-15 16:34:52 [INFO]\t[TRAIN] Epoch=34/100, Step=7/21, loss=16.07943, lr=8.7e-05, time_each_step=1.06s, eta=0:17:57\n",
      "2021-08-15 16:34:54 [INFO]\t[TRAIN] Epoch=34/100, Step=9/21, loss=19.392784, lr=8.8e-05, time_each_step=1.09s, eta=0:17:56\n",
      "2021-08-15 16:34:55 [INFO]\t[TRAIN] Epoch=34/100, Step=11/21, loss=26.605968, lr=8.8e-05, time_each_step=1.11s, eta=0:17:54\n",
      "2021-08-15 16:34:56 [INFO]\t[TRAIN] Epoch=34/100, Step=13/21, loss=22.449913, lr=8.8e-05, time_each_step=1.13s, eta=0:17:52\n",
      "2021-08-15 16:34:57 [INFO]\t[TRAIN] Epoch=34/100, Step=15/21, loss=22.715212, lr=8.8e-05, time_each_step=1.12s, eta=0:17:49\n",
      "2021-08-15 16:34:58 [INFO]\t[TRAIN] Epoch=34/100, Step=17/21, loss=23.264101, lr=8.9e-05, time_each_step=1.14s, eta=0:17:47\n",
      "2021-08-15 16:34:59 [INFO]\t[TRAIN] Epoch=34/100, Step=19/21, loss=18.166817, lr=8.9e-05, time_each_step=1.13s, eta=0:17:45\n",
      "2021-08-15 16:35:00 [INFO]\t[TRAIN] Epoch=34/100, Step=21/21, loss=21.898335, lr=8.9e-05, time_each_step=0.63s, eta=0:17:43\n",
      "2021-08-15 16:35:00 [INFO]\t[TRAIN] Epoch 34 finished, loss=20.455681, lr=8.8e-05 .\n",
      "2021-08-15 16:35:10 [INFO]\t[TRAIN] Epoch=35/100, Step=2/21, loss=22.861614, lr=8.9e-05, time_each_step=1.05s, eta=0:26:26\n",
      "2021-08-15 16:35:11 [INFO]\t[TRAIN] Epoch=35/100, Step=4/21, loss=21.068274, lr=9e-05, time_each_step=1.02s, eta=0:26:24\n",
      "2021-08-15 16:35:13 [INFO]\t[TRAIN] Epoch=35/100, Step=6/21, loss=19.885561, lr=9e-05, time_each_step=1.04s, eta=0:26:22\n",
      "2021-08-15 16:35:14 [INFO]\t[TRAIN] Epoch=35/100, Step=8/21, loss=20.604723, lr=9e-05, time_each_step=1.02s, eta=0:26:20\n",
      "2021-08-15 16:35:15 [INFO]\t[TRAIN] Epoch=35/100, Step=10/21, loss=20.489029, lr=9e-05, time_each_step=1.01s, eta=0:26:18\n",
      "2021-08-15 16:35:17 [INFO]\t[TRAIN] Epoch=35/100, Step=12/21, loss=24.032173, lr=9.1e-05, time_each_step=1.02s, eta=0:26:16\n",
      "2021-08-15 16:35:18 [INFO]\t[TRAIN] Epoch=35/100, Step=14/21, loss=17.147564, lr=9.1e-05, time_each_step=1.03s, eta=0:26:14\n",
      "2021-08-15 16:35:19 [INFO]\t[TRAIN] Epoch=35/100, Step=16/21, loss=15.407114, lr=9.1e-05, time_each_step=1.03s, eta=0:26:12\n",
      "2021-08-15 16:35:19 [INFO]\t[TRAIN] Epoch=35/100, Step=18/21, loss=19.471367, lr=9.1e-05, time_each_step=1.03s, eta=0:26:10\n",
      "2021-08-15 16:35:20 [INFO]\t[TRAIN] Epoch=35/100, Step=20/21, loss=19.933016, lr=9.2e-05, time_each_step=1.01s, eta=0:26:7\n",
      "2021-08-15 16:35:21 [INFO]\t[TRAIN] Epoch 35 finished, loss=19.85368, lr=9.1e-05 .\n",
      "2021-08-15 16:35:28 [INFO]\t[TRAIN] Epoch=36/100, Step=1/21, loss=21.928574, lr=9.2e-05, time_each_step=0.87s, eta=0:23:20\n",
      "2021-08-15 16:35:29 [INFO]\t[TRAIN] Epoch=36/100, Step=3/21, loss=18.107843, lr=9.2e-05, time_each_step=0.88s, eta=0:23:18\n",
      "2021-08-15 16:35:30 [INFO]\t[TRAIN] Epoch=36/100, Step=5/21, loss=16.797567, lr=9.2e-05, time_each_step=0.88s, eta=0:23:16\n",
      "2021-08-15 16:35:31 [INFO]\t[TRAIN] Epoch=36/100, Step=7/21, loss=19.05504, lr=9.3e-05, time_each_step=0.87s, eta=0:23:14\n",
      "2021-08-15 16:35:33 [INFO]\t[TRAIN] Epoch=36/100, Step=9/21, loss=21.702282, lr=9.3e-05, time_each_step=0.87s, eta=0:23:13\n",
      "2021-08-15 16:35:34 [INFO]\t[TRAIN] Epoch=36/100, Step=11/21, loss=25.702406, lr=9.3e-05, time_each_step=0.87s, eta=0:23:11\n",
      "2021-08-15 16:35:35 [INFO]\t[TRAIN] Epoch=36/100, Step=13/21, loss=16.595991, lr=9.3e-05, time_each_step=0.88s, eta=0:23:9\n",
      "2021-08-15 16:35:36 [INFO]\t[TRAIN] Epoch=36/100, Step=15/21, loss=19.201307, lr=9.4e-05, time_each_step=0.87s, eta=0:23:7\n",
      "2021-08-15 16:35:37 [INFO]\t[TRAIN] Epoch=36/100, Step=17/21, loss=23.219027, lr=9.4e-05, time_each_step=0.88s, eta=0:23:6\n",
      "2021-08-15 16:35:38 [INFO]\t[TRAIN] Epoch=36/100, Step=19/21, loss=25.251564, lr=9.4e-05, time_each_step=0.86s, eta=0:23:4\n",
      "2021-08-15 16:35:38 [INFO]\t[TRAIN] Epoch=36/100, Step=21/21, loss=21.822399, lr=9.4e-05, time_each_step=0.52s, eta=0:23:2\n",
      "2021-08-15 16:35:38 [INFO]\t[TRAIN] Epoch 36 finished, loss=21.023035, lr=9.3e-05 .\n",
      "2021-08-15 16:35:45 [INFO]\t[TRAIN] Epoch=37/100, Step=2/21, loss=15.885089, lr=9.5e-05, time_each_step=0.81s, eta=0:19:28\n",
      "2021-08-15 16:35:47 [INFO]\t[TRAIN] Epoch=37/100, Step=4/21, loss=17.75284, lr=9.5e-05, time_each_step=0.81s, eta=0:19:26\n",
      "2021-08-15 16:35:48 [INFO]\t[TRAIN] Epoch=37/100, Step=6/21, loss=25.221609, lr=9.5e-05, time_each_step=0.8s, eta=0:19:25\n",
      "2021-08-15 16:35:48 [INFO]\t[TRAIN] Epoch=37/100, Step=8/21, loss=21.887737, lr=9.5e-05, time_each_step=0.77s, eta=0:19:23\n",
      "2021-08-15 16:35:49 [INFO]\t[TRAIN] Epoch=37/100, Step=10/21, loss=19.892721, lr=9.6e-05, time_each_step=0.76s, eta=0:19:21\n",
      "2021-08-15 16:35:51 [INFO]\t[TRAIN] Epoch=37/100, Step=12/21, loss=22.552082, lr=9.6e-05, time_each_step=0.78s, eta=0:19:20\n",
      "2021-08-15 16:35:52 [INFO]\t[TRAIN] Epoch=37/100, Step=14/21, loss=22.747126, lr=9.6e-05, time_each_step=0.78s, eta=0:19:18\n",
      "2021-08-15 16:35:52 [INFO]\t[TRAIN] Epoch=37/100, Step=16/21, loss=22.596258, lr=9.6e-05, time_each_step=0.76s, eta=0:19:17\n",
      "2021-08-15 16:35:53 [INFO]\t[TRAIN] Epoch=37/100, Step=18/21, loss=22.696144, lr=9.7e-05, time_each_step=0.79s, eta=0:19:15\n",
      "2021-08-15 16:35:54 [INFO]\t[TRAIN] Epoch=37/100, Step=20/21, loss=15.750745, lr=9.7e-05, time_each_step=0.8s, eta=0:19:14\n",
      "2021-08-15 16:35:55 [INFO]\t[TRAIN] Epoch 37 finished, loss=20.724005, lr=9.6e-05 .\n",
      "2021-08-15 16:36:01 [INFO]\t[TRAIN] Epoch=38/100, Step=1/21, loss=20.24029, lr=9.7e-05, time_each_step=0.77s, eta=0:18:2\n",
      "2021-08-15 16:36:02 [INFO]\t[TRAIN] Epoch=38/100, Step=3/21, loss=16.805861, lr=9.7e-05, time_each_step=0.78s, eta=0:18:0\n",
      "2021-08-15 16:36:04 [INFO]\t[TRAIN] Epoch=38/100, Step=5/21, loss=19.600288, lr=9.8e-05, time_each_step=0.81s, eta=0:17:59\n",
      "2021-08-15 16:36:05 [INFO]\t[TRAIN] Epoch=38/100, Step=7/21, loss=18.015314, lr=9.8e-05, time_each_step=0.84s, eta=0:17:58\n",
      "2021-08-15 16:36:07 [INFO]\t[TRAIN] Epoch=38/100, Step=9/21, loss=18.529688, lr=9.8e-05, time_each_step=0.87s, eta=0:17:57\n",
      "2021-08-15 16:36:08 [INFO]\t[TRAIN] Epoch=38/100, Step=11/21, loss=19.829767, lr=9.8e-05, time_each_step=0.84s, eta=0:17:55\n",
      "2021-08-15 16:36:08 [INFO]\t[TRAIN] Epoch=38/100, Step=13/21, loss=20.959274, lr=9.9e-05, time_each_step=0.83s, eta=0:17:53\n",
      "2021-08-15 16:36:09 [INFO]\t[TRAIN] Epoch=38/100, Step=15/21, loss=22.665819, lr=9.9e-05, time_each_step=0.86s, eta=0:17:51\n",
      "2021-08-15 16:36:10 [INFO]\t[TRAIN] Epoch=38/100, Step=17/21, loss=17.796629, lr=9.9e-05, time_each_step=0.85s, eta=0:17:50\n",
      "2021-08-15 16:36:11 [INFO]\t[TRAIN] Epoch=38/100, Step=19/21, loss=15.818472, lr=9.9e-05, time_each_step=0.84s, eta=0:17:48\n",
      "2021-08-15 16:36:12 [INFO]\t[TRAIN] Epoch=38/100, Step=21/21, loss=15.325717, lr=0.0001, time_each_step=0.56s, eta=0:17:46\n",
      "2021-08-15 16:36:12 [INFO]\t[TRAIN] Epoch 38 finished, loss=19.659502, lr=9.8e-05 .\n",
      "2021-08-15 16:36:18 [INFO]\t[TRAIN] Epoch=39/100, Step=2/21, loss=15.388164, lr=0.0001, time_each_step=0.78s, eta=0:18:44\n",
      "2021-08-15 16:36:20 [INFO]\t[TRAIN] Epoch=39/100, Step=4/21, loss=19.095181, lr=0.0001, time_each_step=0.8s, eta=0:18:43\n",
      "2021-08-15 16:36:21 [INFO]\t[TRAIN] Epoch=39/100, Step=6/21, loss=20.102516, lr=0.0001, time_each_step=0.82s, eta=0:18:41\n",
      "2021-08-15 16:36:23 [INFO]\t[TRAIN] Epoch=39/100, Step=8/21, loss=17.073395, lr=0.000101, time_each_step=0.79s, eta=0:18:40\n",
      "2021-08-15 16:36:24 [INFO]\t[TRAIN] Epoch=39/100, Step=10/21, loss=22.066059, lr=0.000101, time_each_step=0.81s, eta=0:18:38\n",
      "2021-08-15 16:36:25 [INFO]\t[TRAIN] Epoch=39/100, Step=12/21, loss=19.212696, lr=0.000101, time_each_step=0.84s, eta=0:18:37\n",
      "2021-08-15 16:36:26 [INFO]\t[TRAIN] Epoch=39/100, Step=14/21, loss=19.546947, lr=0.000101, time_each_step=0.82s, eta=0:18:35\n",
      "2021-08-15 16:36:27 [INFO]\t[TRAIN] Epoch=39/100, Step=16/21, loss=21.527796, lr=0.000102, time_each_step=0.84s, eta=0:18:33\n",
      "2021-08-15 16:36:28 [INFO]\t[TRAIN] Epoch=39/100, Step=18/21, loss=25.090839, lr=0.000102, time_each_step=0.84s, eta=0:18:32\n",
      "2021-08-15 16:36:29 [INFO]\t[TRAIN] Epoch=39/100, Step=20/21, loss=20.144308, lr=0.000102, time_each_step=0.85s, eta=0:18:30\n",
      "2021-08-15 16:36:30 [INFO]\t[TRAIN] Epoch 39 finished, loss=19.380713, lr=0.000101 .\n",
      "2021-08-15 16:36:43 [INFO]\t[TRAIN] Epoch=40/100, Step=1/21, loss=18.696369, lr=0.000102, time_each_step=1.27s, eta=0:18:44\n",
      "2021-08-15 16:36:46 [INFO]\t[TRAIN] Epoch=40/100, Step=3/21, loss=20.472275, lr=0.000103, time_each_step=1.31s, eta=0:18:42\n",
      "2021-08-15 16:36:49 [INFO]\t[TRAIN] Epoch=40/100, Step=5/21, loss=25.478062, lr=0.000103, time_each_step=1.36s, eta=0:18:40\n",
      "2021-08-15 16:36:50 [INFO]\t[TRAIN] Epoch=40/100, Step=7/21, loss=23.806017, lr=0.000103, time_each_step=1.39s, eta=0:18:38\n",
      "2021-08-15 16:36:52 [INFO]\t[TRAIN] Epoch=40/100, Step=9/21, loss=28.259995, lr=0.000103, time_each_step=1.39s, eta=0:18:35\n",
      "2021-08-15 16:36:53 [INFO]\t[TRAIN] Epoch=40/100, Step=11/21, loss=16.469658, lr=0.000104, time_each_step=1.38s, eta=0:18:32\n",
      "2021-08-15 16:36:54 [INFO]\t[TRAIN] Epoch=40/100, Step=13/21, loss=19.783081, lr=0.000104, time_each_step=1.4s, eta=0:18:30\n",
      "2021-08-15 16:36:55 [INFO]\t[TRAIN] Epoch=40/100, Step=15/21, loss=18.069351, lr=0.000104, time_each_step=1.38s, eta=0:18:27\n",
      "2021-08-15 16:36:56 [INFO]\t[TRAIN] Epoch=40/100, Step=17/21, loss=18.080675, lr=0.000104, time_each_step=1.38s, eta=0:18:24\n",
      "2021-08-15 16:36:57 [INFO]\t[TRAIN] Epoch=40/100, Step=19/21, loss=19.773949, lr=0.000105, time_each_step=1.39s, eta=0:18:21\n",
      "2021-08-15 16:36:57 [INFO]\t[TRAIN] Epoch=40/100, Step=21/21, loss=17.322098, lr=0.000105, time_each_step=0.71s, eta=0:18:19\n",
      "2021-08-15 16:36:57 [INFO]\t[TRAIN] Epoch 40 finished, loss=19.770987, lr=0.000104 .\n",
      "2021-08-15 16:36:58 [INFO]\tStart to evaluating(total_samples=98, total_steps=7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 7/7 [00:07<00:00,  1.10s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:37:06 [INFO]\t[EVAL] Finished, Epoch=40, bbox_map=4.918833 .\n",
      "2021-08-15 16:37:09 [INFO]\tModel saved in output/ppyolo/best_model.\n",
      "2021-08-15 16:37:11 [INFO]\tModel saved in output/ppyolo/epoch_40.\n",
      "2021-08-15 16:37:12 [INFO]\tCurrent evaluated best model in eval_dataset is epoch_40, bbox_map=4.918833038619135\n",
      "2021-08-15 16:37:17 [INFO]\t[TRAIN] Epoch=41/100, Step=2/21, loss=19.123413, lr=0.000105, time_each_step=0.87s, eta=0:28:30\n",
      "2021-08-15 16:37:18 [INFO]\t[TRAIN] Epoch=41/100, Step=4/21, loss=19.938791, lr=0.000105, time_each_step=0.78s, eta=0:28:26\n",
      "2021-08-15 16:37:19 [INFO]\t[TRAIN] Epoch=41/100, Step=6/21, loss=17.273611, lr=0.000106, time_each_step=0.75s, eta=0:28:24\n",
      "2021-08-15 16:37:21 [INFO]\t[TRAIN] Epoch=41/100, Step=8/21, loss=18.565073, lr=0.000106, time_each_step=0.78s, eta=0:28:23\n",
      "2021-08-15 16:37:23 [INFO]\t[TRAIN] Epoch=41/100, Step=10/21, loss=21.909462, lr=0.000106, time_each_step=0.79s, eta=0:28:22\n",
      "2021-08-15 16:37:24 [INFO]\t[TRAIN] Epoch=41/100, Step=12/21, loss=18.606026, lr=0.000106, time_each_step=0.81s, eta=0:28:20\n",
      "2021-08-15 16:37:25 [INFO]\t[TRAIN] Epoch=41/100, Step=14/21, loss=15.422082, lr=0.000107, time_each_step=0.82s, eta=0:28:19\n",
      "2021-08-15 16:37:26 [INFO]\t[TRAIN] Epoch=41/100, Step=16/21, loss=14.151088, lr=0.000107, time_each_step=0.8s, eta=0:28:17\n",
      "2021-08-15 16:37:27 [INFO]\t[TRAIN] Epoch=41/100, Step=18/21, loss=15.908457, lr=0.000107, time_each_step=0.78s, eta=0:28:15\n",
      "2021-08-15 16:37:27 [INFO]\t[TRAIN] Epoch=41/100, Step=20/21, loss=20.138388, lr=0.000107, time_each_step=0.79s, eta=0:28:14\n",
      "2021-08-15 16:37:28 [INFO]\t[TRAIN] Epoch 41 finished, loss=17.703228, lr=0.000106 .\n",
      "2021-08-15 16:37:34 [INFO]\t[TRAIN] Epoch=42/100, Step=1/21, loss=15.255852, lr=0.000108, time_each_step=0.82s, eta=0:16:51\n",
      "2021-08-15 16:37:35 [INFO]\t[TRAIN] Epoch=42/100, Step=3/21, loss=17.871004, lr=0.000108, time_each_step=0.84s, eta=0:16:49\n",
      "2021-08-15 16:37:36 [INFO]\t[TRAIN] Epoch=42/100, Step=5/21, loss=22.300114, lr=0.000108, time_each_step=0.85s, eta=0:16:48\n",
      "2021-08-15 16:37:37 [INFO]\t[TRAIN] Epoch=42/100, Step=7/21, loss=17.484322, lr=0.000108, time_each_step=0.81s, eta=0:16:45\n",
      "2021-08-15 16:37:39 [INFO]\t[TRAIN] Epoch=42/100, Step=9/21, loss=16.055256, lr=0.000109, time_each_step=0.79s, eta=0:16:44\n",
      "2021-08-15 16:37:40 [INFO]\t[TRAIN] Epoch=42/100, Step=11/21, loss=13.732914, lr=0.000109, time_each_step=0.77s, eta=0:16:42\n",
      "2021-08-15 16:37:40 [INFO]\t[TRAIN] Epoch=42/100, Step=13/21, loss=17.242996, lr=0.000109, time_each_step=0.76s, eta=0:16:40\n",
      "2021-08-15 16:37:41 [INFO]\t[TRAIN] Epoch=42/100, Step=15/21, loss=22.266403, lr=0.000109, time_each_step=0.76s, eta=0:16:39\n",
      "2021-08-15 16:37:42 [INFO]\t[TRAIN] Epoch=42/100, Step=17/21, loss=14.1814, lr=0.00011, time_each_step=0.77s, eta=0:16:37\n",
      "2021-08-15 16:37:43 [INFO]\t[TRAIN] Epoch=42/100, Step=19/21, loss=17.070217, lr=0.00011, time_each_step=0.79s, eta=0:16:36\n",
      "2021-08-15 16:37:44 [INFO]\t[TRAIN] Epoch=42/100, Step=21/21, loss=14.680199, lr=0.00011, time_each_step=0.51s, eta=0:16:34\n",
      "2021-08-15 16:37:44 [INFO]\t[TRAIN] Epoch 42 finished, loss=18.36142, lr=0.000109 .\n",
      "2021-08-15 16:37:50 [INFO]\t[TRAIN] Epoch=43/100, Step=2/21, loss=20.15583, lr=0.00011, time_each_step=0.77s, eta=0:15:57\n",
      "2021-08-15 16:37:52 [INFO]\t[TRAIN] Epoch=43/100, Step=4/21, loss=19.921007, lr=0.000111, time_each_step=0.79s, eta=0:15:56\n",
      "2021-08-15 16:37:53 [INFO]\t[TRAIN] Epoch=43/100, Step=6/21, loss=12.983332, lr=0.000111, time_each_step=0.79s, eta=0:15:54\n",
      "2021-08-15 16:37:54 [INFO]\t[TRAIN] Epoch=43/100, Step=8/21, loss=16.034807, lr=0.000111, time_each_step=0.79s, eta=0:15:53\n",
      "2021-08-15 16:37:56 [INFO]\t[TRAIN] Epoch=43/100, Step=10/21, loss=27.767467, lr=0.000111, time_each_step=0.8s, eta=0:15:51\n",
      "2021-08-15 16:37:57 [INFO]\t[TRAIN] Epoch=43/100, Step=12/21, loss=18.082399, lr=0.000112, time_each_step=0.83s, eta=0:15:50\n",
      "2021-08-15 16:37:58 [INFO]\t[TRAIN] Epoch=43/100, Step=14/21, loss=17.263016, lr=0.000112, time_each_step=0.83s, eta=0:15:48\n",
      "2021-08-15 16:37:58 [INFO]\t[TRAIN] Epoch=43/100, Step=16/21, loss=17.912271, lr=0.000112, time_each_step=0.82s, eta=0:15:47\n",
      "2021-08-15 16:37:59 [INFO]\t[TRAIN] Epoch=43/100, Step=18/21, loss=20.416948, lr=0.000112, time_each_step=0.8s, eta=0:15:45\n",
      "2021-08-15 16:38:00 [INFO]\t[TRAIN] Epoch=43/100, Step=20/21, loss=20.499002, lr=0.000113, time_each_step=0.82s, eta=0:15:43\n",
      "2021-08-15 16:38:01 [INFO]\t[TRAIN] Epoch 43 finished, loss=19.517344, lr=0.000112 .\n",
      "2021-08-15 16:38:06 [INFO]\t[TRAIN] Epoch=44/100, Step=1/21, loss=15.70408, lr=0.000113, time_each_step=0.79s, eta=0:16:42\n",
      "2021-08-15 16:38:07 [INFO]\t[TRAIN] Epoch=44/100, Step=3/21, loss=15.161131, lr=0.000113, time_each_step=0.76s, eta=0:16:39\n",
      "2021-08-15 16:38:09 [INFO]\t[TRAIN] Epoch=44/100, Step=5/21, loss=22.486448, lr=0.000113, time_each_step=0.78s, eta=0:16:38\n",
      "2021-08-15 16:38:10 [INFO]\t[TRAIN] Epoch=44/100, Step=7/21, loss=16.144859, lr=0.000114, time_each_step=0.79s, eta=0:16:37\n",
      "2021-08-15 16:38:11 [INFO]\t[TRAIN] Epoch=44/100, Step=9/21, loss=17.186117, lr=0.000114, time_each_step=0.77s, eta=0:16:35\n",
      "2021-08-15 16:38:12 [INFO]\t[TRAIN] Epoch=44/100, Step=11/21, loss=20.51597, lr=0.000114, time_each_step=0.76s, eta=0:16:33\n",
      "2021-08-15 16:38:13 [INFO]\t[TRAIN] Epoch=44/100, Step=13/21, loss=14.290804, lr=0.000114, time_each_step=0.79s, eta=0:16:32\n",
      "2021-08-15 16:38:14 [INFO]\t[TRAIN] Epoch=44/100, Step=15/21, loss=17.142511, lr=0.000115, time_each_step=0.79s, eta=0:16:30\n",
      "2021-08-15 16:38:15 [INFO]\t[TRAIN] Epoch=44/100, Step=17/21, loss=31.158464, lr=0.000115, time_each_step=0.8s, eta=0:16:29\n",
      "2021-08-15 16:38:16 [INFO]\t[TRAIN] Epoch=44/100, Step=19/21, loss=20.552683, lr=0.000115, time_each_step=0.78s, eta=0:16:27\n",
      "2021-08-15 16:38:16 [INFO]\t[TRAIN] Epoch=44/100, Step=21/21, loss=17.407187, lr=0.000115, time_each_step=0.51s, eta=0:16:26\n",
      "2021-08-15 16:38:16 [INFO]\t[TRAIN] Epoch 44 finished, loss=18.643703, lr=0.000114 .\n",
      "2021-08-15 16:38:22 [INFO]\t[TRAIN] Epoch=45/100, Step=2/21, loss=29.9939, lr=0.000116, time_each_step=0.74s, eta=0:15:28\n",
      "2021-08-15 16:38:23 [INFO]\t[TRAIN] Epoch=45/100, Step=4/21, loss=19.761887, lr=0.000116, time_each_step=0.7s, eta=0:15:25\n",
      "2021-08-15 16:38:25 [INFO]\t[TRAIN] Epoch=45/100, Step=6/21, loss=20.508913, lr=0.000116, time_each_step=0.72s, eta=0:15:24\n",
      "2021-08-15 16:38:25 [INFO]\t[TRAIN] Epoch=45/100, Step=8/21, loss=16.639427, lr=0.000116, time_each_step=0.72s, eta=0:15:23\n",
      "2021-08-15 16:38:27 [INFO]\t[TRAIN] Epoch=45/100, Step=10/21, loss=18.380016, lr=0.000117, time_each_step=0.74s, eta=0:15:22\n",
      "2021-08-15 16:38:28 [INFO]\t[TRAIN] Epoch=45/100, Step=12/21, loss=20.089338, lr=0.000117, time_each_step=0.72s, eta=0:15:20\n",
      "2021-08-15 16:38:29 [INFO]\t[TRAIN] Epoch=45/100, Step=14/21, loss=16.405304, lr=0.000117, time_each_step=0.74s, eta=0:15:19\n",
      "2021-08-15 16:38:30 [INFO]\t[TRAIN] Epoch=45/100, Step=16/21, loss=17.983334, lr=0.000117, time_each_step=0.73s, eta=0:15:17\n",
      "2021-08-15 16:38:30 [INFO]\t[TRAIN] Epoch=45/100, Step=18/21, loss=22.017057, lr=0.000118, time_each_step=0.71s, eta=0:15:16\n",
      "2021-08-15 16:38:31 [INFO]\t[TRAIN] Epoch=45/100, Step=20/21, loss=15.700872, lr=0.000118, time_each_step=0.72s, eta=0:15:14\n",
      "2021-08-15 16:38:31 [INFO]\t[TRAIN] Epoch 45 finished, loss=19.004309, lr=0.000117 .\n",
      "2021-08-15 16:38:37 [INFO]\t[TRAIN] Epoch=46/100, Step=1/21, loss=20.599384, lr=0.000118, time_each_step=0.76s, eta=0:14:27\n",
      "2021-08-15 16:38:39 [INFO]\t[TRAIN] Epoch=46/100, Step=3/21, loss=17.833075, lr=0.000118, time_each_step=0.79s, eta=0:14:26\n",
      "2021-08-15 16:38:40 [INFO]\t[TRAIN] Epoch=46/100, Step=5/21, loss=20.06662, lr=0.000119, time_each_step=0.78s, eta=0:14:25\n",
      "2021-08-15 16:38:41 [INFO]\t[TRAIN] Epoch=46/100, Step=7/21, loss=17.218048, lr=0.000119, time_each_step=0.79s, eta=0:14:23\n",
      "2021-08-15 16:38:42 [INFO]\t[TRAIN] Epoch=46/100, Step=9/21, loss=14.55171, lr=0.000119, time_each_step=0.77s, eta=0:14:21\n",
      "2021-08-15 16:38:44 [INFO]\t[TRAIN] Epoch=46/100, Step=11/21, loss=19.163891, lr=0.000119, time_each_step=0.78s, eta=0:14:20\n",
      "2021-08-15 16:38:45 [INFO]\t[TRAIN] Epoch=46/100, Step=13/21, loss=17.32276, lr=0.00012, time_each_step=0.78s, eta=0:14:18\n",
      "2021-08-15 16:38:46 [INFO]\t[TRAIN] Epoch=46/100, Step=15/21, loss=16.948513, lr=0.00012, time_each_step=0.8s, eta=0:14:17\n",
      "2021-08-15 16:38:46 [INFO]\t[TRAIN] Epoch=46/100, Step=17/21, loss=16.685362, lr=0.00012, time_each_step=0.8s, eta=0:14:15\n",
      "2021-08-15 16:38:47 [INFO]\t[TRAIN] Epoch=46/100, Step=19/21, loss=16.994587, lr=0.00012, time_each_step=0.8s, eta=0:14:14\n",
      "2021-08-15 16:38:48 [INFO]\t[TRAIN] Epoch=46/100, Step=21/21, loss=27.171764, lr=0.000121, time_each_step=0.53s, eta=0:14:12\n",
      "2021-08-15 16:38:48 [INFO]\t[TRAIN] Epoch 46 finished, loss=18.570412, lr=0.000119 .\n",
      "2021-08-15 16:39:01 [INFO]\t[TRAIN] Epoch=47/100, Step=2/21, loss=23.535278, lr=0.000121, time_each_step=1.13s, eta=0:15:32\n",
      "2021-08-15 16:39:03 [INFO]\t[TRAIN] Epoch=47/100, Step=4/21, loss=16.015909, lr=0.000121, time_each_step=1.13s, eta=0:15:30\n",
      "2021-08-15 16:39:04 [INFO]\t[TRAIN] Epoch=47/100, Step=6/21, loss=15.961782, lr=0.000121, time_each_step=1.13s, eta=0:15:27\n",
      "2021-08-15 16:39:05 [INFO]\t[TRAIN] Epoch=47/100, Step=8/21, loss=18.089071, lr=0.000122, time_each_step=1.15s, eta=0:15:25\n",
      "2021-08-15 16:39:06 [INFO]\t[TRAIN] Epoch=47/100, Step=10/21, loss=17.345547, lr=0.000122, time_each_step=1.13s, eta=0:15:23\n",
      "2021-08-15 16:39:07 [INFO]\t[TRAIN] Epoch=47/100, Step=12/21, loss=15.828407, lr=0.000122, time_each_step=1.13s, eta=0:15:21\n",
      "2021-08-15 16:39:09 [INFO]\t[TRAIN] Epoch=47/100, Step=14/21, loss=20.05813, lr=0.000122, time_each_step=1.15s, eta=0:15:18\n",
      "2021-08-15 16:39:09 [INFO]\t[TRAIN] Epoch=47/100, Step=16/21, loss=16.834951, lr=0.000123, time_each_step=1.15s, eta=0:15:16\n",
      "2021-08-15 16:39:10 [INFO]\t[TRAIN] Epoch=47/100, Step=18/21, loss=21.072033, lr=0.000123, time_each_step=1.15s, eta=0:15:14\n",
      "2021-08-15 16:39:11 [INFO]\t[TRAIN] Epoch=47/100, Step=20/21, loss=16.304598, lr=0.000123, time_each_step=1.14s, eta=0:15:11\n",
      "2021-08-15 16:39:11 [INFO]\t[TRAIN] Epoch 47 finished, loss=17.456032, lr=0.000122 .\n",
      "2021-08-15 16:39:17 [INFO]\t[TRAIN] Epoch=48/100, Step=1/21, loss=21.320562, lr=0.000123, time_each_step=0.77s, eta=0:20:58\n",
      "2021-08-15 16:39:18 [INFO]\t[TRAIN] Epoch=48/100, Step=3/21, loss=13.274564, lr=0.000124, time_each_step=0.78s, eta=0:20:57\n",
      "2021-08-15 16:39:19 [INFO]\t[TRAIN] Epoch=48/100, Step=5/21, loss=17.256437, lr=0.000124, time_each_step=0.79s, eta=0:20:56\n",
      "2021-08-15 16:39:21 [INFO]\t[TRAIN] Epoch=48/100, Step=7/21, loss=16.953733, lr=0.000124, time_each_step=0.77s, eta=0:20:54\n",
      "2021-08-15 16:39:22 [INFO]\t[TRAIN] Epoch=48/100, Step=9/21, loss=15.594402, lr=0.000124, time_each_step=0.77s, eta=0:20:52\n",
      "2021-08-15 16:39:23 [INFO]\t[TRAIN] Epoch=48/100, Step=11/21, loss=18.353367, lr=0.000125, time_each_step=0.78s, eta=0:20:51\n",
      "2021-08-15 16:39:24 [INFO]\t[TRAIN] Epoch=48/100, Step=13/21, loss=29.719957, lr=0.000125, time_each_step=0.78s, eta=0:20:49\n",
      "2021-08-15 16:39:25 [INFO]\t[TRAIN] Epoch=48/100, Step=15/21, loss=20.011503, lr=0.000125, time_each_step=0.79s, eta=0:20:48\n",
      "2021-08-15 16:39:26 [INFO]\t[TRAIN] Epoch=48/100, Step=17/21, loss=14.723474, lr=0.000125, time_each_step=0.8s, eta=0:20:46\n",
      "2021-08-15 16:39:26 [INFO]\t[TRAIN] Epoch=48/100, Step=19/21, loss=12.886523, lr=0.000125, time_each_step=0.79s, eta=0:20:45\n",
      "2021-08-15 16:39:27 [INFO]\t[TRAIN] Epoch=48/100, Step=21/21, loss=24.416027, lr=0.000125, time_each_step=0.52s, eta=0:20:43\n",
      "2021-08-15 16:39:27 [INFO]\t[TRAIN] Epoch 48 finished, loss=18.522554, lr=0.000124 .\n",
      "2021-08-15 16:39:40 [INFO]\t[TRAIN] Epoch=49/100, Step=2/21, loss=13.328374, lr=0.000125, time_each_step=1.08s, eta=0:14:41\n",
      "2021-08-15 16:39:41 [INFO]\t[TRAIN] Epoch=49/100, Step=4/21, loss=17.853251, lr=0.000125, time_each_step=1.1s, eta=0:14:39\n",
      "2021-08-15 16:39:43 [INFO]\t[TRAIN] Epoch=49/100, Step=6/21, loss=18.580956, lr=0.000125, time_each_step=1.11s, eta=0:14:37\n",
      "2021-08-15 16:39:44 [INFO]\t[TRAIN] Epoch=49/100, Step=8/21, loss=15.169268, lr=0.000125, time_each_step=1.13s, eta=0:14:35\n",
      "2021-08-15 16:39:45 [INFO]\t[TRAIN] Epoch=49/100, Step=10/21, loss=19.665617, lr=0.000125, time_each_step=1.11s, eta=0:14:33\n",
      "2021-08-15 16:39:46 [INFO]\t[TRAIN] Epoch=49/100, Step=12/21, loss=16.651852, lr=0.000125, time_each_step=1.12s, eta=0:14:31\n",
      "2021-08-15 16:39:48 [INFO]\t[TRAIN] Epoch=49/100, Step=14/21, loss=21.773039, lr=0.000125, time_each_step=1.13s, eta=0:14:29\n",
      "2021-08-15 16:39:48 [INFO]\t[TRAIN] Epoch=49/100, Step=16/21, loss=17.668152, lr=0.000125, time_each_step=1.12s, eta=0:14:26\n",
      "2021-08-15 16:39:49 [INFO]\t[TRAIN] Epoch=49/100, Step=18/21, loss=15.272361, lr=0.000125, time_each_step=1.13s, eta=0:14:24\n",
      "2021-08-15 16:39:50 [INFO]\t[TRAIN] Epoch=49/100, Step=20/21, loss=14.343155, lr=0.000125, time_each_step=1.13s, eta=0:14:22\n",
      "2021-08-15 16:39:50 [INFO]\t[TRAIN] Epoch 49 finished, loss=16.551435, lr=0.000125 .\n",
      "2021-08-15 16:39:56 [INFO]\t[TRAIN] Epoch=50/100, Step=1/21, loss=17.677889, lr=0.000125, time_each_step=0.79s, eta=0:20:17\n",
      "2021-08-15 16:39:57 [INFO]\t[TRAIN] Epoch=50/100, Step=3/21, loss=17.065859, lr=0.000125, time_each_step=0.79s, eta=0:20:15\n",
      "2021-08-15 16:39:59 [INFO]\t[TRAIN] Epoch=50/100, Step=5/21, loss=15.64453, lr=0.000125, time_each_step=0.82s, eta=0:20:14\n",
      "2021-08-15 16:40:00 [INFO]\t[TRAIN] Epoch=50/100, Step=7/21, loss=25.209091, lr=0.000125, time_each_step=0.8s, eta=0:20:12\n",
      "2021-08-15 16:40:01 [INFO]\t[TRAIN] Epoch=50/100, Step=9/21, loss=17.762529, lr=0.000125, time_each_step=0.8s, eta=0:20:11\n",
      "2021-08-15 16:40:03 [INFO]\t[TRAIN] Epoch=50/100, Step=11/21, loss=17.132362, lr=0.000125, time_each_step=0.81s, eta=0:20:9\n",
      "2021-08-15 16:40:04 [INFO]\t[TRAIN] Epoch=50/100, Step=13/21, loss=16.914587, lr=0.000125, time_each_step=0.8s, eta=0:20:8\n",
      "2021-08-15 16:40:04 [INFO]\t[TRAIN] Epoch=50/100, Step=15/21, loss=17.660357, lr=0.000125, time_each_step=0.8s, eta=0:20:6\n",
      "2021-08-15 16:40:05 [INFO]\t[TRAIN] Epoch=50/100, Step=17/21, loss=15.602861, lr=0.000125, time_each_step=0.8s, eta=0:20:4\n",
      "2021-08-15 16:40:06 [INFO]\t[TRAIN] Epoch=50/100, Step=19/21, loss=21.128355, lr=0.000125, time_each_step=0.82s, eta=0:20:3\n",
      "2021-08-15 16:40:07 [INFO]\t[TRAIN] Epoch=50/100, Step=21/21, loss=22.203178, lr=0.000125, time_each_step=0.56s, eta=0:20:1\n",
      "2021-08-15 16:40:07 [INFO]\t[TRAIN] Epoch 50 finished, loss=18.152569, lr=0.000125 .\n",
      "2021-08-15 16:40:13 [INFO]\t[TRAIN] Epoch=51/100, Step=2/21, loss=16.908556, lr=0.000125, time_each_step=0.81s, eta=0:14:35\n",
      "2021-08-15 16:40:15 [INFO]\t[TRAIN] Epoch=51/100, Step=4/21, loss=17.39069, lr=0.000125, time_each_step=0.79s, eta=0:14:33\n",
      "2021-08-15 16:40:16 [INFO]\t[TRAIN] Epoch=51/100, Step=6/21, loss=22.116226, lr=0.000125, time_each_step=0.8s, eta=0:14:32\n",
      "2021-08-15 16:40:17 [INFO]\t[TRAIN] Epoch=51/100, Step=8/21, loss=21.540754, lr=0.000125, time_each_step=0.81s, eta=0:14:31\n",
      "2021-08-15 16:40:19 [INFO]\t[TRAIN] Epoch=51/100, Step=10/21, loss=21.415117, lr=0.000125, time_each_step=0.81s, eta=0:14:29\n",
      "2021-08-15 16:40:20 [INFO]\t[TRAIN] Epoch=51/100, Step=12/21, loss=17.029518, lr=0.000125, time_each_step=0.8s, eta=0:14:27\n",
      "2021-08-15 16:40:21 [INFO]\t[TRAIN] Epoch=51/100, Step=14/21, loss=19.043861, lr=0.000125, time_each_step=0.83s, eta=0:14:26\n",
      "2021-08-15 16:40:22 [INFO]\t[TRAIN] Epoch=51/100, Step=16/21, loss=15.637213, lr=0.000125, time_each_step=0.83s, eta=0:14:24\n",
      "2021-08-15 16:40:22 [INFO]\t[TRAIN] Epoch=51/100, Step=18/21, loss=15.559847, lr=0.000125, time_each_step=0.82s, eta=0:14:22\n",
      "2021-08-15 16:40:23 [INFO]\t[TRAIN] Epoch=51/100, Step=20/21, loss=17.257717, lr=0.000125, time_each_step=0.82s, eta=0:14:21\n",
      "2021-08-15 16:40:24 [INFO]\t[TRAIN] Epoch 51 finished, loss=17.688345, lr=0.000125 .\n",
      "2021-08-15 16:40:29 [INFO]\t[TRAIN] Epoch=52/100, Step=1/21, loss=21.190601, lr=0.000125, time_each_step=0.76s, eta=0:14:33\n",
      "2021-08-15 16:40:30 [INFO]\t[TRAIN] Epoch=52/100, Step=3/21, loss=14.739605, lr=0.000125, time_each_step=0.75s, eta=0:14:31\n",
      "2021-08-15 16:40:31 [INFO]\t[TRAIN] Epoch=52/100, Step=5/21, loss=23.672028, lr=0.000125, time_each_step=0.75s, eta=0:14:29\n",
      "2021-08-15 16:40:33 [INFO]\t[TRAIN] Epoch=52/100, Step=7/21, loss=16.216105, lr=0.000125, time_each_step=0.77s, eta=0:14:28\n",
      "2021-08-15 16:40:34 [INFO]\t[TRAIN] Epoch=52/100, Step=9/21, loss=23.019211, lr=0.000125, time_each_step=0.75s, eta=0:14:26\n",
      "2021-08-15 16:40:35 [INFO]\t[TRAIN] Epoch=52/100, Step=11/21, loss=15.87911, lr=0.000125, time_each_step=0.77s, eta=0:14:25\n",
      "2021-08-15 16:40:36 [INFO]\t[TRAIN] Epoch=52/100, Step=13/21, loss=19.6385, lr=0.000125, time_each_step=0.76s, eta=0:14:23\n",
      "2021-08-15 16:40:37 [INFO]\t[TRAIN] Epoch=52/100, Step=15/21, loss=15.020889, lr=0.000125, time_each_step=0.75s, eta=0:14:22\n",
      "2021-08-15 16:40:37 [INFO]\t[TRAIN] Epoch=52/100, Step=17/21, loss=16.419762, lr=0.000125, time_each_step=0.75s, eta=0:14:20\n",
      "2021-08-15 16:40:38 [INFO]\t[TRAIN] Epoch=52/100, Step=19/21, loss=18.640541, lr=0.000125, time_each_step=0.74s, eta=0:14:19\n",
      "2021-08-15 16:40:39 [INFO]\t[TRAIN] Epoch=52/100, Step=21/21, loss=14.842677, lr=0.000125, time_each_step=0.51s, eta=0:14:17\n",
      "2021-08-15 16:40:39 [INFO]\t[TRAIN] Epoch 52 finished, loss=17.916195, lr=0.000125 .\n",
      "2021-08-15 16:40:47 [INFO]\t[TRAIN] Epoch=53/100, Step=2/21, loss=17.225496, lr=0.000125, time_each_step=0.85s, eta=0:12:47\n",
      "2021-08-15 16:40:48 [INFO]\t[TRAIN] Epoch=53/100, Step=4/21, loss=14.119301, lr=0.000125, time_each_step=0.84s, eta=0:12:45\n",
      "2021-08-15 16:40:50 [INFO]\t[TRAIN] Epoch=53/100, Step=6/21, loss=13.147297, lr=0.000125, time_each_step=0.83s, eta=0:12:43\n",
      "2021-08-15 16:40:51 [INFO]\t[TRAIN] Epoch=53/100, Step=8/21, loss=17.391146, lr=0.000125, time_each_step=0.85s, eta=0:12:42\n",
      "2021-08-15 16:40:52 [INFO]\t[TRAIN] Epoch=53/100, Step=10/21, loss=19.279591, lr=0.000125, time_each_step=0.86s, eta=0:12:40\n",
      "2021-08-15 16:40:53 [INFO]\t[TRAIN] Epoch=53/100, Step=12/21, loss=18.093473, lr=0.000125, time_each_step=0.87s, eta=0:12:39\n",
      "2021-08-15 16:40:54 [INFO]\t[TRAIN] Epoch=53/100, Step=14/21, loss=15.061466, lr=0.000125, time_each_step=0.87s, eta=0:12:37\n",
      "2021-08-15 16:40:55 [INFO]\t[TRAIN] Epoch=53/100, Step=16/21, loss=16.975813, lr=0.000125, time_each_step=0.87s, eta=0:12:35\n",
      "2021-08-15 16:40:55 [INFO]\t[TRAIN] Epoch=53/100, Step=18/21, loss=11.643223, lr=0.000125, time_each_step=0.87s, eta=0:12:33\n",
      "2021-08-15 16:40:56 [INFO]\t[TRAIN] Epoch=53/100, Step=20/21, loss=13.085323, lr=0.000125, time_each_step=0.86s, eta=0:12:32\n",
      "2021-08-15 16:40:57 [INFO]\t[TRAIN] Epoch 53 finished, loss=16.17664, lr=0.000125 .\n",
      "2021-08-15 16:41:02 [INFO]\t[TRAIN] Epoch=54/100, Step=1/21, loss=13.524778, lr=0.000125, time_each_step=0.76s, eta=0:14:36\n",
      "2021-08-15 16:41:03 [INFO]\t[TRAIN] Epoch=54/100, Step=3/21, loss=17.861511, lr=0.000125, time_each_step=0.75s, eta=0:14:34\n",
      "2021-08-15 16:41:04 [INFO]\t[TRAIN] Epoch=54/100, Step=5/21, loss=21.108463, lr=0.000125, time_each_step=0.74s, eta=0:14:33\n",
      "2021-08-15 16:41:06 [INFO]\t[TRAIN] Epoch=54/100, Step=7/21, loss=15.59005, lr=0.000125, time_each_step=0.74s, eta=0:14:31\n",
      "2021-08-15 16:41:07 [INFO]\t[TRAIN] Epoch=54/100, Step=9/21, loss=16.035856, lr=0.000125, time_each_step=0.72s, eta=0:14:30\n",
      "2021-08-15 16:41:08 [INFO]\t[TRAIN] Epoch=54/100, Step=11/21, loss=16.332703, lr=0.000125, time_each_step=0.73s, eta=0:14:28\n",
      "2021-08-15 16:41:09 [INFO]\t[TRAIN] Epoch=54/100, Step=13/21, loss=19.681204, lr=0.000125, time_each_step=0.73s, eta=0:14:27\n",
      "2021-08-15 16:41:09 [INFO]\t[TRAIN] Epoch=54/100, Step=15/21, loss=16.283979, lr=0.000125, time_each_step=0.73s, eta=0:14:25\n",
      "2021-08-15 16:41:10 [INFO]\t[TRAIN] Epoch=54/100, Step=17/21, loss=19.069988, lr=0.000125, time_each_step=0.74s, eta=0:14:24\n",
      "2021-08-15 16:41:11 [INFO]\t[TRAIN] Epoch=54/100, Step=19/21, loss=15.264097, lr=0.000125, time_each_step=0.74s, eta=0:14:22\n",
      "2021-08-15 16:41:12 [INFO]\t[TRAIN] Epoch=54/100, Step=21/21, loss=17.472586, lr=0.000125, time_each_step=0.5s, eta=0:14:21\n",
      "2021-08-15 16:41:12 [INFO]\t[TRAIN] Epoch 54 finished, loss=16.787664, lr=0.000125 .\n",
      "2021-08-15 16:41:19 [INFO]\t[TRAIN] Epoch=55/100, Step=2/21, loss=17.838842, lr=0.000125, time_each_step=0.78s, eta=0:12:21\n",
      "2021-08-15 16:41:20 [INFO]\t[TRAIN] Epoch=55/100, Step=4/21, loss=15.423531, lr=0.000125, time_each_step=0.8s, eta=0:12:20\n",
      "2021-08-15 16:41:21 [INFO]\t[TRAIN] Epoch=55/100, Step=6/21, loss=12.351338, lr=0.000125, time_each_step=0.79s, eta=0:12:18\n",
      "2021-08-15 16:41:23 [INFO]\t[TRAIN] Epoch=55/100, Step=8/21, loss=19.821354, lr=0.000125, time_each_step=0.8s, eta=0:12:17\n",
      "2021-08-15 16:41:24 [INFO]\t[TRAIN] Epoch=55/100, Step=10/21, loss=15.567663, lr=0.000125, time_each_step=0.82s, eta=0:12:16\n",
      "2021-08-15 16:41:25 [INFO]\t[TRAIN] Epoch=55/100, Step=12/21, loss=13.872447, lr=0.000125, time_each_step=0.82s, eta=0:12:14\n",
      "2021-08-15 16:41:26 [INFO]\t[TRAIN] Epoch=55/100, Step=14/21, loss=17.65633, lr=0.000125, time_each_step=0.85s, eta=0:12:13\n",
      "2021-08-15 16:41:27 [INFO]\t[TRAIN] Epoch=55/100, Step=16/21, loss=18.749802, lr=0.000125, time_each_step=0.86s, eta=0:12:11\n",
      "2021-08-15 16:41:28 [INFO]\t[TRAIN] Epoch=55/100, Step=18/21, loss=13.429358, lr=0.000125, time_each_step=0.87s, eta=0:12:9\n",
      "2021-08-15 16:41:29 [INFO]\t[TRAIN] Epoch=55/100, Step=20/21, loss=11.993823, lr=0.000125, time_each_step=0.85s, eta=0:12:7\n",
      "2021-08-15 16:41:30 [INFO]\t[TRAIN] Epoch 55 finished, loss=16.882387, lr=0.000125 .\n",
      "2021-08-15 16:41:35 [INFO]\t[TRAIN] Epoch=56/100, Step=1/21, loss=16.998903, lr=0.000125, time_each_step=0.81s, eta=0:13:47\n",
      "2021-08-15 16:41:36 [INFO]\t[TRAIN] Epoch=56/100, Step=3/21, loss=15.777981, lr=0.000125, time_each_step=0.78s, eta=0:13:45\n",
      "2021-08-15 16:41:37 [INFO]\t[TRAIN] Epoch=56/100, Step=5/21, loss=19.33709, lr=0.000125, time_each_step=0.8s, eta=0:13:43\n",
      "2021-08-15 16:41:39 [INFO]\t[TRAIN] Epoch=56/100, Step=7/21, loss=16.307438, lr=0.000125, time_each_step=0.79s, eta=0:13:42\n",
      "2021-08-15 16:41:40 [INFO]\t[TRAIN] Epoch=56/100, Step=9/21, loss=12.475285, lr=0.000125, time_each_step=0.78s, eta=0:13:40\n",
      "2021-08-15 16:41:41 [INFO]\t[TRAIN] Epoch=56/100, Step=11/21, loss=16.313917, lr=0.000125, time_each_step=0.79s, eta=0:13:38\n",
      "2021-08-15 16:41:42 [INFO]\t[TRAIN] Epoch=56/100, Step=13/21, loss=17.616278, lr=0.000125, time_each_step=0.78s, eta=0:13:37\n",
      "2021-08-15 16:41:43 [INFO]\t[TRAIN] Epoch=56/100, Step=15/21, loss=17.894423, lr=0.000125, time_each_step=0.77s, eta=0:13:35\n",
      "2021-08-15 16:41:44 [INFO]\t[TRAIN] Epoch=56/100, Step=17/21, loss=17.864416, lr=0.000125, time_each_step=0.76s, eta=0:13:34\n",
      "2021-08-15 16:41:45 [INFO]\t[TRAIN] Epoch=56/100, Step=19/21, loss=14.341816, lr=0.000125, time_each_step=0.78s, eta=0:13:32\n",
      "2021-08-15 16:41:46 [INFO]\t[TRAIN] Epoch=56/100, Step=21/21, loss=16.509899, lr=0.000125, time_each_step=0.53s, eta=0:13:31\n",
      "2021-08-15 16:41:46 [INFO]\t[TRAIN] Epoch 56 finished, loss=16.678183, lr=0.000125 .\n",
      "2021-08-15 16:41:52 [INFO]\t[TRAIN] Epoch=57/100, Step=2/21, loss=15.744483, lr=0.000125, time_each_step=0.79s, eta=0:12:26\n",
      "2021-08-15 16:41:53 [INFO]\t[TRAIN] Epoch=57/100, Step=4/21, loss=17.770142, lr=0.000125, time_each_step=0.78s, eta=0:12:25\n",
      "2021-08-15 16:41:55 [INFO]\t[TRAIN] Epoch=57/100, Step=6/21, loss=16.171673, lr=0.000125, time_each_step=0.8s, eta=0:12:23\n",
      "2021-08-15 16:41:56 [INFO]\t[TRAIN] Epoch=57/100, Step=8/21, loss=18.91049, lr=0.000125, time_each_step=0.78s, eta=0:12:22\n",
      "2021-08-15 16:41:57 [INFO]\t[TRAIN] Epoch=57/100, Step=10/21, loss=17.914322, lr=0.000125, time_each_step=0.8s, eta=0:12:20\n",
      "2021-08-15 16:41:58 [INFO]\t[TRAIN] Epoch=57/100, Step=12/21, loss=18.30645, lr=0.000125, time_each_step=0.81s, eta=0:12:19\n",
      "2021-08-15 16:41:59 [INFO]\t[TRAIN] Epoch=57/100, Step=14/21, loss=15.214895, lr=0.000125, time_each_step=0.82s, eta=0:12:17\n",
      "2021-08-15 16:42:00 [INFO]\t[TRAIN] Epoch=57/100, Step=16/21, loss=19.995697, lr=0.000125, time_each_step=0.82s, eta=0:12:15\n",
      "2021-08-15 16:42:01 [INFO]\t[TRAIN] Epoch=57/100, Step=18/21, loss=17.183729, lr=0.000125, time_each_step=0.8s, eta=0:12:14\n",
      "2021-08-15 16:42:02 [INFO]\t[TRAIN] Epoch=57/100, Step=20/21, loss=20.259386, lr=0.000125, time_each_step=0.81s, eta=0:12:12\n",
      "2021-08-15 16:42:02 [INFO]\t[TRAIN] Epoch 57 finished, loss=17.203348, lr=0.000125 .\n",
      "2021-08-15 16:42:09 [INFO]\t[TRAIN] Epoch=58/100, Step=1/21, loss=18.027864, lr=0.000125, time_each_step=0.87s, eta=0:12:44\n",
      "2021-08-15 16:42:10 [INFO]\t[TRAIN] Epoch=58/100, Step=3/21, loss=15.733771, lr=0.000125, time_each_step=0.86s, eta=0:12:42\n",
      "2021-08-15 16:42:12 [INFO]\t[TRAIN] Epoch=58/100, Step=5/21, loss=15.46863, lr=0.000125, time_each_step=0.86s, eta=0:12:40\n",
      "2021-08-15 16:42:13 [INFO]\t[TRAIN] Epoch=58/100, Step=7/21, loss=13.478394, lr=0.000125, time_each_step=0.84s, eta=0:12:38\n",
      "2021-08-15 16:42:14 [INFO]\t[TRAIN] Epoch=58/100, Step=9/21, loss=17.523249, lr=0.000125, time_each_step=0.84s, eta=0:12:36\n",
      "2021-08-15 16:42:15 [INFO]\t[TRAIN] Epoch=58/100, Step=11/21, loss=16.887157, lr=0.000125, time_each_step=0.83s, eta=0:12:35\n",
      "2021-08-15 16:42:16 [INFO]\t[TRAIN] Epoch=58/100, Step=13/21, loss=12.184109, lr=0.000125, time_each_step=0.84s, eta=0:12:33\n",
      "2021-08-15 16:42:17 [INFO]\t[TRAIN] Epoch=58/100, Step=15/21, loss=19.126936, lr=0.000125, time_each_step=0.86s, eta=0:12:31\n",
      "2021-08-15 16:42:18 [INFO]\t[TRAIN] Epoch=58/100, Step=17/21, loss=17.176291, lr=0.000125, time_each_step=0.88s, eta=0:12:30\n",
      "2021-08-15 16:42:19 [INFO]\t[TRAIN] Epoch=58/100, Step=19/21, loss=11.810245, lr=0.000125, time_each_step=0.88s, eta=0:12:28\n",
      "2021-08-15 16:42:21 [INFO]\t[TRAIN] Epoch=58/100, Step=21/21, loss=14.624831, lr=0.000125, time_each_step=0.57s, eta=0:12:26\n",
      "2021-08-15 16:42:21 [INFO]\t[TRAIN] Epoch 58 finished, loss=15.449544, lr=0.000125 .\n",
      "2021-08-15 16:42:27 [INFO]\t[TRAIN] Epoch=59/100, Step=2/21, loss=15.350189, lr=0.000125, time_each_step=0.85s, eta=0:13:23\n",
      "2021-08-15 16:42:29 [INFO]\t[TRAIN] Epoch=59/100, Step=4/21, loss=12.302112, lr=0.000125, time_each_step=0.85s, eta=0:13:22\n",
      "2021-08-15 16:42:30 [INFO]\t[TRAIN] Epoch=59/100, Step=6/21, loss=17.193197, lr=0.000125, time_each_step=0.86s, eta=0:13:20\n",
      "2021-08-15 16:42:31 [INFO]\t[TRAIN] Epoch=59/100, Step=8/21, loss=15.991117, lr=0.000125, time_each_step=0.86s, eta=0:13:18\n",
      "2021-08-15 16:42:33 [INFO]\t[TRAIN] Epoch=59/100, Step=10/21, loss=20.092903, lr=0.000125, time_each_step=0.9s, eta=0:13:17\n",
      "2021-08-15 16:42:34 [INFO]\t[TRAIN] Epoch=59/100, Step=12/21, loss=12.67592, lr=0.000125, time_each_step=0.87s, eta=0:13:15\n",
      "2021-08-15 16:42:35 [INFO]\t[TRAIN] Epoch=59/100, Step=14/21, loss=19.383417, lr=0.000125, time_each_step=0.86s, eta=0:13:13\n",
      "2021-08-15 16:42:35 [INFO]\t[TRAIN] Epoch=59/100, Step=16/21, loss=18.68862, lr=0.000125, time_each_step=0.85s, eta=0:13:11\n",
      "2021-08-15 16:42:36 [INFO]\t[TRAIN] Epoch=59/100, Step=18/21, loss=21.043238, lr=0.000125, time_each_step=0.84s, eta=0:13:10\n",
      "2021-08-15 16:42:37 [INFO]\t[TRAIN] Epoch=59/100, Step=20/21, loss=17.182083, lr=0.000125, time_each_step=0.84s, eta=0:13:8\n",
      "2021-08-15 16:42:38 [INFO]\t[TRAIN] Epoch 59 finished, loss=16.522537, lr=0.000125 .\n",
      "2021-08-15 16:42:45 [INFO]\t[TRAIN] Epoch=60/100, Step=1/21, loss=15.611202, lr=0.000125, time_each_step=0.89s, eta=0:12:24\n",
      "2021-08-15 16:42:46 [INFO]\t[TRAIN] Epoch=60/100, Step=3/21, loss=18.382256, lr=0.000125, time_each_step=0.88s, eta=0:12:22\n",
      "2021-08-15 16:42:47 [INFO]\t[TRAIN] Epoch=60/100, Step=5/21, loss=13.657255, lr=0.000125, time_each_step=0.88s, eta=0:12:21\n",
      "2021-08-15 16:42:48 [INFO]\t[TRAIN] Epoch=60/100, Step=7/21, loss=17.97632, lr=0.000125, time_each_step=0.86s, eta=0:12:19\n",
      "2021-08-15 16:42:50 [INFO]\t[TRAIN] Epoch=60/100, Step=9/21, loss=13.159039, lr=0.000125, time_each_step=0.87s, eta=0:12:17\n",
      "2021-08-15 16:42:51 [INFO]\t[TRAIN] Epoch=60/100, Step=11/21, loss=20.628019, lr=0.000125, time_each_step=0.89s, eta=0:12:15\n",
      "2021-08-15 16:42:52 [INFO]\t[TRAIN] Epoch=60/100, Step=13/21, loss=15.759879, lr=0.000125, time_each_step=0.9s, eta=0:12:14\n",
      "2021-08-15 16:42:54 [INFO]\t[TRAIN] Epoch=60/100, Step=15/21, loss=15.85696, lr=0.000125, time_each_step=0.93s, eta=0:12:12\n",
      "2021-08-15 16:42:55 [INFO]\t[TRAIN] Epoch=60/100, Step=17/21, loss=17.910944, lr=0.000125, time_each_step=0.92s, eta=0:12:10\n",
      "2021-08-15 16:42:56 [INFO]\t[TRAIN] Epoch=60/100, Step=19/21, loss=18.075832, lr=0.000125, time_each_step=0.92s, eta=0:12:8\n",
      "2021-08-15 16:42:57 [INFO]\t[TRAIN] Epoch=60/100, Step=21/21, loss=13.561256, lr=0.000125, time_each_step=0.6s, eta=0:12:7\n",
      "2021-08-15 16:42:57 [INFO]\t[TRAIN] Epoch 60 finished, loss=16.264709, lr=0.000125 .\n",
      "2021-08-15 16:42:57 [INFO]\tStart to evaluating(total_samples=98, total_steps=7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 7/7 [00:06<00:00,  1.01it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:43:05 [INFO]\t[EVAL] Finished, Epoch=60, bbox_map=9.765209 .\n",
      "2021-08-15 16:43:08 [INFO]\tModel saved in output/ppyolo/best_model.\n",
      "2021-08-15 16:43:10 [INFO]\tModel saved in output/ppyolo/epoch_60.\n",
      "2021-08-15 16:43:10 [INFO]\tCurrent evaluated best model in eval_dataset is epoch_60, bbox_map=9.765208664457269\n",
      "2021-08-15 16:43:15 [INFO]\t[TRAIN] Epoch=61/100, Step=2/21, loss=15.489842, lr=0.000125, time_each_step=0.81s, eta=0:13:13\n",
      "2021-08-15 16:43:17 [INFO]\t[TRAIN] Epoch=61/100, Step=4/21, loss=17.672678, lr=0.000125, time_each_step=0.82s, eta=0:13:11\n",
      "2021-08-15 16:43:18 [INFO]\t[TRAIN] Epoch=61/100, Step=6/21, loss=16.575106, lr=0.000125, time_each_step=0.85s, eta=0:13:10\n",
      "2021-08-15 16:43:20 [INFO]\t[TRAIN] Epoch=61/100, Step=8/21, loss=17.914013, lr=0.000125, time_each_step=0.85s, eta=0:13:8\n",
      "2021-08-15 16:43:21 [INFO]\t[TRAIN] Epoch=61/100, Step=10/21, loss=17.398294, lr=0.000125, time_each_step=0.84s, eta=0:13:7\n",
      "2021-08-15 16:43:22 [INFO]\t[TRAIN] Epoch=61/100, Step=12/21, loss=17.228054, lr=0.000125, time_each_step=0.83s, eta=0:13:5\n",
      "2021-08-15 16:43:23 [INFO]\t[TRAIN] Epoch=61/100, Step=14/21, loss=16.897362, lr=0.000125, time_each_step=0.81s, eta=0:13:3\n",
      "2021-08-15 16:43:24 [INFO]\t[TRAIN] Epoch=61/100, Step=16/21, loss=17.115793, lr=0.000125, time_each_step=0.81s, eta=0:13:1\n",
      "2021-08-15 16:43:25 [INFO]\t[TRAIN] Epoch=61/100, Step=18/21, loss=18.629023, lr=0.000125, time_each_step=0.83s, eta=0:13:0\n",
      "2021-08-15 16:43:26 [INFO]\t[TRAIN] Epoch=61/100, Step=20/21, loss=17.621614, lr=0.000125, time_each_step=0.82s, eta=0:12:58\n",
      "2021-08-15 16:43:27 [INFO]\t[TRAIN] Epoch 61 finished, loss=16.801056, lr=0.000125 .\n",
      "2021-08-15 16:43:33 [INFO]\t[TRAIN] Epoch=62/100, Step=1/21, loss=17.336567, lr=0.000125, time_each_step=0.88s, eta=0:11:28\n",
      "2021-08-15 16:43:34 [INFO]\t[TRAIN] Epoch=62/100, Step=3/21, loss=14.922677, lr=0.000125, time_each_step=0.89s, eta=0:11:26\n",
      "2021-08-15 16:43:36 [INFO]\t[TRAIN] Epoch=62/100, Step=5/21, loss=13.971169, lr=0.000125, time_each_step=0.88s, eta=0:11:24\n",
      "2021-08-15 16:43:37 [INFO]\t[TRAIN] Epoch=62/100, Step=7/21, loss=12.4901, lr=0.000125, time_each_step=0.86s, eta=0:11:22\n",
      "2021-08-15 16:43:38 [INFO]\t[TRAIN] Epoch=62/100, Step=9/21, loss=16.028044, lr=0.000125, time_each_step=0.84s, eta=0:11:20\n",
      "2021-08-15 16:43:39 [INFO]\t[TRAIN] Epoch=62/100, Step=11/21, loss=18.426615, lr=0.000125, time_each_step=0.86s, eta=0:11:19\n",
      "2021-08-15 16:43:40 [INFO]\t[TRAIN] Epoch=62/100, Step=13/21, loss=19.136919, lr=0.000125, time_each_step=0.88s, eta=0:11:17\n",
      "2021-08-15 16:43:42 [INFO]\t[TRAIN] Epoch=62/100, Step=15/21, loss=17.509996, lr=0.000125, time_each_step=0.88s, eta=0:11:15\n",
      "2021-08-15 16:43:43 [INFO]\t[TRAIN] Epoch=62/100, Step=17/21, loss=15.343774, lr=0.000125, time_each_step=0.87s, eta=0:11:14\n",
      "2021-08-15 16:43:43 [INFO]\t[TRAIN] Epoch=62/100, Step=19/21, loss=19.096418, lr=0.000125, time_each_step=0.85s, eta=0:11:12\n",
      "2021-08-15 16:43:44 [INFO]\t[TRAIN] Epoch=62/100, Step=21/21, loss=15.009101, lr=0.000125, time_each_step=0.54s, eta=0:11:10\n",
      "2021-08-15 16:43:44 [INFO]\t[TRAIN] Epoch 62 finished, loss=17.072586, lr=0.000125 .\n",
      "2021-08-15 16:43:52 [INFO]\t[TRAIN] Epoch=63/100, Step=2/21, loss=16.203833, lr=0.000125, time_each_step=0.88s, eta=0:11:14\n",
      "2021-08-15 16:43:53 [INFO]\t[TRAIN] Epoch=63/100, Step=4/21, loss=16.733263, lr=0.000125, time_each_step=0.86s, eta=0:11:12\n",
      "2021-08-15 16:43:55 [INFO]\t[TRAIN] Epoch=63/100, Step=6/21, loss=23.036829, lr=0.000125, time_each_step=0.87s, eta=0:11:11\n",
      "2021-08-15 16:43:56 [INFO]\t[TRAIN] Epoch=63/100, Step=8/21, loss=15.108212, lr=0.000125, time_each_step=0.88s, eta=0:11:9\n",
      "2021-08-15 16:43:57 [INFO]\t[TRAIN] Epoch=63/100, Step=10/21, loss=13.070391, lr=0.000125, time_each_step=0.89s, eta=0:11:7\n",
      "2021-08-15 16:43:58 [INFO]\t[TRAIN] Epoch=63/100, Step=12/21, loss=17.241024, lr=0.000125, time_each_step=0.88s, eta=0:11:5\n",
      "2021-08-15 16:43:59 [INFO]\t[TRAIN] Epoch=63/100, Step=14/21, loss=13.295424, lr=0.000125, time_each_step=0.86s, eta=0:11:3\n",
      "2021-08-15 16:44:00 [INFO]\t[TRAIN] Epoch=63/100, Step=16/21, loss=14.282919, lr=0.000125, time_each_step=0.86s, eta=0:11:2\n",
      "2021-08-15 16:44:00 [INFO]\t[TRAIN] Epoch=63/100, Step=18/21, loss=15.808778, lr=0.000125, time_each_step=0.86s, eta=0:11:0\n",
      "2021-08-15 16:44:01 [INFO]\t[TRAIN] Epoch=63/100, Step=20/21, loss=22.281986, lr=0.000125, time_each_step=0.86s, eta=0:10:58\n",
      "2021-08-15 16:44:01 [INFO]\t[TRAIN] Epoch 63 finished, loss=17.065292, lr=0.000125 .\n",
      "2021-08-15 16:44:07 [INFO]\t[TRAIN] Epoch=64/100, Step=1/21, loss=19.054724, lr=0.000125, time_each_step=0.73s, eta=0:11:9\n",
      "2021-08-15 16:44:08 [INFO]\t[TRAIN] Epoch=64/100, Step=3/21, loss=16.614325, lr=0.000125, time_each_step=0.74s, eta=0:11:8\n",
      "2021-08-15 16:44:09 [INFO]\t[TRAIN] Epoch=64/100, Step=5/21, loss=17.570454, lr=0.000125, time_each_step=0.71s, eta=0:11:6\n",
      "2021-08-15 16:44:10 [INFO]\t[TRAIN] Epoch=64/100, Step=7/21, loss=15.953093, lr=0.000125, time_each_step=0.71s, eta=0:11:4\n",
      "2021-08-15 16:44:11 [INFO]\t[TRAIN] Epoch=64/100, Step=9/21, loss=16.021339, lr=0.000125, time_each_step=0.71s, eta=0:11:3\n",
      "2021-08-15 16:44:12 [INFO]\t[TRAIN] Epoch=64/100, Step=11/21, loss=15.692917, lr=0.000125, time_each_step=0.71s, eta=0:11:1\n",
      "2021-08-15 16:44:13 [INFO]\t[TRAIN] Epoch=64/100, Step=13/21, loss=14.852508, lr=0.000125, time_each_step=0.73s, eta=0:11:0\n",
      "2021-08-15 16:44:15 [INFO]\t[TRAIN] Epoch=64/100, Step=15/21, loss=19.626451, lr=0.000125, time_each_step=0.74s, eta=0:10:59\n",
      "2021-08-15 16:44:16 [INFO]\t[TRAIN] Epoch=64/100, Step=17/21, loss=14.782886, lr=0.000125, time_each_step=0.75s, eta=0:10:57\n",
      "2021-08-15 16:44:17 [INFO]\t[TRAIN] Epoch=64/100, Step=19/21, loss=14.866962, lr=0.000125, time_each_step=0.77s, eta=0:10:56\n",
      "2021-08-15 16:44:18 [INFO]\t[TRAIN] Epoch=64/100, Step=21/21, loss=16.608988, lr=0.000125, time_each_step=0.54s, eta=0:10:54\n",
      "2021-08-15 16:44:18 [INFO]\t[TRAIN] Epoch 64 finished, loss=16.430252, lr=0.000125 .\n",
      "2021-08-15 16:44:26 [INFO]\t[TRAIN] Epoch=65/100, Step=2/21, loss=15.034423, lr=0.000125, time_each_step=0.88s, eta=0:10:7\n",
      "2021-08-15 16:44:27 [INFO]\t[TRAIN] Epoch=65/100, Step=4/21, loss=16.621265, lr=0.000125, time_each_step=0.91s, eta=0:10:5\n",
      "2021-08-15 16:44:28 [INFO]\t[TRAIN] Epoch=65/100, Step=6/21, loss=17.07859, lr=0.000125, time_each_step=0.92s, eta=0:10:4\n",
      "2021-08-15 16:44:30 [INFO]\t[TRAIN] Epoch=65/100, Step=8/21, loss=21.051201, lr=0.000125, time_each_step=0.92s, eta=0:10:2\n",
      "2021-08-15 16:44:31 [INFO]\t[TRAIN] Epoch=65/100, Step=10/21, loss=13.784368, lr=0.000125, time_each_step=0.92s, eta=0:10:0\n",
      "2021-08-15 16:44:32 [INFO]\t[TRAIN] Epoch=65/100, Step=12/21, loss=15.833472, lr=0.000125, time_each_step=0.91s, eta=0:9:58\n",
      "2021-08-15 16:44:32 [INFO]\t[TRAIN] Epoch=65/100, Step=14/21, loss=19.202364, lr=0.000125, time_each_step=0.89s, eta=0:9:56\n",
      "2021-08-15 16:44:33 [INFO]\t[TRAIN] Epoch=65/100, Step=16/21, loss=13.153398, lr=0.000125, time_each_step=0.89s, eta=0:9:54\n",
      "2021-08-15 16:44:35 [INFO]\t[TRAIN] Epoch=65/100, Step=18/21, loss=19.803335, lr=0.000125, time_each_step=0.89s, eta=0:9:53\n",
      "2021-08-15 16:44:36 [INFO]\t[TRAIN] Epoch=65/100, Step=20/21, loss=15.263751, lr=0.000125, time_each_step=0.9s, eta=0:9:51\n",
      "2021-08-15 16:44:36 [INFO]\t[TRAIN] Epoch 65 finished, loss=16.319624, lr=0.000125 .\n",
      "2021-08-15 16:44:41 [INFO]\t[TRAIN] Epoch=66/100, Step=1/21, loss=12.301632, lr=0.000125, time_each_step=0.78s, eta=0:11:3\n",
      "2021-08-15 16:44:42 [INFO]\t[TRAIN] Epoch=66/100, Step=3/21, loss=15.179954, lr=0.000125, time_each_step=0.78s, eta=0:11:2\n",
      "2021-08-15 16:44:44 [INFO]\t[TRAIN] Epoch=66/100, Step=5/21, loss=17.32094, lr=0.000125, time_each_step=0.77s, eta=0:11:0\n",
      "2021-08-15 16:44:45 [INFO]\t[TRAIN] Epoch=66/100, Step=7/21, loss=17.563396, lr=0.000125, time_each_step=0.75s, eta=0:10:58\n",
      "2021-08-15 16:44:46 [INFO]\t[TRAIN] Epoch=66/100, Step=9/21, loss=21.322245, lr=0.000125, time_each_step=0.75s, eta=0:10:57\n",
      "2021-08-15 16:44:46 [INFO]\t[TRAIN] Epoch=66/100, Step=11/21, loss=17.204954, lr=0.000125, time_each_step=0.74s, eta=0:10:55\n",
      "2021-08-15 16:44:48 [INFO]\t[TRAIN] Epoch=66/100, Step=13/21, loss=16.714373, lr=0.000125, time_each_step=0.77s, eta=0:10:54\n",
      "2021-08-15 16:44:49 [INFO]\t[TRAIN] Epoch=66/100, Step=15/21, loss=15.463237, lr=0.000125, time_each_step=0.78s, eta=0:10:52\n",
      "2021-08-15 16:44:50 [INFO]\t[TRAIN] Epoch=66/100, Step=17/21, loss=10.881476, lr=0.000125, time_each_step=0.77s, eta=0:10:51\n",
      "2021-08-15 16:44:51 [INFO]\t[TRAIN] Epoch=66/100, Step=19/21, loss=12.054314, lr=0.000125, time_each_step=0.76s, eta=0:10:49\n",
      "2021-08-15 16:44:52 [INFO]\t[TRAIN] Epoch=66/100, Step=21/21, loss=16.317804, lr=0.000125, time_each_step=0.54s, eta=0:10:48\n",
      "2021-08-15 16:44:52 [INFO]\t[TRAIN] Epoch 66 finished, loss=16.061035, lr=0.000125 .\n",
      "2021-08-15 16:44:59 [INFO]\t[TRAIN] Epoch=67/100, Step=2/21, loss=16.367668, lr=0.000125, time_each_step=0.82s, eta=0:9:33\n",
      "2021-08-15 16:45:01 [INFO]\t[TRAIN] Epoch=67/100, Step=4/21, loss=15.860239, lr=0.000125, time_each_step=0.85s, eta=0:9:32\n",
      "2021-08-15 16:45:02 [INFO]\t[TRAIN] Epoch=67/100, Step=6/21, loss=13.792436, lr=0.000125, time_each_step=0.86s, eta=0:9:31\n",
      "2021-08-15 16:45:03 [INFO]\t[TRAIN] Epoch=67/100, Step=8/21, loss=14.753632, lr=0.000125, time_each_step=0.87s, eta=0:9:29\n",
      "2021-08-15 16:45:04 [INFO]\t[TRAIN] Epoch=67/100, Step=10/21, loss=16.780949, lr=0.000125, time_each_step=0.89s, eta=0:9:28\n",
      "2021-08-15 16:45:06 [INFO]\t[TRAIN] Epoch=67/100, Step=12/21, loss=16.599552, lr=0.000125, time_each_step=0.9s, eta=0:9:26\n",
      "2021-08-15 16:45:07 [INFO]\t[TRAIN] Epoch=67/100, Step=14/21, loss=13.277053, lr=0.000125, time_each_step=0.88s, eta=0:9:24\n",
      "2021-08-15 16:45:08 [INFO]\t[TRAIN] Epoch=67/100, Step=16/21, loss=14.732591, lr=0.000125, time_each_step=0.89s, eta=0:9:22\n",
      "2021-08-15 16:45:08 [INFO]\t[TRAIN] Epoch=67/100, Step=18/21, loss=14.776111, lr=0.000125, time_each_step=0.89s, eta=0:9:20\n",
      "2021-08-15 16:45:09 [INFO]\t[TRAIN] Epoch=67/100, Step=20/21, loss=15.145164, lr=0.000125, time_each_step=0.87s, eta=0:9:19\n",
      "2021-08-15 16:45:10 [INFO]\t[TRAIN] Epoch 67 finished, loss=15.74474, lr=0.000125 .\n",
      "2021-08-15 16:45:15 [INFO]\t[TRAIN] Epoch=68/100, Step=1/21, loss=16.561541, lr=0.000125, time_each_step=0.82s, eta=0:10:11\n",
      "2021-08-15 16:45:17 [INFO]\t[TRAIN] Epoch=68/100, Step=3/21, loss=12.935951, lr=0.000125, time_each_step=0.79s, eta=0:10:9\n",
      "2021-08-15 16:45:18 [INFO]\t[TRAIN] Epoch=68/100, Step=5/21, loss=12.871989, lr=0.000125, time_each_step=0.8s, eta=0:10:7\n",
      "2021-08-15 16:45:19 [INFO]\t[TRAIN] Epoch=68/100, Step=7/21, loss=16.331144, lr=0.000125, time_each_step=0.81s, eta=0:10:6\n",
      "2021-08-15 16:45:20 [INFO]\t[TRAIN] Epoch=68/100, Step=9/21, loss=15.833948, lr=0.000125, time_each_step=0.81s, eta=0:10:4\n",
      "2021-08-15 16:45:22 [INFO]\t[TRAIN] Epoch=68/100, Step=11/21, loss=14.104404, lr=0.000125, time_each_step=0.8s, eta=0:10:2\n",
      "2021-08-15 16:45:22 [INFO]\t[TRAIN] Epoch=68/100, Step=13/21, loss=13.258681, lr=0.000125, time_each_step=0.8s, eta=0:10:1\n",
      "2021-08-15 16:45:24 [INFO]\t[TRAIN] Epoch=68/100, Step=15/21, loss=19.521523, lr=0.000125, time_each_step=0.81s, eta=0:9:59\n",
      "2021-08-15 16:45:24 [INFO]\t[TRAIN] Epoch=68/100, Step=17/21, loss=16.547466, lr=0.000125, time_each_step=0.79s, eta=0:9:58\n",
      "2021-08-15 16:45:25 [INFO]\t[TRAIN] Epoch=68/100, Step=19/21, loss=17.23225, lr=0.000125, time_each_step=0.78s, eta=0:9:56\n",
      "2021-08-15 16:45:26 [INFO]\t[TRAIN] Epoch=68/100, Step=21/21, loss=17.578178, lr=0.000125, time_each_step=0.53s, eta=0:9:54\n",
      "2021-08-15 16:45:26 [INFO]\t[TRAIN] Epoch 68 finished, loss=15.704741, lr=0.000125 .\n",
      "2021-08-15 16:45:34 [INFO]\t[TRAIN] Epoch=69/100, Step=2/21, loss=13.621265, lr=0.000125, time_each_step=0.9s, eta=0:9:6\n",
      "2021-08-15 16:45:36 [INFO]\t[TRAIN] Epoch=69/100, Step=4/21, loss=12.573802, lr=0.000125, time_each_step=0.9s, eta=0:9:4\n",
      "2021-08-15 16:45:37 [INFO]\t[TRAIN] Epoch=69/100, Step=6/21, loss=14.782795, lr=0.000125, time_each_step=0.87s, eta=0:9:2\n",
      "2021-08-15 16:45:38 [INFO]\t[TRAIN] Epoch=69/100, Step=8/21, loss=18.340801, lr=0.000125, time_each_step=0.87s, eta=0:9:0\n",
      "2021-08-15 16:45:39 [INFO]\t[TRAIN] Epoch=69/100, Step=10/21, loss=16.431515, lr=0.000125, time_each_step=0.88s, eta=0:8:58\n",
      "2021-08-15 16:45:40 [INFO]\t[TRAIN] Epoch=69/100, Step=12/21, loss=14.415679, lr=0.000125, time_each_step=0.89s, eta=0:8:57\n",
      "2021-08-15 16:45:41 [INFO]\t[TRAIN] Epoch=69/100, Step=14/21, loss=16.266962, lr=0.000125, time_each_step=0.89s, eta=0:8:55\n",
      "2021-08-15 16:45:42 [INFO]\t[TRAIN] Epoch=69/100, Step=16/21, loss=19.247177, lr=0.000125, time_each_step=0.89s, eta=0:8:53\n",
      "2021-08-15 16:45:43 [INFO]\t[TRAIN] Epoch=69/100, Step=18/21, loss=20.036619, lr=0.000125, time_each_step=0.92s, eta=0:8:51\n",
      "2021-08-15 16:45:44 [INFO]\t[TRAIN] Epoch=69/100, Step=20/21, loss=13.451722, lr=0.000125, time_each_step=0.92s, eta=0:8:50\n",
      "2021-08-15 16:45:45 [INFO]\t[TRAIN] Epoch 69 finished, loss=15.811413, lr=0.000125 .\n",
      "2021-08-15 16:45:52 [INFO]\t[TRAIN] Epoch=70/100, Step=1/21, loss=12.025859, lr=0.000125, time_each_step=0.86s, eta=0:10:5\n",
      "2021-08-15 16:45:53 [INFO]\t[TRAIN] Epoch=70/100, Step=3/21, loss=13.558553, lr=0.000125, time_each_step=0.85s, eta=0:10:3\n",
      "2021-08-15 16:45:54 [INFO]\t[TRAIN] Epoch=70/100, Step=5/21, loss=16.415197, lr=0.000125, time_each_step=0.87s, eta=0:10:2\n",
      "2021-08-15 16:45:55 [INFO]\t[TRAIN] Epoch=70/100, Step=7/21, loss=15.249794, lr=0.000125, time_each_step=0.86s, eta=0:10:0\n",
      "2021-08-15 16:45:56 [INFO]\t[TRAIN] Epoch=70/100, Step=9/21, loss=12.641318, lr=0.000125, time_each_step=0.85s, eta=0:9:58\n",
      "2021-08-15 16:45:57 [INFO]\t[TRAIN] Epoch=70/100, Step=11/21, loss=12.145496, lr=0.000125, time_each_step=0.86s, eta=0:9:57\n",
      "2021-08-15 16:45:58 [INFO]\t[TRAIN] Epoch=70/100, Step=13/21, loss=15.973926, lr=0.000125, time_each_step=0.85s, eta=0:9:55\n",
      "2021-08-15 16:45:59 [INFO]\t[TRAIN] Epoch=70/100, Step=15/21, loss=14.075786, lr=0.000125, time_each_step=0.84s, eta=0:9:53\n",
      "2021-08-15 16:46:00 [INFO]\t[TRAIN] Epoch=70/100, Step=17/21, loss=21.993515, lr=0.000125, time_each_step=0.81s, eta=0:9:51\n",
      "2021-08-15 16:46:01 [INFO]\t[TRAIN] Epoch=70/100, Step=19/21, loss=12.500798, lr=0.000125, time_each_step=0.8s, eta=0:9:50\n",
      "2021-08-15 16:46:01 [INFO]\t[TRAIN] Epoch=70/100, Step=21/21, loss=15.711709, lr=0.000125, time_each_step=0.47s, eta=0:9:48\n",
      "2021-08-15 16:46:01 [INFO]\t[TRAIN] Epoch 70 finished, loss=15.649841, lr=0.000125 .\n",
      "2021-08-15 16:46:08 [INFO]\t[TRAIN] Epoch=71/100, Step=2/21, loss=14.98707, lr=1.2e-05, time_each_step=0.75s, eta=0:8:36\n",
      "2021-08-15 16:46:09 [INFO]\t[TRAIN] Epoch=71/100, Step=4/21, loss=15.526984, lr=1.2e-05, time_each_step=0.74s, eta=0:8:34\n",
      "2021-08-15 16:46:10 [INFO]\t[TRAIN] Epoch=71/100, Step=6/21, loss=14.116061, lr=1.2e-05, time_each_step=0.75s, eta=0:8:33\n",
      "2021-08-15 16:46:11 [INFO]\t[TRAIN] Epoch=71/100, Step=8/21, loss=12.555934, lr=1.2e-05, time_each_step=0.76s, eta=0:8:32\n",
      "2021-08-15 16:46:13 [INFO]\t[TRAIN] Epoch=71/100, Step=10/21, loss=19.458828, lr=1.2e-05, time_each_step=0.76s, eta=0:8:30\n",
      "2021-08-15 16:46:13 [INFO]\t[TRAIN] Epoch=71/100, Step=12/21, loss=11.108877, lr=1.2e-05, time_each_step=0.75s, eta=0:8:28\n",
      "2021-08-15 16:46:14 [INFO]\t[TRAIN] Epoch=71/100, Step=14/21, loss=11.92977, lr=1.2e-05, time_each_step=0.76s, eta=0:8:27\n",
      "2021-08-15 16:46:15 [INFO]\t[TRAIN] Epoch=71/100, Step=16/21, loss=14.104933, lr=1.2e-05, time_each_step=0.79s, eta=0:8:26\n",
      "2021-08-15 16:46:16 [INFO]\t[TRAIN] Epoch=71/100, Step=18/21, loss=13.997355, lr=1.2e-05, time_each_step=0.79s, eta=0:8:24\n",
      "2021-08-15 16:46:17 [INFO]\t[TRAIN] Epoch=71/100, Step=20/21, loss=16.462675, lr=1.2e-05, time_each_step=0.8s, eta=0:8:23\n",
      "2021-08-15 16:46:18 [INFO]\t[TRAIN] Epoch 71 finished, loss=14.991508, lr=1.2e-05 .\n",
      "2021-08-15 16:46:23 [INFO]\t[TRAIN] Epoch=72/100, Step=1/21, loss=13.814419, lr=1.2e-05, time_each_step=0.77s, eta=0:8:26\n",
      "2021-08-15 16:46:24 [INFO]\t[TRAIN] Epoch=72/100, Step=3/21, loss=13.907033, lr=1.2e-05, time_each_step=0.76s, eta=0:8:24\n",
      "2021-08-15 16:46:26 [INFO]\t[TRAIN] Epoch=72/100, Step=5/21, loss=16.737272, lr=1.2e-05, time_each_step=0.77s, eta=0:8:22\n",
      "2021-08-15 16:46:27 [INFO]\t[TRAIN] Epoch=72/100, Step=7/21, loss=19.859533, lr=1.2e-05, time_each_step=0.78s, eta=0:8:21\n",
      "2021-08-15 16:46:28 [INFO]\t[TRAIN] Epoch=72/100, Step=9/21, loss=13.272065, lr=1.2e-05, time_each_step=0.77s, eta=0:8:19\n",
      "2021-08-15 16:46:29 [INFO]\t[TRAIN] Epoch=72/100, Step=11/21, loss=17.841461, lr=1.2e-05, time_each_step=0.79s, eta=0:8:18\n",
      "2021-08-15 16:46:30 [INFO]\t[TRAIN] Epoch=72/100, Step=13/21, loss=15.157582, lr=1.2e-05, time_each_step=0.79s, eta=0:8:16\n",
      "2021-08-15 16:46:31 [INFO]\t[TRAIN] Epoch=72/100, Step=15/21, loss=12.969578, lr=1.2e-05, time_each_step=0.77s, eta=0:8:15\n",
      "2021-08-15 16:46:32 [INFO]\t[TRAIN] Epoch=72/100, Step=17/21, loss=14.183747, lr=1.2e-05, time_each_step=0.77s, eta=0:8:13\n",
      "2021-08-15 16:46:33 [INFO]\t[TRAIN] Epoch=72/100, Step=19/21, loss=26.691961, lr=1.2e-05, time_each_step=0.79s, eta=0:8:12\n",
      "2021-08-15 16:46:34 [INFO]\t[TRAIN] Epoch=72/100, Step=21/21, loss=10.233542, lr=1.2e-05, time_each_step=0.54s, eta=0:8:10\n",
      "2021-08-15 16:46:34 [INFO]\t[TRAIN] Epoch 72 finished, loss=16.40169, lr=1.2e-05 .\n",
      "2021-08-15 16:46:39 [INFO]\t[TRAIN] Epoch=73/100, Step=2/21, loss=16.681173, lr=1.2e-05, time_each_step=0.73s, eta=0:7:53\n",
      "2021-08-15 16:46:40 [INFO]\t[TRAIN] Epoch=73/100, Step=4/21, loss=13.182304, lr=1.2e-05, time_each_step=0.72s, eta=0:7:52\n",
      "2021-08-15 16:46:41 [INFO]\t[TRAIN] Epoch=73/100, Step=6/21, loss=13.455885, lr=1.2e-05, time_each_step=0.69s, eta=0:7:50\n",
      "2021-08-15 16:46:42 [INFO]\t[TRAIN] Epoch=73/100, Step=8/21, loss=13.995092, lr=1.2e-05, time_each_step=0.71s, eta=0:7:49\n",
      "2021-08-15 16:46:43 [INFO]\t[TRAIN] Epoch=73/100, Step=10/21, loss=11.039866, lr=1.2e-05, time_each_step=0.7s, eta=0:7:47\n",
      "2021-08-15 16:46:45 [INFO]\t[TRAIN] Epoch=73/100, Step=12/21, loss=17.596838, lr=1.2e-05, time_each_step=0.74s, eta=0:7:46\n",
      "2021-08-15 16:46:46 [INFO]\t[TRAIN] Epoch=73/100, Step=14/21, loss=14.957059, lr=1.2e-05, time_each_step=0.74s, eta=0:7:45\n",
      "2021-08-15 16:46:47 [INFO]\t[TRAIN] Epoch=73/100, Step=16/21, loss=13.741162, lr=1.2e-05, time_each_step=0.73s, eta=0:7:43\n",
      "2021-08-15 16:46:47 [INFO]\t[TRAIN] Epoch=73/100, Step=18/21, loss=18.648027, lr=1.2e-05, time_each_step=0.71s, eta=0:7:42\n",
      "2021-08-15 16:46:48 [INFO]\t[TRAIN] Epoch=73/100, Step=20/21, loss=13.800512, lr=1.2e-05, time_each_step=0.72s, eta=0:7:40\n",
      "2021-08-15 16:46:49 [INFO]\t[TRAIN] Epoch 73 finished, loss=15.268396, lr=1.2e-05 .\n",
      "2021-08-15 16:46:55 [INFO]\t[TRAIN] Epoch=74/100, Step=1/21, loss=13.968501, lr=1.2e-05, time_each_step=0.79s, eta=0:7:8\n",
      "2021-08-15 16:46:56 [INFO]\t[TRAIN] Epoch=74/100, Step=3/21, loss=12.175813, lr=1.2e-05, time_each_step=0.81s, eta=0:7:7\n",
      "2021-08-15 16:46:58 [INFO]\t[TRAIN] Epoch=74/100, Step=5/21, loss=13.280112, lr=1.2e-05, time_each_step=0.84s, eta=0:7:6\n",
      "2021-08-15 16:46:59 [INFO]\t[TRAIN] Epoch=74/100, Step=7/21, loss=10.674688, lr=1.2e-05, time_each_step=0.82s, eta=0:7:4\n",
      "2021-08-15 16:47:00 [INFO]\t[TRAIN] Epoch=74/100, Step=9/21, loss=16.791674, lr=1.2e-05, time_each_step=0.84s, eta=0:7:2\n",
      "2021-08-15 16:47:01 [INFO]\t[TRAIN] Epoch=74/100, Step=11/21, loss=15.844479, lr=1.2e-05, time_each_step=0.82s, eta=0:7:0\n",
      "2021-08-15 16:47:02 [INFO]\t[TRAIN] Epoch=74/100, Step=13/21, loss=13.695168, lr=1.2e-05, time_each_step=0.8s, eta=0:6:59\n",
      "2021-08-15 16:47:03 [INFO]\t[TRAIN] Epoch=74/100, Step=15/21, loss=12.592607, lr=1.2e-05, time_each_step=0.81s, eta=0:6:57\n",
      "2021-08-15 16:47:04 [INFO]\t[TRAIN] Epoch=74/100, Step=17/21, loss=19.157749, lr=1.2e-05, time_each_step=0.84s, eta=0:6:56\n",
      "2021-08-15 16:47:05 [INFO]\t[TRAIN] Epoch=74/100, Step=19/21, loss=16.505018, lr=1.2e-05, time_each_step=0.85s, eta=0:6:54\n",
      "2021-08-15 16:47:06 [INFO]\t[TRAIN] Epoch=74/100, Step=21/21, loss=15.851419, lr=1.2e-05, time_each_step=0.58s, eta=0:6:52\n",
      "2021-08-15 16:47:06 [INFO]\t[TRAIN] Epoch 74 finished, loss=15.007815, lr=1.2e-05 .\n",
      "2021-08-15 16:47:13 [INFO]\t[TRAIN] Epoch=75/100, Step=2/21, loss=14.691876, lr=1.2e-05, time_each_step=0.84s, eta=0:7:58\n",
      "2021-08-15 16:47:14 [INFO]\t[TRAIN] Epoch=75/100, Step=4/21, loss=10.128802, lr=1.2e-05, time_each_step=0.82s, eta=0:7:56\n",
      "2021-08-15 16:47:15 [INFO]\t[TRAIN] Epoch=75/100, Step=6/21, loss=14.092618, lr=1.2e-05, time_each_step=0.82s, eta=0:7:55\n",
      "2021-08-15 16:47:17 [INFO]\t[TRAIN] Epoch=75/100, Step=8/21, loss=21.087091, lr=1.2e-05, time_each_step=0.83s, eta=0:7:53\n",
      "2021-08-15 16:47:17 [INFO]\t[TRAIN] Epoch=75/100, Step=10/21, loss=17.431105, lr=1.2e-05, time_each_step=0.82s, eta=0:7:51\n",
      "2021-08-15 16:47:19 [INFO]\t[TRAIN] Epoch=75/100, Step=12/21, loss=12.699485, lr=1.2e-05, time_each_step=0.84s, eta=0:7:50\n",
      "2021-08-15 16:47:19 [INFO]\t[TRAIN] Epoch=75/100, Step=14/21, loss=14.239725, lr=1.2e-05, time_each_step=0.82s, eta=0:7:48\n",
      "2021-08-15 16:47:20 [INFO]\t[TRAIN] Epoch=75/100, Step=16/21, loss=11.33231, lr=1.2e-05, time_each_step=0.8s, eta=0:7:46\n",
      "2021-08-15 16:47:21 [INFO]\t[TRAIN] Epoch=75/100, Step=18/21, loss=20.965807, lr=1.2e-05, time_each_step=0.79s, eta=0:7:45\n",
      "2021-08-15 16:47:22 [INFO]\t[TRAIN] Epoch=75/100, Step=20/21, loss=15.230839, lr=1.2e-05, time_each_step=0.79s, eta=0:7:43\n",
      "2021-08-15 16:47:23 [INFO]\t[TRAIN] Epoch 75 finished, loss=15.446279, lr=1.2e-05 .\n",
      "2021-08-15 16:47:30 [INFO]\t[TRAIN] Epoch=76/100, Step=1/21, loss=12.78542, lr=1.2e-05, time_each_step=0.87s, eta=0:7:15\n",
      "2021-08-15 16:47:32 [INFO]\t[TRAIN] Epoch=76/100, Step=3/21, loss=15.550118, lr=1.2e-05, time_each_step=0.89s, eta=0:7:13\n",
      "2021-08-15 16:47:33 [INFO]\t[TRAIN] Epoch=76/100, Step=5/21, loss=14.652157, lr=1.2e-05, time_each_step=0.92s, eta=0:7:12\n",
      "2021-08-15 16:47:35 [INFO]\t[TRAIN] Epoch=76/100, Step=7/21, loss=13.258658, lr=1.2e-05, time_each_step=0.9s, eta=0:7:10\n",
      "2021-08-15 16:47:36 [INFO]\t[TRAIN] Epoch=76/100, Step=9/21, loss=13.61452, lr=1.2e-05, time_each_step=0.91s, eta=0:7:8\n",
      "2021-08-15 16:47:37 [INFO]\t[TRAIN] Epoch=76/100, Step=11/21, loss=15.0512, lr=1.2e-05, time_each_step=0.9s, eta=0:7:6\n",
      "2021-08-15 16:47:38 [INFO]\t[TRAIN] Epoch=76/100, Step=13/21, loss=18.216288, lr=1.2e-05, time_each_step=0.91s, eta=0:7:4\n",
      "2021-08-15 16:47:38 [INFO]\t[TRAIN] Epoch=76/100, Step=15/21, loss=12.958809, lr=1.2e-05, time_each_step=0.92s, eta=0:7:3\n",
      "2021-08-15 16:47:39 [INFO]\t[TRAIN] Epoch=76/100, Step=17/21, loss=16.23505, lr=1.2e-05, time_each_step=0.92s, eta=0:7:1\n",
      "2021-08-15 16:47:40 [INFO]\t[TRAIN] Epoch=76/100, Step=19/21, loss=13.878945, lr=1.2e-05, time_each_step=0.91s, eta=0:6:59\n",
      "2021-08-15 16:47:41 [INFO]\t[TRAIN] Epoch=76/100, Step=21/21, loss=18.337152, lr=1.2e-05, time_each_step=0.53s, eta=0:6:57\n",
      "2021-08-15 16:47:41 [INFO]\t[TRAIN] Epoch 76 finished, loss=14.421856, lr=1.2e-05 .\n",
      "2021-08-15 16:47:48 [INFO]\t[TRAIN] Epoch=77/100, Step=2/21, loss=14.402389, lr=1.2e-05, time_each_step=0.81s, eta=0:7:42\n",
      "2021-08-15 16:47:49 [INFO]\t[TRAIN] Epoch=77/100, Step=4/21, loss=12.585873, lr=1.2e-05, time_each_step=0.78s, eta=0:7:40\n",
      "2021-08-15 16:47:50 [INFO]\t[TRAIN] Epoch=77/100, Step=6/21, loss=17.450569, lr=1.2e-05, time_each_step=0.79s, eta=0:7:39\n",
      "2021-08-15 16:47:52 [INFO]\t[TRAIN] Epoch=77/100, Step=8/21, loss=16.619545, lr=1.2e-05, time_each_step=0.8s, eta=0:7:37\n",
      "2021-08-15 16:47:53 [INFO]\t[TRAIN] Epoch=77/100, Step=10/21, loss=16.201176, lr=1.2e-05, time_each_step=0.79s, eta=0:7:36\n",
      "2021-08-15 16:47:53 [INFO]\t[TRAIN] Epoch=77/100, Step=12/21, loss=13.310452, lr=1.2e-05, time_each_step=0.78s, eta=0:7:34\n",
      "2021-08-15 16:47:54 [INFO]\t[TRAIN] Epoch=77/100, Step=14/21, loss=14.160137, lr=1.2e-05, time_each_step=0.77s, eta=0:7:32\n",
      "2021-08-15 16:47:55 [INFO]\t[TRAIN] Epoch=77/100, Step=16/21, loss=16.277714, lr=1.2e-05, time_each_step=0.79s, eta=0:7:31\n",
      "2021-08-15 16:47:56 [INFO]\t[TRAIN] Epoch=77/100, Step=18/21, loss=15.284252, lr=1.2e-05, time_each_step=0.81s, eta=0:7:30\n",
      "2021-08-15 16:47:57 [INFO]\t[TRAIN] Epoch=77/100, Step=20/21, loss=14.630195, lr=1.2e-05, time_each_step=0.8s, eta=0:7:28\n",
      "2021-08-15 16:47:57 [INFO]\t[TRAIN] Epoch 77 finished, loss=15.253312, lr=1.2e-05 .\n",
      "2021-08-15 16:48:03 [INFO]\t[TRAIN] Epoch=78/100, Step=1/21, loss=14.571241, lr=1.2e-05, time_each_step=0.75s, eta=0:6:37\n",
      "2021-08-15 16:48:04 [INFO]\t[TRAIN] Epoch=78/100, Step=3/21, loss=11.359363, lr=1.2e-05, time_each_step=0.74s, eta=0:6:35\n",
      "2021-08-15 16:48:05 [INFO]\t[TRAIN] Epoch=78/100, Step=5/21, loss=21.246557, lr=1.2e-05, time_each_step=0.75s, eta=0:6:34\n",
      "2021-08-15 16:48:07 [INFO]\t[TRAIN] Epoch=78/100, Step=7/21, loss=15.507166, lr=1.2e-05, time_each_step=0.77s, eta=0:6:32\n",
      "2021-08-15 16:48:08 [INFO]\t[TRAIN] Epoch=78/100, Step=9/21, loss=15.752605, lr=1.2e-05, time_each_step=0.79s, eta=0:6:31\n",
      "2021-08-15 16:48:10 [INFO]\t[TRAIN] Epoch=78/100, Step=11/21, loss=18.327543, lr=1.2e-05, time_each_step=0.82s, eta=0:6:30\n",
      "2021-08-15 16:48:10 [INFO]\t[TRAIN] Epoch=78/100, Step=13/21, loss=13.12366, lr=1.2e-05, time_each_step=0.83s, eta=0:6:28\n",
      "2021-08-15 16:48:11 [INFO]\t[TRAIN] Epoch=78/100, Step=15/21, loss=15.28539, lr=1.2e-05, time_each_step=0.8s, eta=0:6:26\n",
      "2021-08-15 16:48:12 [INFO]\t[TRAIN] Epoch=78/100, Step=17/21, loss=18.188313, lr=1.2e-05, time_each_step=0.8s, eta=0:6:25\n",
      "2021-08-15 16:48:13 [INFO]\t[TRAIN] Epoch=78/100, Step=19/21, loss=15.266687, lr=1.2e-05, time_each_step=0.81s, eta=0:6:23\n",
      "2021-08-15 16:48:14 [INFO]\t[TRAIN] Epoch=78/100, Step=21/21, loss=14.796231, lr=1.2e-05, time_each_step=0.56s, eta=0:6:22\n",
      "2021-08-15 16:48:14 [INFO]\t[TRAIN] Epoch 78 finished, loss=15.5341, lr=1.2e-05 .\n",
      "2021-08-15 16:48:21 [INFO]\t[TRAIN] Epoch=79/100, Step=2/21, loss=14.169211, lr=1.2e-05, time_each_step=0.85s, eta=0:6:38\n",
      "2021-08-15 16:48:23 [INFO]\t[TRAIN] Epoch=79/100, Step=4/21, loss=19.905268, lr=1.2e-05, time_each_step=0.85s, eta=0:6:36\n",
      "2021-08-15 16:48:24 [INFO]\t[TRAIN] Epoch=79/100, Step=6/21, loss=16.749132, lr=1.2e-05, time_each_step=0.87s, eta=0:6:35\n",
      "2021-08-15 16:48:26 [INFO]\t[TRAIN] Epoch=79/100, Step=8/21, loss=18.999746, lr=1.2e-05, time_each_step=0.86s, eta=0:6:33\n",
      "2021-08-15 16:48:26 [INFO]\t[TRAIN] Epoch=79/100, Step=10/21, loss=15.36544, lr=1.2e-05, time_each_step=0.84s, eta=0:6:31\n",
      "2021-08-15 16:48:27 [INFO]\t[TRAIN] Epoch=79/100, Step=12/21, loss=13.327458, lr=1.2e-05, time_each_step=0.85s, eta=0:6:29\n",
      "2021-08-15 16:48:28 [INFO]\t[TRAIN] Epoch=79/100, Step=14/21, loss=16.008577, lr=1.2e-05, time_each_step=0.85s, eta=0:6:28\n",
      "2021-08-15 16:48:29 [INFO]\t[TRAIN] Epoch=79/100, Step=16/21, loss=11.255051, lr=1.2e-05, time_each_step=0.84s, eta=0:6:26\n",
      "2021-08-15 16:48:29 [INFO]\t[TRAIN] Epoch=79/100, Step=18/21, loss=12.334649, lr=1.2e-05, time_each_step=0.82s, eta=0:6:24\n",
      "2021-08-15 16:48:30 [INFO]\t[TRAIN] Epoch=79/100, Step=20/21, loss=14.367883, lr=1.2e-05, time_each_step=0.82s, eta=0:6:23\n",
      "2021-08-15 16:48:31 [INFO]\t[TRAIN] Epoch 79 finished, loss=14.751575, lr=1.2e-05 .\n",
      "2021-08-15 16:48:36 [INFO]\t[TRAIN] Epoch=80/100, Step=1/21, loss=15.518901, lr=1.2e-05, time_each_step=0.77s, eta=0:6:15\n",
      "2021-08-15 16:48:38 [INFO]\t[TRAIN] Epoch=80/100, Step=3/21, loss=11.838717, lr=1.2e-05, time_each_step=0.78s, eta=0:6:14\n",
      "2021-08-15 16:48:39 [INFO]\t[TRAIN] Epoch=80/100, Step=5/21, loss=14.243075, lr=1.2e-05, time_each_step=0.75s, eta=0:6:12\n",
      "2021-08-15 16:48:41 [INFO]\t[TRAIN] Epoch=80/100, Step=7/21, loss=13.585449, lr=1.2e-05, time_each_step=0.76s, eta=0:6:10\n",
      "2021-08-15 16:48:42 [INFO]\t[TRAIN] Epoch=80/100, Step=9/21, loss=15.151743, lr=1.2e-05, time_each_step=0.77s, eta=0:6:9\n",
      "2021-08-15 16:48:43 [INFO]\t[TRAIN] Epoch=80/100, Step=11/21, loss=15.587256, lr=1.2e-05, time_each_step=0.78s, eta=0:6:7\n",
      "2021-08-15 16:48:44 [INFO]\t[TRAIN] Epoch=80/100, Step=13/21, loss=13.85069, lr=1.2e-05, time_each_step=0.79s, eta=0:6:6\n",
      "2021-08-15 16:48:45 [INFO]\t[TRAIN] Epoch=80/100, Step=15/21, loss=12.38408, lr=1.2e-05, time_each_step=0.79s, eta=0:6:4\n",
      "2021-08-15 16:48:46 [INFO]\t[TRAIN] Epoch=80/100, Step=17/21, loss=17.914341, lr=1.2e-05, time_each_step=0.82s, eta=0:6:3\n",
      "2021-08-15 16:48:47 [INFO]\t[TRAIN] Epoch=80/100, Step=19/21, loss=13.835294, lr=1.2e-05, time_each_step=0.83s, eta=0:6:1\n",
      "2021-08-15 16:48:48 [INFO]\t[TRAIN] Epoch=80/100, Step=21/21, loss=16.066927, lr=1.2e-05, time_each_step=0.58s, eta=0:6:0\n",
      "2021-08-15 16:48:48 [INFO]\t[TRAIN] Epoch 80 finished, loss=15.245173, lr=1.2e-05 .\n",
      "2021-08-15 16:48:48 [INFO]\tStart to evaluating(total_samples=98, total_steps=7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 7/7 [00:06<00:00,  1.05it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:48:56 [INFO]\t[EVAL] Finished, Epoch=80, bbox_map=26.520526 .\n",
      "2021-08-15 16:48:58 [INFO]\tModel saved in output/ppyolo/best_model.\n",
      "2021-08-15 16:49:01 [INFO]\tModel saved in output/ppyolo/epoch_80.\n",
      "2021-08-15 16:49:01 [INFO]\tCurrent evaluated best model in eval_dataset is epoch_80, bbox_map=26.520525686120024\n",
      "2021-08-15 16:49:07 [INFO]\t[TRAIN] Epoch=81/100, Step=2/21, loss=16.578768, lr=1e-06, time_each_step=0.78s, eta=0:5:52\n",
      "2021-08-15 16:49:08 [INFO]\t[TRAIN] Epoch=81/100, Step=4/21, loss=13.531879, lr=1e-06, time_each_step=0.8s, eta=0:5:51\n",
      "2021-08-15 16:49:10 [INFO]\t[TRAIN] Epoch=81/100, Step=6/21, loss=12.748194, lr=1e-06, time_each_step=0.8s, eta=0:5:49\n",
      "2021-08-15 16:49:11 [INFO]\t[TRAIN] Epoch=81/100, Step=8/21, loss=13.344918, lr=1e-06, time_each_step=0.8s, eta=0:5:48\n",
      "2021-08-15 16:49:13 [INFO]\t[TRAIN] Epoch=81/100, Step=10/21, loss=14.279385, lr=1e-06, time_each_step=0.83s, eta=0:5:46\n",
      "2021-08-15 16:49:14 [INFO]\t[TRAIN] Epoch=81/100, Step=12/21, loss=14.085868, lr=1e-06, time_each_step=0.84s, eta=0:5:45\n",
      "2021-08-15 16:49:15 [INFO]\t[TRAIN] Epoch=81/100, Step=14/21, loss=15.227477, lr=1e-06, time_each_step=0.84s, eta=0:5:43\n",
      "2021-08-15 16:49:16 [INFO]\t[TRAIN] Epoch=81/100, Step=16/21, loss=13.059432, lr=1e-06, time_each_step=0.83s, eta=0:5:41\n",
      "2021-08-15 16:49:17 [INFO]\t[TRAIN] Epoch=81/100, Step=18/21, loss=13.355586, lr=1e-06, time_each_step=0.83s, eta=0:5:40\n",
      "2021-08-15 16:49:17 [INFO]\t[TRAIN] Epoch=81/100, Step=20/21, loss=18.064119, lr=1e-06, time_each_step=0.82s, eta=0:5:38\n",
      "2021-08-15 16:49:18 [INFO]\t[TRAIN] Epoch 81 finished, loss=14.70755, lr=1e-06 .\n",
      "2021-08-15 16:49:23 [INFO]\t[TRAIN] Epoch=82/100, Step=1/21, loss=18.691433, lr=1e-06, time_each_step=0.81s, eta=0:5:31\n",
      "2021-08-15 16:49:24 [INFO]\t[TRAIN] Epoch=82/100, Step=3/21, loss=13.892894, lr=1e-06, time_each_step=0.79s, eta=0:5:29\n",
      "2021-08-15 16:49:25 [INFO]\t[TRAIN] Epoch=82/100, Step=5/21, loss=13.251101, lr=1e-06, time_each_step=0.78s, eta=0:5:28\n",
      "2021-08-15 16:49:27 [INFO]\t[TRAIN] Epoch=82/100, Step=7/21, loss=14.759917, lr=1e-06, time_each_step=0.8s, eta=0:5:26\n",
      "2021-08-15 16:49:28 [INFO]\t[TRAIN] Epoch=82/100, Step=9/21, loss=15.299765, lr=1e-06, time_each_step=0.77s, eta=0:5:24\n",
      "2021-08-15 16:49:29 [INFO]\t[TRAIN] Epoch=82/100, Step=11/21, loss=13.443927, lr=1e-06, time_each_step=0.78s, eta=0:5:23\n",
      "2021-08-15 16:49:30 [INFO]\t[TRAIN] Epoch=82/100, Step=13/21, loss=17.603374, lr=1e-06, time_each_step=0.79s, eta=0:5:21\n",
      "2021-08-15 16:49:31 [INFO]\t[TRAIN] Epoch=82/100, Step=15/21, loss=12.271921, lr=1e-06, time_each_step=0.79s, eta=0:5:20\n",
      "2021-08-15 16:49:32 [INFO]\t[TRAIN] Epoch=82/100, Step=17/21, loss=12.090391, lr=1e-06, time_each_step=0.79s, eta=0:5:18\n",
      "2021-08-15 16:49:33 [INFO]\t[TRAIN] Epoch=82/100, Step=19/21, loss=17.806808, lr=1e-06, time_each_step=0.81s, eta=0:5:17\n",
      "2021-08-15 16:49:34 [INFO]\t[TRAIN] Epoch=82/100, Step=21/21, loss=14.906756, lr=1e-06, time_each_step=0.57s, eta=0:5:15\n",
      "2021-08-15 16:49:34 [INFO]\t[TRAIN] Epoch 82 finished, loss=14.802541, lr=1e-06 .\n",
      "2021-08-15 16:49:41 [INFO]\t[TRAIN] Epoch=83/100, Step=2/21, loss=14.076989, lr=1e-06, time_each_step=0.82s, eta=0:5:14\n",
      "2021-08-15 16:49:42 [INFO]\t[TRAIN] Epoch=83/100, Step=4/21, loss=14.27677, lr=1e-06, time_each_step=0.81s, eta=0:5:12\n",
      "2021-08-15 16:49:43 [INFO]\t[TRAIN] Epoch=83/100, Step=6/21, loss=17.437208, lr=1e-06, time_each_step=0.79s, eta=0:5:10\n",
      "2021-08-15 16:49:44 [INFO]\t[TRAIN] Epoch=83/100, Step=8/21, loss=11.379378, lr=1e-06, time_each_step=0.82s, eta=0:5:9\n",
      "2021-08-15 16:49:46 [INFO]\t[TRAIN] Epoch=83/100, Step=10/21, loss=16.558737, lr=1e-06, time_each_step=0.81s, eta=0:5:7\n",
      "2021-08-15 16:49:47 [INFO]\t[TRAIN] Epoch=83/100, Step=12/21, loss=18.761442, lr=1e-06, time_each_step=0.82s, eta=0:5:5\n",
      "2021-08-15 16:49:48 [INFO]\t[TRAIN] Epoch=83/100, Step=14/21, loss=12.80091, lr=1e-06, time_each_step=0.82s, eta=0:5:4\n",
      "2021-08-15 16:49:49 [INFO]\t[TRAIN] Epoch=83/100, Step=16/21, loss=12.25815, lr=1e-06, time_each_step=0.82s, eta=0:5:2\n",
      "2021-08-15 16:49:49 [INFO]\t[TRAIN] Epoch=83/100, Step=18/21, loss=14.256974, lr=1e-06, time_each_step=0.8s, eta=0:5:0\n",
      "2021-08-15 16:49:50 [INFO]\t[TRAIN] Epoch=83/100, Step=20/21, loss=14.896184, lr=1e-06, time_each_step=0.79s, eta=0:4:59\n",
      "2021-08-15 16:49:51 [INFO]\t[TRAIN] Epoch 83 finished, loss=15.143413, lr=1e-06 .\n",
      "2021-08-15 16:49:57 [INFO]\t[TRAIN] Epoch=84/100, Step=1/21, loss=12.2149, lr=1e-06, time_each_step=0.83s, eta=0:4:49\n",
      "2021-08-15 16:49:58 [INFO]\t[TRAIN] Epoch=84/100, Step=3/21, loss=14.207967, lr=1e-06, time_each_step=0.83s, eta=0:4:47\n",
      "2021-08-15 16:49:59 [INFO]\t[TRAIN] Epoch=84/100, Step=5/21, loss=19.194851, lr=1e-06, time_each_step=0.84s, eta=0:4:46\n",
      "2021-08-15 16:50:01 [INFO]\t[TRAIN] Epoch=84/100, Step=7/21, loss=17.69091, lr=1e-06, time_each_step=0.81s, eta=0:4:44\n",
      "2021-08-15 16:50:02 [INFO]\t[TRAIN] Epoch=84/100, Step=9/21, loss=14.509156, lr=1e-06, time_each_step=0.8s, eta=0:4:42\n",
      "2021-08-15 16:50:03 [INFO]\t[TRAIN] Epoch=84/100, Step=11/21, loss=14.658061, lr=1e-06, time_each_step=0.81s, eta=0:4:41\n",
      "2021-08-15 16:50:04 [INFO]\t[TRAIN] Epoch=84/100, Step=13/21, loss=14.275719, lr=1e-06, time_each_step=0.82s, eta=0:4:39\n",
      "2021-08-15 16:50:05 [INFO]\t[TRAIN] Epoch=84/100, Step=15/21, loss=15.101871, lr=1e-06, time_each_step=0.8s, eta=0:4:37\n",
      "2021-08-15 16:50:06 [INFO]\t[TRAIN] Epoch=84/100, Step=17/21, loss=15.195938, lr=1e-06, time_each_step=0.83s, eta=0:4:36\n",
      "2021-08-15 16:50:07 [INFO]\t[TRAIN] Epoch=84/100, Step=19/21, loss=14.724635, lr=1e-06, time_each_step=0.83s, eta=0:4:34\n",
      "2021-08-15 16:50:08 [INFO]\t[TRAIN] Epoch=84/100, Step=21/21, loss=13.575024, lr=1e-06, time_each_step=0.53s, eta=0:4:32\n",
      "2021-08-15 16:50:08 [INFO]\t[TRAIN] Epoch 84 finished, loss=15.622481, lr=1e-06 .\n",
      "2021-08-15 16:50:15 [INFO]\t[TRAIN] Epoch=85/100, Step=2/21, loss=14.49413, lr=1e-06, time_each_step=0.86s, eta=0:4:45\n",
      "2021-08-15 16:50:17 [INFO]\t[TRAIN] Epoch=85/100, Step=4/21, loss=20.043833, lr=1e-06, time_each_step=0.86s, eta=0:4:43\n",
      "2021-08-15 16:50:18 [INFO]\t[TRAIN] Epoch=85/100, Step=6/21, loss=15.074561, lr=1e-06, time_each_step=0.88s, eta=0:4:41\n",
      "2021-08-15 16:50:19 [INFO]\t[TRAIN] Epoch=85/100, Step=8/21, loss=18.189617, lr=1e-06, time_each_step=0.88s, eta=0:4:40\n",
      "2021-08-15 16:50:21 [INFO]\t[TRAIN] Epoch=85/100, Step=10/21, loss=13.698638, lr=1e-06, time_each_step=0.9s, eta=0:4:38\n",
      "2021-08-15 16:50:22 [INFO]\t[TRAIN] Epoch=85/100, Step=12/21, loss=19.843807, lr=1e-06, time_each_step=0.91s, eta=0:4:36\n",
      "2021-08-15 16:50:23 [INFO]\t[TRAIN] Epoch=85/100, Step=14/21, loss=15.809406, lr=1e-06, time_each_step=0.91s, eta=0:4:35\n",
      "2021-08-15 16:50:24 [INFO]\t[TRAIN] Epoch=85/100, Step=16/21, loss=13.705056, lr=1e-06, time_each_step=0.88s, eta=0:4:33\n",
      "2021-08-15 16:50:24 [INFO]\t[TRAIN] Epoch=85/100, Step=18/21, loss=13.316321, lr=1e-06, time_each_step=0.87s, eta=0:4:31\n",
      "2021-08-15 16:50:25 [INFO]\t[TRAIN] Epoch=85/100, Step=20/21, loss=10.395945, lr=1e-06, time_each_step=0.88s, eta=0:4:29\n",
      "2021-08-15 16:50:26 [INFO]\t[TRAIN] Epoch 85 finished, loss=14.990242, lr=1e-06 .\n",
      "2021-08-15 16:50:33 [INFO]\t[TRAIN] Epoch=86/100, Step=1/21, loss=10.711022, lr=1e-06, time_each_step=0.89s, eta=0:4:41\n",
      "2021-08-15 16:50:34 [INFO]\t[TRAIN] Epoch=86/100, Step=3/21, loss=20.945124, lr=1e-06, time_each_step=0.88s, eta=0:4:39\n",
      "2021-08-15 16:50:36 [INFO]\t[TRAIN] Epoch=86/100, Step=5/21, loss=12.73001, lr=1e-06, time_each_step=0.87s, eta=0:4:37\n",
      "2021-08-15 16:50:37 [INFO]\t[TRAIN] Epoch=86/100, Step=7/21, loss=15.018954, lr=1e-06, time_each_step=0.86s, eta=0:4:35\n",
      "2021-08-15 16:50:38 [INFO]\t[TRAIN] Epoch=86/100, Step=9/21, loss=16.823601, lr=1e-06, time_each_step=0.86s, eta=0:4:33\n",
      "2021-08-15 16:50:39 [INFO]\t[TRAIN] Epoch=86/100, Step=11/21, loss=16.552702, lr=1e-06, time_each_step=0.84s, eta=0:4:31\n",
      "2021-08-15 16:50:40 [INFO]\t[TRAIN] Epoch=86/100, Step=13/21, loss=16.381565, lr=1e-06, time_each_step=0.86s, eta=0:4:30\n",
      "2021-08-15 16:50:41 [INFO]\t[TRAIN] Epoch=86/100, Step=15/21, loss=11.947013, lr=1e-06, time_each_step=0.85s, eta=0:4:28\n",
      "2021-08-15 16:50:42 [INFO]\t[TRAIN] Epoch=86/100, Step=17/21, loss=15.046501, lr=1e-06, time_each_step=0.86s, eta=0:4:26\n",
      "2021-08-15 16:50:43 [INFO]\t[TRAIN] Epoch=86/100, Step=19/21, loss=13.571053, lr=1e-06, time_each_step=0.88s, eta=0:4:25\n",
      "2021-08-15 16:50:44 [INFO]\t[TRAIN] Epoch=86/100, Step=21/21, loss=13.314117, lr=1e-06, time_each_step=0.52s, eta=0:4:23\n",
      "2021-08-15 16:50:44 [INFO]\t[TRAIN] Epoch 86 finished, loss=15.303536, lr=1e-06 .\n",
      "2021-08-15 16:50:52 [INFO]\t[TRAIN] Epoch=87/100, Step=2/21, loss=14.656208, lr=1e-06, time_each_step=0.87s, eta=0:4:22\n",
      "2021-08-15 16:50:53 [INFO]\t[TRAIN] Epoch=87/100, Step=4/21, loss=15.446222, lr=1e-06, time_each_step=0.87s, eta=0:4:20\n",
      "2021-08-15 16:50:54 [INFO]\t[TRAIN] Epoch=87/100, Step=6/21, loss=14.865672, lr=1e-06, time_each_step=0.88s, eta=0:4:19\n",
      "2021-08-15 16:50:56 [INFO]\t[TRAIN] Epoch=87/100, Step=8/21, loss=16.295135, lr=1e-06, time_each_step=0.87s, eta=0:4:17\n",
      "2021-08-15 16:50:56 [INFO]\t[TRAIN] Epoch=87/100, Step=10/21, loss=15.548014, lr=1e-06, time_each_step=0.86s, eta=0:4:15\n",
      "2021-08-15 16:50:57 [INFO]\t[TRAIN] Epoch=87/100, Step=12/21, loss=17.345936, lr=1e-06, time_each_step=0.86s, eta=0:4:13\n",
      "2021-08-15 16:50:58 [INFO]\t[TRAIN] Epoch=87/100, Step=14/21, loss=14.596889, lr=1e-06, time_each_step=0.89s, eta=0:4:12\n",
      "2021-08-15 16:50:59 [INFO]\t[TRAIN] Epoch=87/100, Step=16/21, loss=14.109562, lr=1e-06, time_each_step=0.89s, eta=0:4:10\n",
      "2021-08-15 16:51:00 [INFO]\t[TRAIN] Epoch=87/100, Step=18/21, loss=13.426443, lr=1e-06, time_each_step=0.88s, eta=0:4:8\n",
      "2021-08-15 16:51:02 [INFO]\t[TRAIN] Epoch=87/100, Step=20/21, loss=14.998934, lr=1e-06, time_each_step=0.91s, eta=0:4:6\n",
      "2021-08-15 16:51:02 [INFO]\t[TRAIN] Epoch 87 finished, loss=15.0272, lr=1e-06 .\n",
      "2021-08-15 16:51:10 [INFO]\t[TRAIN] Epoch=88/100, Step=1/21, loss=14.315123, lr=1e-06, time_each_step=0.9s, eta=0:4:13\n",
      "2021-08-15 16:51:11 [INFO]\t[TRAIN] Epoch=88/100, Step=3/21, loss=16.23868, lr=1e-06, time_each_step=0.9s, eta=0:4:11\n",
      "2021-08-15 16:51:12 [INFO]\t[TRAIN] Epoch=88/100, Step=5/21, loss=12.813744, lr=1e-06, time_each_step=0.91s, eta=0:4:10\n",
      "2021-08-15 16:51:14 [INFO]\t[TRAIN] Epoch=88/100, Step=7/21, loss=13.9188, lr=1e-06, time_each_step=0.92s, eta=0:4:8\n",
      "2021-08-15 16:51:16 [INFO]\t[TRAIN] Epoch=88/100, Step=9/21, loss=15.02918, lr=1e-06, time_each_step=0.95s, eta=0:4:7\n",
      "2021-08-15 16:51:17 [INFO]\t[TRAIN] Epoch=88/100, Step=11/21, loss=16.710775, lr=1e-06, time_each_step=0.97s, eta=0:4:5\n",
      "2021-08-15 16:51:18 [INFO]\t[TRAIN] Epoch=88/100, Step=13/21, loss=13.912939, lr=1e-06, time_each_step=0.95s, eta=0:4:3\n",
      "2021-08-15 16:51:19 [INFO]\t[TRAIN] Epoch=88/100, Step=15/21, loss=16.13419, lr=1e-06, time_each_step=0.97s, eta=0:4:1\n",
      "2021-08-15 16:51:20 [INFO]\t[TRAIN] Epoch=88/100, Step=17/21, loss=12.996868, lr=1e-06, time_each_step=0.96s, eta=0:3:59\n",
      "2021-08-15 16:51:21 [INFO]\t[TRAIN] Epoch=88/100, Step=19/21, loss=19.203331, lr=1e-06, time_each_step=0.96s, eta=0:3:57\n",
      "2021-08-15 16:51:22 [INFO]\t[TRAIN] Epoch=88/100, Step=21/21, loss=16.519333, lr=1e-06, time_each_step=0.62s, eta=0:3:55\n",
      "2021-08-15 16:51:22 [INFO]\t[TRAIN] Epoch 88 finished, loss=14.938549, lr=1e-06 .\n",
      "2021-08-15 16:51:29 [INFO]\t[TRAIN] Epoch=89/100, Step=2/21, loss=17.784653, lr=1e-06, time_each_step=0.92s, eta=0:4:9\n",
      "2021-08-15 16:51:31 [INFO]\t[TRAIN] Epoch=89/100, Step=4/21, loss=19.92013, lr=1e-06, time_each_step=0.93s, eta=0:4:8\n",
      "2021-08-15 16:51:32 [INFO]\t[TRAIN] Epoch=89/100, Step=6/21, loss=15.677508, lr=1e-06, time_each_step=0.92s, eta=0:4:6\n",
      "2021-08-15 16:51:34 [INFO]\t[TRAIN] Epoch=89/100, Step=8/21, loss=16.439413, lr=1e-06, time_each_step=0.91s, eta=0:4:4\n",
      "2021-08-15 16:51:35 [INFO]\t[TRAIN] Epoch=89/100, Step=10/21, loss=13.810149, lr=1e-06, time_each_step=0.92s, eta=0:4:2\n",
      "2021-08-15 16:51:36 [INFO]\t[TRAIN] Epoch=89/100, Step=12/21, loss=13.649677, lr=1e-06, time_each_step=0.92s, eta=0:4:0\n",
      "2021-08-15 16:51:37 [INFO]\t[TRAIN] Epoch=89/100, Step=14/21, loss=13.64964, lr=1e-06, time_each_step=0.9s, eta=0:3:58\n",
      "2021-08-15 16:51:38 [INFO]\t[TRAIN] Epoch=89/100, Step=16/21, loss=17.043299, lr=1e-06, time_each_step=0.9s, eta=0:3:56\n",
      "2021-08-15 16:51:39 [INFO]\t[TRAIN] Epoch=89/100, Step=18/21, loss=13.970647, lr=1e-06, time_each_step=0.89s, eta=0:3:54\n",
      "2021-08-15 16:51:39 [INFO]\t[TRAIN] Epoch=89/100, Step=20/21, loss=13.460097, lr=1e-06, time_each_step=0.87s, eta=0:3:53\n",
      "2021-08-15 16:51:40 [INFO]\t[TRAIN] Epoch 89 finished, loss=14.817415, lr=1e-06 .\n",
      "2021-08-15 16:51:46 [INFO]\t[TRAIN] Epoch=90/100, Step=1/21, loss=15.015876, lr=1e-06, time_each_step=0.83s, eta=0:3:28\n",
      "2021-08-15 16:51:48 [INFO]\t[TRAIN] Epoch=90/100, Step=3/21, loss=13.678933, lr=1e-06, time_each_step=0.82s, eta=0:3:26\n",
      "2021-08-15 16:51:49 [INFO]\t[TRAIN] Epoch=90/100, Step=5/21, loss=16.038673, lr=1e-06, time_each_step=0.82s, eta=0:3:25\n",
      "2021-08-15 16:51:50 [INFO]\t[TRAIN] Epoch=90/100, Step=7/21, loss=12.869532, lr=1e-06, time_each_step=0.81s, eta=0:3:23\n",
      "2021-08-15 16:51:51 [INFO]\t[TRAIN] Epoch=90/100, Step=9/21, loss=10.242607, lr=1e-06, time_each_step=0.82s, eta=0:3:21\n",
      "2021-08-15 16:51:53 [INFO]\t[TRAIN] Epoch=90/100, Step=11/21, loss=10.627393, lr=1e-06, time_each_step=0.84s, eta=0:3:20\n",
      "2021-08-15 16:51:54 [INFO]\t[TRAIN] Epoch=90/100, Step=13/21, loss=13.203481, lr=1e-06, time_each_step=0.84s, eta=0:3:18\n",
      "2021-08-15 16:51:55 [INFO]\t[TRAIN] Epoch=90/100, Step=15/21, loss=19.35729, lr=1e-06, time_each_step=0.85s, eta=0:3:16\n",
      "2021-08-15 16:51:56 [INFO]\t[TRAIN] Epoch=90/100, Step=17/21, loss=12.108686, lr=1e-06, time_each_step=0.85s, eta=0:3:15\n",
      "2021-08-15 16:51:56 [INFO]\t[TRAIN] Epoch=90/100, Step=19/21, loss=14.374297, lr=1e-06, time_each_step=0.85s, eta=0:3:13\n",
      "2021-08-15 16:51:57 [INFO]\t[TRAIN] Epoch=90/100, Step=21/21, loss=13.002563, lr=1e-06, time_each_step=0.56s, eta=0:3:11\n",
      "2021-08-15 16:51:57 [INFO]\t[TRAIN] Epoch 90 finished, loss=14.25999, lr=1e-06 .\n",
      "2021-08-15 16:52:03 [INFO]\t[TRAIN] Epoch=91/100, Step=2/21, loss=10.27351, lr=0.0, time_each_step=0.79s, eta=0:3:4\n",
      "2021-08-15 16:52:05 [INFO]\t[TRAIN] Epoch=91/100, Step=4/21, loss=13.738264, lr=0.0, time_each_step=0.78s, eta=0:3:2\n",
      "2021-08-15 16:52:06 [INFO]\t[TRAIN] Epoch=91/100, Step=6/21, loss=12.757036, lr=0.0, time_each_step=0.77s, eta=0:3:1\n",
      "2021-08-15 16:52:07 [INFO]\t[TRAIN] Epoch=91/100, Step=8/21, loss=17.478622, lr=0.0, time_each_step=0.77s, eta=0:2:59\n",
      "2021-08-15 16:52:08 [INFO]\t[TRAIN] Epoch=91/100, Step=10/21, loss=14.237679, lr=0.0, time_each_step=0.79s, eta=0:2:58\n",
      "2021-08-15 16:52:10 [INFO]\t[TRAIN] Epoch=91/100, Step=12/21, loss=21.664484, lr=0.0, time_each_step=0.8s, eta=0:2:56\n",
      "2021-08-15 16:52:11 [INFO]\t[TRAIN] Epoch=91/100, Step=14/21, loss=14.740508, lr=0.0, time_each_step=0.79s, eta=0:2:55\n",
      "2021-08-15 16:52:11 [INFO]\t[TRAIN] Epoch=91/100, Step=16/21, loss=13.926845, lr=0.0, time_each_step=0.79s, eta=0:2:53\n",
      "2021-08-15 16:52:12 [INFO]\t[TRAIN] Epoch=91/100, Step=18/21, loss=10.504234, lr=0.0, time_each_step=0.78s, eta=0:2:51\n",
      "2021-08-15 16:52:13 [INFO]\t[TRAIN] Epoch=91/100, Step=20/21, loss=13.765642, lr=0.0, time_each_step=0.78s, eta=0:2:50\n",
      "2021-08-15 16:52:14 [INFO]\t[TRAIN] Epoch 91 finished, loss=14.783205, lr=0.0 .\n",
      "2021-08-15 16:52:21 [INFO]\t[TRAIN] Epoch=92/100, Step=1/21, loss=15.444535, lr=0.0, time_each_step=0.88s, eta=0:2:41\n",
      "2021-08-15 16:52:22 [INFO]\t[TRAIN] Epoch=92/100, Step=3/21, loss=16.557821, lr=0.0, time_each_step=0.89s, eta=0:2:40\n",
      "2021-08-15 16:52:23 [INFO]\t[TRAIN] Epoch=92/100, Step=5/21, loss=17.509239, lr=0.0, time_each_step=0.89s, eta=0:2:38\n",
      "2021-08-15 16:52:25 [INFO]\t[TRAIN] Epoch=92/100, Step=7/21, loss=11.74015, lr=0.0, time_each_step=0.89s, eta=0:2:36\n",
      "2021-08-15 16:52:26 [INFO]\t[TRAIN] Epoch=92/100, Step=9/21, loss=16.919439, lr=0.0, time_each_step=0.88s, eta=0:2:34\n",
      "2021-08-15 16:52:27 [INFO]\t[TRAIN] Epoch=92/100, Step=11/21, loss=14.79779, lr=0.0, time_each_step=0.88s, eta=0:2:32\n",
      "2021-08-15 16:52:28 [INFO]\t[TRAIN] Epoch=92/100, Step=13/21, loss=11.206408, lr=0.0, time_each_step=0.88s, eta=0:2:31\n",
      "2021-08-15 16:52:29 [INFO]\t[TRAIN] Epoch=92/100, Step=15/21, loss=14.292998, lr=0.0, time_each_step=0.9s, eta=0:2:29\n",
      "2021-08-15 16:52:30 [INFO]\t[TRAIN] Epoch=92/100, Step=17/21, loss=15.898628, lr=0.0, time_each_step=0.91s, eta=0:2:27\n",
      "2021-08-15 16:52:31 [INFO]\t[TRAIN] Epoch=92/100, Step=19/21, loss=15.202627, lr=0.0, time_each_step=0.9s, eta=0:2:25\n",
      "2021-08-15 16:52:32 [INFO]\t[TRAIN] Epoch=92/100, Step=21/21, loss=16.950602, lr=0.0, time_each_step=0.55s, eta=0:2:24\n",
      "2021-08-15 16:52:32 [INFO]\t[TRAIN] Epoch 92 finished, loss=14.885632, lr=0.0 .\n",
      "2021-08-15 16:52:38 [INFO]\t[TRAIN] Epoch=93/100, Step=2/21, loss=17.230352, lr=0.0, time_each_step=0.79s, eta=0:2:37\n",
      "2021-08-15 16:52:39 [INFO]\t[TRAIN] Epoch=93/100, Step=4/21, loss=14.399529, lr=0.0, time_each_step=0.8s, eta=0:2:35\n",
      "2021-08-15 16:52:41 [INFO]\t[TRAIN] Epoch=93/100, Step=6/21, loss=14.452535, lr=0.0, time_each_step=0.8s, eta=0:2:34\n",
      "2021-08-15 16:52:42 [INFO]\t[TRAIN] Epoch=93/100, Step=8/21, loss=13.737313, lr=0.0, time_each_step=0.8s, eta=0:2:32\n",
      "2021-08-15 16:52:44 [INFO]\t[TRAIN] Epoch=93/100, Step=10/21, loss=15.283837, lr=0.0, time_each_step=0.83s, eta=0:2:31\n",
      "2021-08-15 16:52:45 [INFO]\t[TRAIN] Epoch=93/100, Step=12/21, loss=14.904359, lr=0.0, time_each_step=0.85s, eta=0:2:29\n",
      "2021-08-15 16:52:46 [INFO]\t[TRAIN] Epoch=93/100, Step=14/21, loss=19.477606, lr=0.0, time_each_step=0.84s, eta=0:2:28\n",
      "2021-08-15 16:52:47 [INFO]\t[TRAIN] Epoch=93/100, Step=16/21, loss=14.713957, lr=0.0, time_each_step=0.83s, eta=0:2:26\n",
      "2021-08-15 16:52:48 [INFO]\t[TRAIN] Epoch=93/100, Step=18/21, loss=12.510224, lr=0.0, time_each_step=0.84s, eta=0:2:24\n",
      "2021-08-15 16:52:49 [INFO]\t[TRAIN] Epoch=93/100, Step=20/21, loss=14.020502, lr=0.0, time_each_step=0.83s, eta=0:2:23\n",
      "2021-08-15 16:52:49 [INFO]\t[TRAIN] Epoch 93 finished, loss=15.372026, lr=0.0 .\n",
      "2021-08-15 16:52:56 [INFO]\t[TRAIN] Epoch=94/100, Step=1/21, loss=13.675144, lr=0.0, time_each_step=0.91s, eta=0:2:14\n",
      "2021-08-15 16:52:57 [INFO]\t[TRAIN] Epoch=94/100, Step=3/21, loss=17.62767, lr=0.0, time_each_step=0.89s, eta=0:2:12\n",
      "2021-08-15 16:52:59 [INFO]\t[TRAIN] Epoch=94/100, Step=5/21, loss=12.633869, lr=0.0, time_each_step=0.89s, eta=0:2:10\n",
      "2021-08-15 16:53:00 [INFO]\t[TRAIN] Epoch=94/100, Step=7/21, loss=15.706679, lr=0.0, time_each_step=0.87s, eta=0:2:8\n",
      "2021-08-15 16:53:01 [INFO]\t[TRAIN] Epoch=94/100, Step=9/21, loss=17.203585, lr=0.0, time_each_step=0.86s, eta=0:2:6\n",
      "2021-08-15 16:53:02 [INFO]\t[TRAIN] Epoch=94/100, Step=11/21, loss=12.897676, lr=0.0, time_each_step=0.86s, eta=0:2:4\n",
      "2021-08-15 16:53:04 [INFO]\t[TRAIN] Epoch=94/100, Step=13/21, loss=12.918489, lr=0.0, time_each_step=0.88s, eta=0:2:3\n",
      "2021-08-15 16:53:05 [INFO]\t[TRAIN] Epoch=94/100, Step=15/21, loss=15.84569, lr=0.0, time_each_step=0.9s, eta=0:2:1\n",
      "2021-08-15 16:53:06 [INFO]\t[TRAIN] Epoch=94/100, Step=17/21, loss=16.648903, lr=0.0, time_each_step=0.9s, eta=0:1:59\n",
      "2021-08-15 16:53:07 [INFO]\t[TRAIN] Epoch=94/100, Step=19/21, loss=16.661203, lr=0.0, time_each_step=0.89s, eta=0:1:57\n",
      "2021-08-15 16:53:08 [INFO]\t[TRAIN] Epoch=94/100, Step=21/21, loss=13.476427, lr=0.0, time_each_step=0.56s, eta=0:1:56\n",
      "2021-08-15 16:53:08 [INFO]\t[TRAIN] Epoch 94 finished, loss=14.872658, lr=0.0 .\n",
      "2021-08-15 16:53:14 [INFO]\t[TRAIN] Epoch=95/100, Step=2/21, loss=14.949434, lr=0.0, time_each_step=0.82s, eta=0:1:59\n",
      "2021-08-15 16:53:16 [INFO]\t[TRAIN] Epoch=95/100, Step=4/21, loss=14.272947, lr=0.0, time_each_step=0.85s, eta=0:1:58\n",
      "2021-08-15 16:53:17 [INFO]\t[TRAIN] Epoch=95/100, Step=6/21, loss=14.53541, lr=0.0, time_each_step=0.86s, eta=0:1:57\n",
      "2021-08-15 16:53:18 [INFO]\t[TRAIN] Epoch=95/100, Step=8/21, loss=12.570177, lr=0.0, time_each_step=0.86s, eta=0:1:55\n",
      "2021-08-15 16:53:20 [INFO]\t[TRAIN] Epoch=95/100, Step=10/21, loss=16.090521, lr=0.0, time_each_step=0.85s, eta=0:1:53\n",
      "2021-08-15 16:53:20 [INFO]\t[TRAIN] Epoch=95/100, Step=12/21, loss=14.938408, lr=0.0, time_each_step=0.84s, eta=0:1:51\n",
      "2021-08-15 16:53:21 [INFO]\t[TRAIN] Epoch=95/100, Step=14/21, loss=14.619147, lr=0.0, time_each_step=0.83s, eta=0:1:50\n",
      "2021-08-15 16:53:23 [INFO]\t[TRAIN] Epoch=95/100, Step=16/21, loss=21.571796, lr=0.0, time_each_step=0.84s, eta=0:1:48\n",
      "2021-08-15 16:53:23 [INFO]\t[TRAIN] Epoch=95/100, Step=18/21, loss=17.272924, lr=0.0, time_each_step=0.84s, eta=0:1:46\n",
      "2021-08-15 16:53:24 [INFO]\t[TRAIN] Epoch=95/100, Step=20/21, loss=14.855216, lr=0.0, time_each_step=0.84s, eta=0:1:45\n",
      "2021-08-15 16:53:25 [INFO]\t[TRAIN] Epoch 95 finished, loss=14.917213, lr=0.0 .\n",
      "2021-08-15 16:53:30 [INFO]\t[TRAIN] Epoch=96/100, Step=1/21, loss=16.761847, lr=0.0, time_each_step=0.84s, eta=0:1:37\n",
      "2021-08-15 16:53:32 [INFO]\t[TRAIN] Epoch=96/100, Step=3/21, loss=15.942821, lr=0.0, time_each_step=0.81s, eta=0:1:35\n",
      "2021-08-15 16:53:33 [INFO]\t[TRAIN] Epoch=96/100, Step=5/21, loss=12.82592, lr=0.0, time_each_step=0.83s, eta=0:1:34\n",
      "2021-08-15 16:53:35 [INFO]\t[TRAIN] Epoch=96/100, Step=7/21, loss=10.139861, lr=0.0, time_each_step=0.83s, eta=0:1:32\n",
      "2021-08-15 16:53:36 [INFO]\t[TRAIN] Epoch=96/100, Step=9/21, loss=15.53042, lr=0.0, time_each_step=0.83s, eta=0:1:31\n",
      "2021-08-15 16:53:37 [INFO]\t[TRAIN] Epoch=96/100, Step=11/21, loss=18.260975, lr=0.0, time_each_step=0.84s, eta=0:1:29\n",
      "2021-08-15 16:53:38 [INFO]\t[TRAIN] Epoch=96/100, Step=13/21, loss=14.047653, lr=0.0, time_each_step=0.83s, eta=0:1:27\n",
      "2021-08-15 16:53:39 [INFO]\t[TRAIN] Epoch=96/100, Step=15/21, loss=24.999638, lr=0.0, time_each_step=0.84s, eta=0:1:26\n",
      "2021-08-15 16:53:40 [INFO]\t[TRAIN] Epoch=96/100, Step=17/21, loss=15.407103, lr=0.0, time_each_step=0.83s, eta=0:1:24\n",
      "2021-08-15 16:53:41 [INFO]\t[TRAIN] Epoch=96/100, Step=19/21, loss=17.701267, lr=0.0, time_each_step=0.83s, eta=0:1:22\n",
      "2021-08-15 16:53:42 [INFO]\t[TRAIN] Epoch=96/100, Step=21/21, loss=9.875116, lr=0.0, time_each_step=0.56s, eta=0:1:21\n",
      "2021-08-15 16:53:42 [INFO]\t[TRAIN] Epoch 96 finished, loss=15.475004, lr=0.0 .\n",
      "2021-08-15 16:53:49 [INFO]\t[TRAIN] Epoch=97/100, Step=2/21, loss=17.129105, lr=0.0, time_each_step=0.86s, eta=0:1:20\n",
      "2021-08-15 16:53:52 [INFO]\t[TRAIN] Epoch=97/100, Step=4/21, loss=16.686769, lr=0.0, time_each_step=0.93s, eta=0:1:20\n",
      "2021-08-15 16:53:53 [INFO]\t[TRAIN] Epoch=97/100, Step=6/21, loss=16.868441, lr=0.0, time_each_step=0.92s, eta=0:1:18\n",
      "2021-08-15 16:53:54 [INFO]\t[TRAIN] Epoch=97/100, Step=8/21, loss=13.314653, lr=0.0, time_each_step=0.91s, eta=0:1:16\n",
      "2021-08-15 16:53:56 [INFO]\t[TRAIN] Epoch=97/100, Step=10/21, loss=16.202536, lr=0.0, time_each_step=0.93s, eta=0:1:14\n",
      "2021-08-15 16:53:57 [INFO]\t[TRAIN] Epoch=97/100, Step=12/21, loss=12.87575, lr=0.0, time_each_step=0.96s, eta=0:1:13\n",
      "2021-08-15 16:53:59 [INFO]\t[TRAIN] Epoch=97/100, Step=14/21, loss=11.200264, lr=0.0, time_each_step=1.0s, eta=0:1:11\n",
      "2021-08-15 16:54:01 [INFO]\t[TRAIN] Epoch=97/100, Step=16/21, loss=17.671585, lr=0.0, time_each_step=1.05s, eta=0:1:9\n",
      "2021-08-15 16:54:02 [INFO]\t[TRAIN] Epoch=97/100, Step=18/21, loss=13.553857, lr=0.0, time_each_step=1.07s, eta=0:1:7\n",
      "2021-08-15 16:54:03 [INFO]\t[TRAIN] Epoch=97/100, Step=20/21, loss=17.613314, lr=0.0, time_each_step=1.08s, eta=0:1:5\n",
      "2021-08-15 16:54:04 [INFO]\t[TRAIN] Epoch 97 finished, loss=15.787495, lr=0.0 .\n",
      "2021-08-15 16:54:10 [INFO]\t[TRAIN] Epoch=98/100, Step=1/21, loss=14.936769, lr=0.0, time_each_step=1.04s, eta=0:1:17\n",
      "2021-08-15 16:54:12 [INFO]\t[TRAIN] Epoch=98/100, Step=3/21, loss=13.052857, lr=0.0, time_each_step=0.98s, eta=0:1:14\n",
      "2021-08-15 16:54:13 [INFO]\t[TRAIN] Epoch=98/100, Step=5/21, loss=15.011401, lr=0.0, time_each_step=0.98s, eta=0:1:12\n",
      "2021-08-15 16:54:14 [INFO]\t[TRAIN] Epoch=98/100, Step=7/21, loss=13.8714, lr=0.0, time_each_step=0.98s, eta=0:1:10\n",
      "2021-08-15 16:54:15 [INFO]\t[TRAIN] Epoch=98/100, Step=9/21, loss=12.797547, lr=0.0, time_each_step=0.96s, eta=0:1:8\n",
      "2021-08-15 16:54:16 [INFO]\t[TRAIN] Epoch=98/100, Step=11/21, loss=13.849164, lr=0.0, time_each_step=0.95s, eta=0:1:6\n",
      "2021-08-15 16:54:17 [INFO]\t[TRAIN] Epoch=98/100, Step=13/21, loss=15.420852, lr=0.0, time_each_step=0.9s, eta=0:1:4\n",
      "2021-08-15 16:54:18 [INFO]\t[TRAIN] Epoch=98/100, Step=15/21, loss=13.104642, lr=0.0, time_each_step=0.85s, eta=0:1:2\n",
      "2021-08-15 16:54:19 [INFO]\t[TRAIN] Epoch=98/100, Step=17/21, loss=12.077824, lr=0.0, time_each_step=0.82s, eta=0:1:0\n",
      "2021-08-15 16:54:20 [INFO]\t[TRAIN] Epoch=98/100, Step=19/21, loss=14.389832, lr=0.0, time_each_step=0.81s, eta=0:0:58\n",
      "2021-08-15 16:54:21 [INFO]\t[TRAIN] Epoch=98/100, Step=21/21, loss=20.345755, lr=0.0, time_each_step=0.55s, eta=0:0:56\n",
      "2021-08-15 16:54:21 [INFO]\t[TRAIN] Epoch 98 finished, loss=14.436201, lr=0.0 .\n",
      "2021-08-15 16:54:27 [INFO]\t[TRAIN] Epoch=99/100, Step=2/21, loss=14.489992, lr=0.0, time_each_step=0.76s, eta=0:0:44\n",
      "2021-08-15 16:54:28 [INFO]\t[TRAIN] Epoch=99/100, Step=4/21, loss=15.132144, lr=0.0, time_each_step=0.78s, eta=0:0:43\n",
      "2021-08-15 16:54:29 [INFO]\t[TRAIN] Epoch=99/100, Step=6/21, loss=12.253983, lr=0.0, time_each_step=0.77s, eta=0:0:41\n",
      "2021-08-15 16:54:31 [INFO]\t[TRAIN] Epoch=99/100, Step=8/21, loss=15.510378, lr=0.0, time_each_step=0.78s, eta=0:0:40\n",
      "2021-08-15 16:54:32 [INFO]\t[TRAIN] Epoch=99/100, Step=10/21, loss=14.415799, lr=0.0, time_each_step=0.78s, eta=0:0:38\n",
      "2021-08-15 16:54:33 [INFO]\t[TRAIN] Epoch=99/100, Step=12/21, loss=16.595596, lr=0.0, time_each_step=0.79s, eta=0:0:37\n",
      "2021-08-15 16:54:34 [INFO]\t[TRAIN] Epoch=99/100, Step=14/21, loss=14.11667, lr=0.0, time_each_step=0.79s, eta=0:0:35\n",
      "2021-08-15 16:54:35 [INFO]\t[TRAIN] Epoch=99/100, Step=16/21, loss=17.622499, lr=0.0, time_each_step=0.79s, eta=0:0:34\n",
      "2021-08-15 16:54:36 [INFO]\t[TRAIN] Epoch=99/100, Step=18/21, loss=14.433782, lr=0.0, time_each_step=0.8s, eta=0:0:32\n",
      "2021-08-15 16:54:36 [INFO]\t[TRAIN] Epoch=99/100, Step=20/21, loss=17.672239, lr=0.0, time_each_step=0.78s, eta=0:0:31\n",
      "2021-08-15 16:54:37 [INFO]\t[TRAIN] Epoch 99 finished, loss=14.945701, lr=0.0 .\n",
      "2021-08-15 16:54:44 [INFO]\t[TRAIN] Epoch=100/100, Step=1/21, loss=13.901341, lr=0.0, time_each_step=0.87s, eta=0:0:30\n",
      "2021-08-15 16:54:46 [INFO]\t[TRAIN] Epoch=100/100, Step=3/21, loss=9.683697, lr=0.0, time_each_step=0.86s, eta=0:0:28\n",
      "2021-08-15 16:54:47 [INFO]\t[TRAIN] Epoch=100/100, Step=5/21, loss=13.426223, lr=0.0, time_each_step=0.89s, eta=0:0:27\n",
      "2021-08-15 16:54:49 [INFO]\t[TRAIN] Epoch=100/100, Step=7/21, loss=14.828511, lr=0.0, time_each_step=0.89s, eta=0:0:25\n",
      "2021-08-15 16:54:50 [INFO]\t[TRAIN] Epoch=100/100, Step=9/21, loss=16.361107, lr=0.0, time_each_step=0.91s, eta=0:0:23\n",
      "2021-08-15 16:54:51 [INFO]\t[TRAIN] Epoch=100/100, Step=11/21, loss=12.265188, lr=0.0, time_each_step=0.91s, eta=0:0:22\n",
      "2021-08-15 16:54:52 [INFO]\t[TRAIN] Epoch=100/100, Step=13/21, loss=13.850825, lr=0.0, time_each_step=0.93s, eta=0:0:20\n",
      "2021-08-15 16:54:53 [INFO]\t[TRAIN] Epoch=100/100, Step=15/21, loss=13.10109, lr=0.0, time_each_step=0.92s, eta=0:0:18\n",
      "2021-08-15 16:54:54 [INFO]\t[TRAIN] Epoch=100/100, Step=17/21, loss=18.692869, lr=0.0, time_each_step=0.94s, eta=0:0:16\n",
      "2021-08-15 16:54:55 [INFO]\t[TRAIN] Epoch=100/100, Step=19/21, loss=14.184681, lr=0.0, time_each_step=0.94s, eta=0:0:14\n",
      "2021-08-15 16:54:56 [INFO]\t[TRAIN] Epoch=100/100, Step=21/21, loss=13.62363, lr=0.0, time_each_step=0.6s, eta=0:0:13\n",
      "2021-08-15 16:54:56 [INFO]\t[TRAIN] Epoch 100 finished, loss=13.925554, lr=0.0 .\n",
      "2021-08-15 16:54:57 [INFO]\tStart to evaluating(total_samples=98, total_steps=7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 7/7 [00:06<00:00,  1.09it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 16:55:04 [INFO]\t[EVAL] Finished, Epoch=100, bbox_map=29.707574 .\n",
      "2021-08-15 16:55:07 [INFO]\tModel saved in output/ppyolo/best_model.\n",
      "2021-08-15 16:55:09 [INFO]\tModel saved in output/ppyolo/epoch_100.\n",
      "2021-08-15 16:55:09 [INFO]\tCurrent evaluated best model in eval_dataset is epoch_100, bbox_map=29.707574460094616\n"
     ]
    }
   ],
   "source": [
    "# Environment variable configuration, used to control whether the GPU is used\r\n",
    "# Documentation: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu\r\n",
    "import os\r\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n",
    "\r\n",
    "from paddlex.det import transforms\r\n",
    "import paddlex as pdx\r\n",
    "\r\n",
    "# Download and extract the insect detection dataset; this project already ships a prepared dataset, so this step can be skipped\r\n",
    "#insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'\r\n",
    "#pdx.utils.download_and_decompress(insect_dataset, path='./')\r\n",
    "\r\n",
    "# Define the transforms used during training and evaluation\r\n",
    "# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html\r\n",
    "train_transforms = transforms.Compose([\r\n",
    "    transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(),\r\n",
    "    transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize(\r\n",
    "        target_size=608, interp='RANDOM'), transforms.RandomHorizontalFlip(),\r\n",
    "    transforms.Normalize()\r\n",
    "])\r\n",
    "\r\n",
    "eval_transforms = transforms.Compose([\r\n",
    "    transforms.Resize(\r\n",
    "        target_size=608, interp='CUBIC'), transforms.Normalize()\r\n",
    "])\r\n",
    "\r\n",
    "# Define the datasets used for training and evaluation; adjust the paths and file names to match your own data\r\n",
    "# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection\r\n",
    "train_dataset = pdx.datasets.VOCDetection(\r\n",
    "    data_dir='fire',\r\n",
    "    file_list='fire/train_list.txt',\r\n",
    "    label_list='fire/labels.txt',\r\n",
    "    transforms=train_transforms,\r\n",
    "    shuffle=True)\r\n",
    "eval_dataset = pdx.datasets.VOCDetection(\r\n",
    "    data_dir='fire',\r\n",
    "    file_list='fire/val_list.txt',\r\n",
    "    label_list='fire/labels.txt',\r\n",
    "    transforms=eval_transforms)\r\n",
    "\r\n",
    "# Initialize the model and start training\r\n",
    "# Training metrics can be inspected with VisualDL, see https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html\r\n",
    "num_classes = len(train_dataset.labels)\r\n",
    "\r\n",
    "# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-ppyolo\r\n",
    "model = pdx.det.PPYOLO(num_classes=num_classes)\r\n",
    "\r\n",
    "# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train\r\n",
    "# Parameter descriptions and tuning guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html\r\n",
    "model.train(\r\n",
    "    num_epochs=100,# total number of training epochs (one epoch = one full pass over the training set)\r\n",
    "    train_dataset=train_dataset,\r\n",
    "    train_batch_size=16,# samples per forward pass (one step); closely tied to GPU/host memory: the larger the batch size, the more memory is consumed\r\n",
    "    eval_dataset=eval_dataset,\r\n",
    "    learning_rate=0.000125,# initial learning rate of the default optimizer\r\n",
    "    lr_decay_epochs=[70, 80 , 90],# epochs at which the learning rate is decayed\r\n",
    "    save_dir='output/ppyolo',\r\n",
    "    use_vdl=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Create a directory to keep the trained model.\r\n",
    "# mkdir -p creates parent directories as needed and does not fail when the\r\n",
    "# directory already exists, so this cell is safe to re-run.\r\n",
    "!mkdir -p ~/work/result_model/ppyolo\r\n",
    "! mv ~/output/ppyolo/best_model ~/work/result_model/ppyolo"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 17:20:12 [INFO]\tModel[PPYOLO] loaded.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/cbook/__init__.py:2349: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  if isinstance(obj, collections.Iterator):\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 17:20:13 [INFO]\tThe visualized result is saved as ./visualize_9.jpg\n"
     ]
    }
   ],
   "source": [
    "# Predict a single test image and save the visualized detection result\r\n",
    "import paddlex as pdx\r\n",
    "\r\n",
    "test_jpg = './work/test/9.jpg'\r\n",
    "model = pdx.load_model('./work/result_model/ppyolo/best_model')\r\n",
    "# The predict API does not filter out low-confidence detections; filter by the score value as needed\r\n",
    "result = model.predict(test_jpg)\r\n",
    "# The visualized result is written under save_dir ('./', i.e. the current directory)\r\n",
    "pdx.det.visualize(test_jpg, result, threshold=0.2, save_dir='./')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 17:21:29 [INFO]\tModel[PPYOLO] loaded.\n",
      "9.jpg\n",
      "2021-08-15 17:21:29 [INFO]\tThe visualized result is saved as ./work/test_out/visualize_9.jpg\n",
      "71.jpg\n",
      "2021-08-15 17:21:29 [INFO]\tThe visualized result is saved as ./work/test_out/visualize_71.jpg\n",
      "395.jpg\n",
      "2021-08-15 17:21:29 [INFO]\tThe visualized result is saved as ./work/test_out/visualize_395.jpg\n",
      "73.jpg\n",
      "2021-08-15 17:21:29 [INFO]\tThe visualized result is saved as ./work/test_out/visualize_73.jpg\n",
      "7.jpg\n",
      "2021-08-15 17:21:29 [INFO]\tThe visualized result is saved as ./work/test_out/visualize_7.jpg\n"
     ]
    }
   ],
   "source": [
    "# Batch-predict every JPEG image in a folder and save the visualized results\r\n",
    "import paddlex as pdx\r\n",
    "import os\r\n",
    "model = pdx.load_model('./work/result_model/ppyolo/best_model')\r\n",
    "path = \"./work/test\"  # folder containing the images to predict\r\n",
    "dirs = os.listdir(path)  # list of file names in the folder\r\n",
    "for file_dir in dirs:\r\n",
    "    # Match the '.jpg' extension case-insensitively; checking for a bare 'jpg'\r\n",
    "    # suffix would also match names such as 'abcjpg' and miss '.JPG' files.\r\n",
    "    if file_dir.lower().endswith('.jpg'):\r\n",
    "        print(file_dir)\r\n",
    "        test_jpg = os.path.join(path, file_dir)  # build the full image path\r\n",
    "        result = model.predict(test_jpg)\r\n",
    "        pdx.det.visualize(test_jpg, result, threshold=0.1, save_dir='./work/test_out')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 模型导出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py:2043: UserWarning: The Attr(force_cpu) of Op(fill_constant) will be deprecated in the future, please use 'device_guard' instead. 'device_guard' has higher priority when they are used at the same time.\n",
      "  \"used at the same time.\" % type)\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/iou_aware.py:64\n",
      "The behavior of expression A * B has been unified with elementwise_mul(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_mul(X, Y, axis=0) instead of A * B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/math_op_patch.py:322: UserWarning: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlex/cv/nets/detection/iou_aware.py:40\n",
      "The behavior of expression A / B has been unified with elementwise_div(X, Y, axis=-1) from Paddle 2.0. If your code works well in the older versions but crashes in this version, try to use elementwise_div(X, Y, axis=0) instead of A / B. This transitional warning will be dropped in the future.\n",
      "  op_type, op_type, EXPRESSION_MAP[method_name]))\n",
      "W0815 17:23:33.880823  5066 device_context.cc:404] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.1, Runtime API Version: 10.1\n",
      "W0815 17:23:33.886114  5066 device_context.cc:422] device: 0, cuDNN Version: 7.6.\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/io.py:2358: UserWarning: This list is not set, Because of Paramerter not found in program. There are: create_parameter_0.w_0 create_parameter_1.w_0 create_parameter_2.w_0 create_parameter_3.w_0 create_parameter_4.w_0 create_parameter_5.w_0 create_parameter_6.w_0 create_parameter_7.w_0 create_parameter_8.w_0 create_parameter_9.w_0 create_parameter_10.w_0 create_parameter_11.w_0 create_parameter_12.w_0 create_parameter_13.w_0 create_parameter_14.w_0 create_parameter_15.w_0 create_parameter_16.w_0 create_parameter_17.w_0 create_parameter_18.w_0 create_parameter_19.w_0 create_parameter_20.w_0 create_parameter_21.w_0 create_parameter_22.w_0 create_parameter_23.w_0 create_parameter_24.w_0 create_parameter_25.w_0 create_parameter_26.w_0 create_parameter_27.w_0 create_parameter_28.w_0 create_parameter_29.w_0 create_parameter_30.w_0 create_parameter_31.w_0 create_parameter_32.w_0 create_parameter_33.w_0 create_parameter_34.w_0 create_parameter_35.w_0 create_parameter_36.w_0 create_parameter_37.w_0 create_parameter_38.w_0 create_parameter_39.w_0 create_parameter_40.w_0 create_parameter_41.w_0 create_parameter_42.w_0 create_parameter_43.w_0 create_parameter_44.w_0 create_parameter_45.w_0 create_parameter_46.w_0 create_parameter_47.w_0\n",
      "  format(\" \".join(unused_para_list)))\n",
      "2021-08-15 17:23:39 [INFO]\tModel[PPYOLO] loaded.\n",
      "2021-08-15 17:23:40 [INFO]\tModel for inference deploy saved in ./work/result_model/ppyolo/inference_model.\n"
     ]
    }
   ],
   "source": [
    "# Export the best checkpoint to a static inference model for deployment\r\n",
    "!paddlex --export_inference --model_dir=./work/result_model/ppyolo/best_model --save_dir=./work/result_model/ppyolo/inference_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Name: paddlehub\r\n",
      "Version: 2.1.0\r\n",
      "Summary: A toolkit for managing pretrained models of PaddlePaddle and helping user getting started with transfer learning more efficiently.\r\n",
      "Home-page: https://github.com/PaddlePaddle/PaddleHub\r\n",
      "Author: PaddlePaddle Author\r\n",
      "Author-email: UNKNOWN\r\n",
      "License: Apache 2.0\r\n",
      "Location: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages\r\n",
      "Requires: matplotlib, rarfile, paddle2onnx, tqdm, filelock, paddlenlp, gitpython, easydict, gunicorn, colorama, colorlog, pyyaml, opencv-python, Pillow, packaging, visualdl, flask, numpy, pyzmq\r\n",
      "Required-by: paddlex\r\n"
     ]
    }
   ],
   "source": [
    "# Show the installed PaddleHub version ('hub convert' / 'hub serving' is used below)\r\n",
    "!pip show paddlehub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__model__  model.yml  __params__\r\n"
     ]
    }
   ],
   "source": [
    "# Inspect the exported inference model files (__model__, __params__, model.yml)\r\n",
    "%ls ./work/result_model/ppyolo/inference_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/__init__.py:107: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import MutableMapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Iterable, Mapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Sized\n",
      "The converted module is stored in `work/result_model/ppyolo/hub_model`.\n"
     ]
    }
   ],
   "source": [
    "# Convert the inference model into a PaddleHub module named 'fire_model'\r\n",
    "! hub convert --model_dir ./work/result_model/ppyolo/inference_model \\\r\n",
    "              --module_name fire_model \\\r\n",
    "              --module_version 1.0 \\\r\n",
    "              --output_dir work/result_model/ppyolo/hub_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/__init__.py:107: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import MutableMapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Iterable, Mapping\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Sized\n",
      "Decompress /home/aistudio/work/result_model/ppyolo/hub_model/fire_model.tar.gz\n",
      "[##################################################] 100.00%\n",
      "[2021-08-15 17:27:59,670] [    INFO] - Successfully installed fire_model-1.0\n"
     ]
    }
   ],
   "source": [
    "# Install the converted module locally so it can be served with 'hub serving'\r\n",
    "!hub install ~/work/result_model/ppyolo/hub_model/fire_model.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Run the following command in a terminal to start the serving daemon:\r\n",
    "#hub serving start -m fire_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/ipykernel_launcher.py:9: DeprecationWarning: tostring() is deprecated. Use tobytes() instead.\n",
      "  if __name__ == '__main__':\n",
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/cbook/__init__.py:2366: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  return list(data) if isinstance(data, collections.MappingView) else data\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2021-08-15 18:18:06 [INFO]\tThe visualized result is saved as ./work/hub_test_out/visualize_7.jpg\n"
     ]
    }
   ],
   "source": [
    "# Send a test image to the local PaddleHub serving endpoint and visualize the response\r\n",
    "import requests\r\n",
    "import json\r\n",
    "import cv2\r\n",
    "import base64\r\n",
    "\r\n",
    "import paddlex as pdx  # imported explicitly so this cell does not rely on earlier cells' state\r\n",
    "\r\n",
    "def cv2_to_base64(image):\r\n",
    "    \"\"\"Encode an OpenCV image (ndarray) as a base64 JPEG string.\"\"\"\r\n",
    "    data = cv2.imencode('.jpg', image)[1]\r\n",
    "    # tostring() is deprecated (see the DeprecationWarning in this cell's output);\r\n",
    "    # tobytes() is its direct replacement and returns the same bytes\r\n",
    "    return base64.b64encode(data.tobytes()).decode('utf8')\r\n",
    "\r\n",
    "if __name__ == '__main__':\r\n",
    "    img_name = '9'\r\n",
    "    test_jpg = './work/test/' + img_name + '.jpg'\r\n",
    "    # Base64-encode the image for the JSON payload\r\n",
    "    img1 = cv2_to_base64(cv2.imread(test_jpg))\r\n",
    "    data = {'images': [img1]}\r\n",
    "    # Declare the content type\r\n",
    "    headers = {\"Content-type\": \"application/json\"}\r\n",
    "    # Send the HTTP request\r\n",
    "    url = \"http://127.0.0.1:8866/predict/fire_model\"\r\n",
    "    r = requests.post(url=url, headers=headers, data=json.dumps(data))\r\n",
    "    pdx.det.visualize(test_jpg, r.json()[\"results\"][0], threshold=0.1, save_dir='./work/hub_test_out')\r\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Keep a copy of the auto-generated serving client demo alongside the hub module\r\n",
    "%cp ~/.paddlehub/modules/fire_model/serving_client_demo.py ~/work/result_model/ppyolo/hub_model"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 2.1.2 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
