diff --git a/notebooks/unit8/unit8_part2.ipynb b/notebooks/unit8/unit8_part2.ipynb
index b36924a..d7fd26d 100644
--- a/notebooks/unit8/unit8_part2.ipynb
+++ b/notebooks/unit8/unit8_part2.ipynb
@@ -3,8 +3,8 @@
{
"cell_type": "markdown",
"metadata": {
- "id": "view-in-github",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "view-in-github"
},
"source": [
""
@@ -202,11 +202,26 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
"metadata": {
"id": "RJMxkaldwIVx"
},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "CalledProcessError",
+ "evalue": "Command 'b'# Install ViZDoom deps from \\n# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\\n\\napt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \\\\\\nnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \\\\\\nlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\\n\\n# Boost libraries\\napt-get install libboost-all-dev\\n\\n# Lua binding dependencies\\napt-get install liblua5.1-dev\\n'' returned non-zero exit status 100.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m get_ipython()\u001b[39m.\u001b[39;49mrun_cell_magic(\u001b[39m'\u001b[39;49m\u001b[39mbash\u001b[39;49m\u001b[39m'\u001b[39;49m, \u001b[39m'\u001b[39;49m\u001b[39m'\u001b[39;49m, \u001b[39m'\u001b[39;49m\u001b[39m# Install ViZDoom deps from \u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \u001b[39;49m\u001b[39m\\\\\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \u001b[39;49m\u001b[39m\\\\\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# Boost libraries\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install libboost-all-dev\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# Lua binding dependencies\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install liblua5.1-dev\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m'\u001b[39;49m)\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/interactiveshell.py:2430\u001b[0m, in \u001b[0;36mInteractiveShell.run_cell_magic\u001b[0;34m(self, magic_name, line, cell)\u001b[0m\n\u001b[1;32m 2428\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbuiltin_trap:\n\u001b[1;32m 2429\u001b[0m args \u001b[39m=\u001b[39m (magic_arg_s, cell)\n\u001b[0;32m-> 2430\u001b[0m result \u001b[39m=\u001b[39m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 2432\u001b[0m \u001b[39m# The code below prevents the output from being displayed\u001b[39;00m\n\u001b[1;32m 2433\u001b[0m \u001b[39m# when using magics with decodator @output_can_be_silenced\u001b[39;00m\n\u001b[1;32m 2434\u001b[0m \u001b[39m# when the last Python token in the expression is a ';'.\u001b[39;00m\n\u001b[1;32m 2435\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mgetattr\u001b[39m(fn, magic\u001b[39m.\u001b[39mMAGIC_OUTPUT_CAN_BE_SILENCED, \u001b[39mFalse\u001b[39;00m):\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/magics/script.py:153\u001b[0m, in \u001b[0;36mScriptMagics._make_script_magic..named_script_magic\u001b[0;34m(line, cell)\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 152\u001b[0m line \u001b[39m=\u001b[39m script\n\u001b[0;32m--> 153\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshebang(line, cell)\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/magics/script.py:305\u001b[0m, in \u001b[0;36mScriptMagics.shebang\u001b[0;34m(self, line, cell)\u001b[0m\n\u001b[1;32m 300\u001b[0m \u001b[39mif\u001b[39;00m args\u001b[39m.\u001b[39mraise_error \u001b[39mand\u001b[39;00m p\u001b[39m.\u001b[39mreturncode \u001b[39m!=\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[1;32m 301\u001b[0m \u001b[39m# If we get here and p.returncode is still None, we must have\u001b[39;00m\n\u001b[1;32m 302\u001b[0m \u001b[39m# killed it but not yet seen its return code. We don't wait for it,\u001b[39;00m\n\u001b[1;32m 303\u001b[0m \u001b[39m# in case it's stuck in uninterruptible sleep. -9 = SIGKILL\u001b[39;00m\n\u001b[1;32m 304\u001b[0m rc \u001b[39m=\u001b[39m p\u001b[39m.\u001b[39mreturncode \u001b[39mor\u001b[39;00m \u001b[39m-\u001b[39m\u001b[39m9\u001b[39m\n\u001b[0;32m--> 305\u001b[0m \u001b[39mraise\u001b[39;00m CalledProcessError(rc, cell)\n",
+ "\u001b[0;31mCalledProcessError\u001b[0m: Command 'b'# Install ViZDoom deps from \\n# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\\n\\napt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \\\\\\nnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \\\\\\nlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\\n\\n# Boost libraries\\napt-get install libboost-all-dev\\n\\n# Lua binding dependencies\\napt-get install liblua5.1-dev\\n'' returned non-zero exit status 100."
+ ]
+ }
+ ],
"source": [
"%%capture\n",
"%%bash\n",
@@ -236,11 +251,162 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 2,
"metadata": {
"id": "bbqfPZnIsvA6"
},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Collecting sample-factory\n",
+ " Downloading sample_factory-2.0.3-py3-none-any.whl (9.0 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.0/9.0 MB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0mm\n",
+ "\u001b[?25hCollecting huggingface-hub<1.0,>=0.10.0\n",
+ " Downloading huggingface_hub-0.12.1-py3-none-any.whl (190 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.3/190.3 kB\u001b[0m \u001b[31m19.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting gym<1.0,>=0.26.1\n",
+ " Downloading gym-0.26.2.tar.gz (721 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m721.7/721.7 kB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
+ "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
+ "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25hCollecting colorlog\n",
+ " Downloading colorlog-6.7.0-py2.py3-none-any.whl (11 kB)\n",
+ "Collecting filelock\n",
+ " Using cached filelock-3.9.0-py3-none-any.whl (9.7 kB)\n",
+ "Requirement already satisfied: psutil>=5.7.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (5.9.0)\n",
+ "Collecting faster-fifo<2.0,>=1.4.2\n",
+ " Downloading faster-fifo-1.4.2.tar.gz (84 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.6/84.6 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
+ "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
+ "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25hCollecting pyglet\n",
+ " Downloading pyglet-2.0.4-py3-none-any.whl (831 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m831.0/831.0 kB\u001b[0m \u001b[31m39.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: torch!=1.13.0,<2.0,>=1.9 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (1.13.1)\n",
+ "Collecting threadpoolctl>=2.0.0\n",
+ " Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB)\n",
+ "Collecting signal-slot-mp<2.0,>=1.0.3\n",
+ " Downloading signal_slot_mp-1.0.3-py3-none-any.whl (11 kB)\n",
+ "Collecting opencv-python!=3.4.18.65\n",
+ " Downloading opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (61.8 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.8/61.8 MB\u001b[0m \u001b[31m24.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
+ "\u001b[?25hCollecting wandb>=0.12.9\n",
+ " Downloading wandb-0.13.10-py3-none-any.whl (2.0 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: numpy<2.0,>=1.18.1 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (1.23.5)\n",
+ "Collecting tensorboard>=1.15.0\n",
+ " Using cached tensorboard-2.12.0-py3-none-any.whl (5.6 MB)\n",
+ "Collecting tensorboardx>=2.0\n",
+ " Downloading tensorboardX-2.6-py2.py3-none-any.whl (114 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.5/114.5 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hRequirement already satisfied: setuptools>=45.2.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from faster-fifo<2.0,>=1.4.2->sample-factory) (65.6.3)\n",
+ "Collecting cython>=0.29\n",
+ " Using cached Cython-0.29.33-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (1.9 MB)\n",
+ "Collecting gym-notices>=0.0.4\n",
+ " Downloading gym_notices-0.0.8-py3-none-any.whl (3.0 kB)\n",
+ "Collecting cloudpickle>=1.2.0\n",
+ " Downloading cloudpickle-2.2.1-py3-none-any.whl (25 kB)\n",
+ "Collecting tqdm>=4.42.1\n",
+ " Using cached tqdm-4.64.1-py2.py3-none-any.whl (78 kB)\n",
+ "Collecting pyyaml>=5.1\n",
+ " Using cached PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (682 kB)\n",
+ "Requirement already satisfied: packaging>=20.9 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (22.0)\n",
+ "Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (4.4.0)\n",
+ "Requirement already satisfied: requests in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (2.28.1)\n",
+ "Collecting absl-py>=0.4\n",
+ " Using cached absl_py-1.4.0-py3-none-any.whl (126 kB)\n",
+ "Collecting tensorboard-plugin-wit>=1.6.0\n",
+ " Using cached tensorboard_plugin_wit-1.8.1-py3-none-any.whl (781 kB)\n",
+ "Collecting markdown>=2.6.8\n",
+ " Using cached Markdown-3.4.1-py3-none-any.whl (93 kB)\n",
+ "Requirement already satisfied: wheel>=0.26 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from tensorboard>=1.15.0->sample-factory) (0.38.4)\n",
+ "Collecting google-auth-oauthlib<0.5,>=0.4.1\n",
+ " Using cached google_auth_oauthlib-0.4.6-py2.py3-none-any.whl (18 kB)\n",
+ "Collecting google-auth<3,>=1.6.3\n",
+ " Downloading google_auth-2.16.1-py2.py3-none-any.whl (177 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.2/177.2 kB\u001b[0m \u001b[31m19.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting tensorboard-data-server<0.8.0,>=0.7.0\n",
+ " Using cached tensorboard_data_server-0.7.0-py3-none-manylinux2014_x86_64.whl (6.6 MB)\n",
+ "Collecting grpcio>=1.48.2\n",
+ " Downloading grpcio-1.51.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.8 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.8/4.8 MB\u001b[0m \u001b[31m28.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n",
+ "\u001b[?25hCollecting werkzeug>=1.0.1\n",
+ " Downloading Werkzeug-2.2.3-py3-none-any.whl (233 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m233.6/233.6 kB\u001b[0m \u001b[31m20.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting protobuf>=3.19.6\n",
+ " Downloading protobuf-4.22.0-cp37-abi3-manylinux2014_x86_64.whl (302 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.4/302.4 kB\u001b[0m \u001b[31m23.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Using cached protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n",
+ "Collecting GitPython>=1.0.0\n",
+ " Downloading GitPython-3.1.31-py3-none-any.whl (184 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m184.3/184.3 kB\u001b[0m \u001b[31m18.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting docker-pycreds>=0.4.0\n",
+ " Using cached docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n",
+ "Collecting pathtools\n",
+ " Using cached pathtools-0.1.2-py3-none-any.whl\n",
+ "Collecting sentry-sdk>=1.0.0\n",
+ " Downloading sentry_sdk-1.15.0-py2.py3-none-any.whl (181 kB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m181.3/181.3 kB\u001b[0m \u001b[31m20.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25hCollecting setproctitle\n",
+ " Using cached setproctitle-1.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n",
+ "Collecting appdirs>=1.4.3\n",
+ " Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)\n",
+ "Collecting Click!=8.0.0,>=7.0\n",
+ " Using cached click-8.1.3-py3-none-any.whl (96 kB)\n",
+ "Requirement already satisfied: six>=1.4.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from docker-pycreds>=0.4.0->wandb>=0.12.9->sample-factory) (1.16.0)\n",
+ "Collecting gitdb<5,>=4.0.1\n",
+ " Using cached gitdb-4.0.10-py3-none-any.whl (62 kB)\n",
+ "Collecting pyasn1-modules>=0.2.1\n",
+ " Using cached pyasn1_modules-0.2.8-py2.py3-none-any.whl (155 kB)\n",
+ "Collecting rsa<5,>=3.1.4\n",
+ " Using cached rsa-4.9-py3-none-any.whl (34 kB)\n",
+ "Collecting cachetools<6.0,>=2.0.0\n",
+ " Using cached cachetools-5.3.0-py3-none-any.whl (9.3 kB)\n",
+ "Collecting requests-oauthlib>=0.7.0\n",
+ " Using cached requests_oauthlib-1.3.1-py2.py3-none-any.whl (23 kB)\n",
+ "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (1.26.14)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (2022.12.7)\n",
+ "Requirement already satisfied: charset-normalizer<3,>=2 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (2.0.4)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (3.4)\n",
+ "Collecting MarkupSafe>=2.1.1\n",
+ " Using cached MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (25 kB)\n",
+ "Collecting smmap<6,>=3.0.1\n",
+ " Using cached smmap-5.0.0-py3-none-any.whl (24 kB)\n",
+ "Collecting pyasn1<0.5.0,>=0.4.6\n",
+ " Using cached pyasn1-0.4.8-py2.py3-none-any.whl (77 kB)\n",
+ "Collecting oauthlib>=3.0.0\n",
+ " Using cached oauthlib-3.2.2-py3-none-any.whl (151 kB)\n",
+ "Building wheels for collected packages: faster-fifo, gym\n",
+ " Building wheel for faster-fifo (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25h Created wheel for faster-fifo: filename=faster_fifo-1.4.2-cp310-cp310-linux_x86_64.whl size=78864 sha256=db802db0bb9e4e639957e0cc929a5055871d73dac2a78aab10dd613331d4e086\n",
+ " Stored in directory: /home/chqma/.cache/pip/wheels/46/57/35/44590621055121fe1a2f1ae60846e531621498f6d6e48c8975\n",
+ " Building wheel for gym (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25h Created wheel for gym: filename=gym-0.26.2-py3-none-any.whl size=827634 sha256=2ecaeda2a512edc3ad62eb999df1ba486447df53cdcd38d3cd5acc3b0b013d44\n",
+ " Stored in directory: /home/chqma/.cache/pip/wheels/ae/5f/67/64914473eb34e9ba89dbc7eefe7e9be8f6673fbc6f0273b29f\n",
+ "Successfully built faster-fifo gym\n",
+ "Installing collected packages: tensorboard-plugin-wit, pyglet, pyasn1, pathtools, gym-notices, appdirs, tqdm, threadpoolctl, tensorboard-data-server, smmap, setproctitle, sentry-sdk, rsa, pyyaml, pyasn1-modules, protobuf, opencv-python, oauthlib, MarkupSafe, markdown, grpcio, filelock, docker-pycreds, cython, colorlog, cloudpickle, Click, cachetools, absl-py, werkzeug, tensorboardx, requests-oauthlib, huggingface-hub, gym, google-auth, gitdb, faster-fifo, signal-slot-mp, google-auth-oauthlib, GitPython, wandb, tensorboard, sample-factory\n",
+ "Successfully installed Click-8.1.3 GitPython-3.1.31 MarkupSafe-2.1.2 absl-py-1.4.0 appdirs-1.4.4 cachetools-5.3.0 cloudpickle-2.2.1 colorlog-6.7.0 cython-0.29.33 docker-pycreds-0.4.0 faster-fifo-1.4.2 filelock-3.9.0 gitdb-4.0.10 google-auth-2.16.1 google-auth-oauthlib-0.4.6 grpcio-1.51.3 gym-0.26.2 gym-notices-0.0.8 huggingface-hub-0.12.1 markdown-3.4.1 oauthlib-3.2.2 opencv-python-4.7.0.72 pathtools-0.1.2 protobuf-3.20.3 pyasn1-0.4.8 pyasn1-modules-0.2.8 pyglet-2.0.4 pyyaml-6.0 requests-oauthlib-1.3.1 rsa-4.9 sample-factory-2.0.3 sentry-sdk-1.15.0 setproctitle-1.3.2 signal-slot-mp-1.0.3 smmap-5.0.0 tensorboard-2.12.0 tensorboard-data-server-0.7.0 tensorboard-plugin-wit-1.8.1 tensorboardx-2.6 threadpoolctl-3.1.0 tqdm-4.64.1 wandb-0.13.10 werkzeug-2.2.3\n",
+ "Collecting vizdoom\n",
+ " Downloading vizdoom-1.1.14.tar.gz (15.7 MB)\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
+ "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
+ "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
+ "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25hRequirement already satisfied: numpy in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from vizdoom) (1.23.5)\n",
+ "Building wheels for collected packages: vizdoom\n",
+ " Building wheel for vizdoom (pyproject.toml) ... \u001b[?25ldone\n",
+ "\u001b[?25h Created wheel for vizdoom: filename=vizdoom-1.1.14-cp310-cp310-linux_x86_64.whl size=14192416 sha256=732a3631973e8da574807abc6da03d63e48238fdf6024c6f34c11e5c4dcf2056\n",
+ " Stored in directory: /home/chqma/.cache/pip/wheels/a4/13/80/6927dae582137aef0836f48491051c797a5de184891b8ca6c5\n",
+ "Successfully built vizdoom\n",
+ "Installing collected packages: vizdoom\n",
+ "Successfully installed vizdoom-1.1.14\n"
+ ]
+ }
+ ],
"source": [
"# install python libraries\n",
"!pip install sample-factory\n",
@@ -258,24 +424,24 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
"metadata": {
"id": "bCgZbeiavcDU"
},
"outputs": [],
"source": [
"import functools\n",
- "\n",
+ "from encoder import make_vizdoom_encoder\n",
"from sample_factory.algo.utils.context import global_model_factory\n",
"from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args\n",
"from sample_factory.envs.env_utils import register_env\n",
"from sample_factory.train import run_rl\n",
"\n",
- "from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder\n",
"from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults\n",
"from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec\n",
"\n",
"\n",
+ "\n",
"# Registers all the ViZDoom environments\n",
"def register_vizdoom_envs():\n",
" for env_spec in DOOM_ENVS:\n",
@@ -346,11 +512,88 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "env: CUDA_VISIBLE_DEVICES=1\n"
+ ]
+ }
+ ],
+ "source": [
+ "%env CUDA_VISIBLE_DEVICES=1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
"metadata": {
"id": "y_TeicMvyKHP"
},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33m[2023-02-24 08:05:26,614][795538] Environment doom_basic already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,616][795538] Environment doom_two_colors_easy already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,617][795538] Environment doom_two_colors_hard already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,619][795538] Environment doom_dm already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,619][795538] Environment doom_dwango5 already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,620][795538] Environment doom_my_way_home_flat_actions already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,621][795538] Environment doom_defend_the_center_flat_actions already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,621][795538] Environment doom_my_way_home already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,623][795538] Environment doom_deadly_corridor already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,623][795538] Environment doom_defend_the_center already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,624][795538] Environment doom_defend_the_line already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,624][795538] Environment doom_health_gathering already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,625][795538] Environment doom_health_gathering_supreme already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,625][795538] Environment doom_battle already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,626][795538] Environment doom_battle2 already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,626][795538] Environment doom_duel_bots already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,627][795538] Environment doom_deathmatch_bots already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,628][795538] Environment doom_duel already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,628][795538] Environment doom_deathmatch_full already registered, overwriting...\u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,629][795538] Environment doom_benchmark already registered, overwriting...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 08:05:26,629][795538] register_encoder_factory: \u001b[0m\n",
+ "\u001b[33m[2023-02-24 08:05:26,640][795538] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json\u001b[0m\n",
+ "\u001b[36m[2023-02-24 08:05:26,640][795538] Overriding arg 'train_for_env_steps' with value 40000000 passed from command line\u001b[0m\n",
+ "\u001b[36m[2023-02-24 08:05:26,645][795538] Experiment dir /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment already exists!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 08:05:26,646][795538] Resuming existing experiment from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 08:05:26,646][795538] Weights and Biases integration disabled\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 08:05:26,649][795538] Environment var CUDA_VISIBLE_DEVICES is 1\u001b[0m\n",
+ "Traceback (most recent call last):\n",
+ " File \"\", line 1, in \n",
+ " File \"/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/spawn.py\", line 116, in spawn_main\n",
+ " exitcode = _main(fd, parent_sentinel)\n",
+ " File \"/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/spawn.py\", line 126, in _main\n",
+ " self = reduction.pickle.load(from_parent)\n",
+ "AttributeError: Can't get attribute 'make_vizdoom_encoder' on \n"
+ ]
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[5], line 9\u001b[0m\n\u001b[1;32m 6\u001b[0m env \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mdoom_health_gathering_supreme\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m cfg \u001b[39m=\u001b[39m parse_vizdoom_cfg(argv\u001b[39m=\u001b[39m[\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m--env=\u001b[39m\u001b[39m{\u001b[39;00menv\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--num_workers=8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--num_envs_per_worker=4\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--train_for_env_steps=40000000\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m----> 9\u001b[0m status \u001b[39m=\u001b[39m run_rl(cfg)\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/train.py:37\u001b[0m, in \u001b[0;36mrun_rl\u001b[0;34m(cfg)\u001b[0m\n\u001b[1;32m 32\u001b[0m cfg, runner \u001b[39m=\u001b[39m make_runner(cfg)\n\u001b[1;32m 34\u001b[0m \u001b[39m# here we can register additional message or summary handlers\u001b[39;00m\n\u001b[1;32m 35\u001b[0m \u001b[39m# see sf_examples/dmlab/train_dmlab.py for example\u001b[39;00m\n\u001b[0;32m---> 37\u001b[0m status \u001b[39m=\u001b[39m runner\u001b[39m.\u001b[39;49minit()\n\u001b[1;32m 38\u001b[0m \u001b[39mif\u001b[39;00m status \u001b[39m==\u001b[39m ExperimentStatus\u001b[39m.\u001b[39mSUCCESS:\n\u001b[1;32m 39\u001b[0m status \u001b[39m=\u001b[39m runner\u001b[39m.\u001b[39mrun()\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/runners/runner_parallel.py:21\u001b[0m, in \u001b[0;36mParallelRunner.init\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minit\u001b[39m(\u001b[39mself\u001b[39m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m StatusCode:\n\u001b[0;32m---> 21\u001b[0m status \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49minit()\n\u001b[1;32m 22\u001b[0m \u001b[39mif\u001b[39;00m status \u001b[39m!=\u001b[39m ExperimentStatus\u001b[39m.\u001b[39mSUCCESS:\n\u001b[1;32m 23\u001b[0m \u001b[39mreturn\u001b[39;00m status\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/runners/runner.py:542\u001b[0m, in \u001b[0;36mRunner.init\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 540\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minit\u001b[39m(\u001b[39mself\u001b[39m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m StatusCode:\n\u001b[1;32m 541\u001b[0m set_global_cuda_envvars(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcfg)\n\u001b[0;32m--> 542\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39menv_info \u001b[39m=\u001b[39m obtain_env_info_in_a_separate_process(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mcfg)\n\u001b[1;32m 544\u001b[0m \u001b[39mfor\u001b[39;00m policy_id \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcfg\u001b[39m.\u001b[39mnum_policies):\n\u001b[1;32m 545\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mreward_shaping[policy_id] \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39menv_info\u001b[39m.\u001b[39mreward_shaping_scheme\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/utils/env_info.py:127\u001b[0m, in \u001b[0;36mobtain_env_info_in_a_separate_process\u001b[0;34m(cfg)\u001b[0m\n\u001b[1;32m 124\u001b[0m p \u001b[39m=\u001b[39m ctx\u001b[39m.\u001b[39mProcess(target\u001b[39m=\u001b[39mspawn_tmp_env_and_get_info, args\u001b[39m=\u001b[39m(sf_context, q, cfg))\n\u001b[1;32m 125\u001b[0m p\u001b[39m.\u001b[39mstart()\n\u001b[0;32m--> 127\u001b[0m env_info \u001b[39m=\u001b[39m q\u001b[39m.\u001b[39;49mget()\n\u001b[1;32m 128\u001b[0m p\u001b[39m.\u001b[39mjoin()\n\u001b[1;32m 130\u001b[0m \u001b[39mif\u001b[39;00m cfg\u001b[39m.\u001b[39muse_env_info_cache:\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/queues.py:103\u001b[0m, in \u001b[0;36mQueue.get\u001b[0;34m(self, block, timeout)\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[39mif\u001b[39;00m block \u001b[39mand\u001b[39;00m timeout \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 102\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_rlock:\n\u001b[0;32m--> 103\u001b[0m res \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv_bytes()\n\u001b[1;32m 104\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_sem\u001b[39m.\u001b[39mrelease()\n\u001b[1;32m 105\u001b[0m \u001b[39melse\u001b[39;00m:\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:216\u001b[0m, in \u001b[0;36m_ConnectionBase.recv_bytes\u001b[0;34m(self, maxlength)\u001b[0m\n\u001b[1;32m 214\u001b[0m \u001b[39mif\u001b[39;00m maxlength \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m maxlength \u001b[39m<\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[1;32m 215\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mnegative maxlength\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m--> 216\u001b[0m buf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv_bytes(maxlength)\n\u001b[1;32m 217\u001b[0m \u001b[39mif\u001b[39;00m buf \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 218\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_bad_message_length()\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:414\u001b[0m, in \u001b[0;36mConnection._recv_bytes\u001b[0;34m(self, maxsize)\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_recv_bytes\u001b[39m(\u001b[39mself\u001b[39m, maxsize\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m):\n\u001b[0;32m--> 414\u001b[0m buf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv(\u001b[39m4\u001b[39;49m)\n\u001b[1;32m 415\u001b[0m size, \u001b[39m=\u001b[39m struct\u001b[39m.\u001b[39munpack(\u001b[39m\"\u001b[39m\u001b[39m!i\u001b[39m\u001b[39m\"\u001b[39m, buf\u001b[39m.\u001b[39mgetvalue())\n\u001b[1;32m 416\u001b[0m \u001b[39mif\u001b[39;00m size \u001b[39m==\u001b[39m \u001b[39m-\u001b[39m\u001b[39m1\u001b[39m:\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:379\u001b[0m, in \u001b[0;36mConnection._recv\u001b[0;34m(self, size, read)\u001b[0m\n\u001b[1;32m 377\u001b[0m remaining \u001b[39m=\u001b[39m size\n\u001b[1;32m 378\u001b[0m \u001b[39mwhile\u001b[39;00m remaining \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[0;32m--> 379\u001b[0m chunk \u001b[39m=\u001b[39m read(handle, remaining)\n\u001b[1;32m 380\u001b[0m n \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(chunk)\n\u001b[1;32m 381\u001b[0m \u001b[39mif\u001b[39;00m n \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m:\n",
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
+ ]
+ }
+ ],
"source": [
"## Start the training, this should take around 15 minutes\n",
"register_vizdoom_components()\n",
@@ -358,7 +601,7 @@
"# The scenario we train on today is health gathering\n",
"# other scenarios include \"doom_basic\", \"doom_two_colors_easy\", \"doom_dm\", \"doom_dwango5\", \"doom_my_way_home\", \"doom_deadly_corridor\", \"doom_defend_the_center\", \"doom_defend_the_line\"\n",
"env = \"doom_health_gathering_supreme\"\n",
- "cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=8\", \"--num_envs_per_worker=4\", \"--train_for_env_steps=4000000\"])\n",
+ "cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=8\", \"--num_envs_per_worker=4\", \"--train_for_env_steps=40000000\"])\n",
"\n",
"status = run_rl(cfg)"
]
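This hunk raises the training budget from 4,000,000 to 40,000,000 environment steps, so the 'around 15 minutes' estimate in the cell comment no longer applies; a run of that size takes hours even on a capable GPU. Before committing to it, a short smoke test with the same helper is cheap. A sketch, assuming `parse_vizdoom_cfg`, `env` and `run_rl` from the cells above are in scope:

```python
# Hypothetical smoke test: same config helper, drastically smaller step budget
# (assumption: parse_vizdoom_cfg, env and run_rl are defined as in the cells above).
smoke_cfg = parse_vizdoom_cfg(
    argv=[f"--env={env}", "--num_workers=2", "--num_envs_per_worker=2", "--train_for_env_steps=100000"]
)
# status = run_rl(smoke_cfg)  # uncomment to run; finishes in minutes rather than hours
```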
@@ -416,12 +659,12 @@
},
{
"cell_type": "markdown",
- "source": [
- "The agent has learned something, but its performance could be better. We would clearly need to train for longer. But let's upload this model to the Hub."
- ],
"metadata": {
"id": "2A4pf_1VwPqR"
- }
+ },
+ "source": [
+ "The agent has learned something, but its performance could be better. We would clearly need to train for longer. But let's upload this model to the Hub."
+ ]
},
{
"cell_type": "markdown",
@@ -464,11 +707,28 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
"metadata": {
"id": "GoQm_jYSOts0"
},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "ImportError",
+ "evalue": "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the `ipywidgets` module: `pip install ipywidgets`.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_login.py:188\u001b[0m, in \u001b[0;36mnotebook_login\u001b[0;34m()\u001b[0m\n\u001b[1;32m 187\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 188\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mipywidgets\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mwidgets\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mwidgets\u001b[39;00m \u001b[39m# type: ignore\u001b[39;00m\n\u001b[1;32m 189\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mIPython\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdisplay\u001b[39;00m \u001b[39mimport\u001b[39;00m clear_output, display \u001b[39m# type: ignore\u001b[39;00m\n",
+ "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'ipywidgets'",
+ "\nDuring handling of the above exception, another exception occurred:\n",
+ "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[6], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mhuggingface_hub\u001b[39;00m \u001b[39mimport\u001b[39;00m notebook_login\n\u001b[0;32m----> 2\u001b[0m notebook_login()\n\u001b[1;32m 3\u001b[0m get_ipython()\u001b[39m.\u001b[39msystem(\u001b[39m'\u001b[39m\u001b[39mgit config --global credential.helper store\u001b[39m\u001b[39m'\u001b[39m)\n",
+ "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_login.py:191\u001b[0m, in \u001b[0;36mnotebook_login\u001b[0;34m()\u001b[0m\n\u001b[1;32m 189\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mIPython\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdisplay\u001b[39;00m \u001b[39mimport\u001b[39;00m clear_output, display \u001b[39m# type: ignore\u001b[39;00m\n\u001b[1;32m 190\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mImportError\u001b[39;00m:\n\u001b[0;32m--> 191\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mImportError\u001b[39;00m(\n\u001b[1;32m 192\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mThe `notebook_login` function can only be used in a notebook (Jupyter or\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 193\u001b[0m \u001b[39m\"\u001b[39m\u001b[39m Colab) and you need the `ipywidgets` module: `pip install ipywidgets`.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 194\u001b[0m )\n\u001b[1;32m 196\u001b[0m box_layout \u001b[39m=\u001b[39m widgets\u001b[39m.\u001b[39mLayout(\n\u001b[1;32m 197\u001b[0m display\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mflex\u001b[39m\u001b[39m\"\u001b[39m, flex_flow\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcolumn\u001b[39m\u001b[39m\"\u001b[39m, align_items\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcenter\u001b[39m\u001b[39m\"\u001b[39m, width\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m50\u001b[39m\u001b[39m%\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 198\u001b[0m )\n\u001b[1;32m 200\u001b[0m token_widget \u001b[39m=\u001b[39m widgets\u001b[39m.\u001b[39mPassword(description\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mToken:\u001b[39m\u001b[39m\"\u001b[39m)\n",
+ "\u001b[0;31mImportError\u001b[0m: The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the `ipywidgets` module: `pip install ipywidgets`."
+ ]
+ }
+ ],
"source": [
"from huggingface_hub import notebook_login\n",
"notebook_login()\n",
@@ -477,15 +737,231 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 8,
"metadata": {
"id": "sEawW_i0OvJV"
},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33m[2023-02-24 07:58:39,896][784615] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,896][784615] Overriding arg 'num_workers' with value 1 passed from command line\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,897][784615] Adding new argument 'no_render'=True that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,897][784615] Adding new argument 'save_video'=True that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,898][784615] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,898][784615] Adding new argument 'video_name'=None that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,899][784615] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,899][784615] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'push_to_hub'=True that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'hf_repository'='chqmatteo/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'policy_index'=0 that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,901][784615] Adding new argument 'eval_deterministic'=False that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,901][784615] Adding new argument 'train_script'=None that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,902][784615] Adding new argument 'enjoy_script'=None that is not in the saved config file!\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,902][784615] Using frameskip 1 and render_action_repeat=4 for evaluation\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,911][784615] RunningMeanStd input shape: (3, 72, 128)\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,912][784615] RunningMeanStd input shape: (1,)\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,919][784615] ConvEncoder: input_channels=3\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,943][784615] Conv encoder output size: 512\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:39,944][784615] Policy head output size: 512\u001b[0m\n",
+ "\u001b[33m[2023-02-24 07:58:39,980][784615] Loading state from checkpoint /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000268_1097728.pth...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,400][784615] Num frames 100...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,470][784615] Num frames 200...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,530][784615] Num frames 300...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,596][784615] Num frames 400...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:40,684][784615] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:40,685][784615] Avg episode reward: 5.480, avg true_objective: 4.480\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,719][784615] Num frames 500...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,787][784615] Num frames 600...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,850][784615] Num frames 700...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:40,918][784615] Num frames 800...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,005][784615] Num frames 900...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:41,078][784615] Avg episode rewards: #0: 6.640, true rewards: #0: 4.640\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:41,079][784615] Avg episode reward: 6.640, avg true_objective: 4.640\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,129][784615] Num frames 1000...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,202][784615] Num frames 1100...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,276][784615] Num frames 1200...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,360][784615] Num frames 1300...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,429][784615] Num frames 1400...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,493][784615] Num frames 1500...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,554][784615] Num frames 1600...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:41,667][784615] Avg episode rewards: #0: 8.653, true rewards: #0: 5.653\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:41,668][784615] Avg episode reward: 8.653, avg true_objective: 5.653\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,673][784615] Num frames 1700...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,734][784615] Num frames 1800...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,791][784615] Num frames 1900...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,848][784615] Num frames 2000...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,905][784615] Num frames 2100...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:41,961][784615] Num frames 2200...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,055][784615] Avg episode rewards: #0: 8.680, true rewards: #0: 5.680\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,056][784615] Avg episode reward: 8.680, avg true_objective: 5.680\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,077][784615] Num frames 2300...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,141][784615] Num frames 2400...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,203][784615] Num frames 2500...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,260][784615] Num frames 2600...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,346][784615] Avg episode rewards: #0: 7.712, true rewards: #0: 5.312\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,348][784615] Avg episode reward: 7.712, avg true_objective: 5.312\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,385][784615] Num frames 2700...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,449][784615] Num frames 2800...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,506][784615] Num frames 2900...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,563][784615] Num frames 3000...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,621][784615] Num frames 3100...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,688][784615] Num frames 3200...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,788][784615] Avg episode rewards: #0: 7.940, true rewards: #0: 5.440\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:42,788][784615] Avg episode reward: 7.940, avg true_objective: 5.440\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,817][784615] Num frames 3300...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,890][784615] Num frames 3400...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:42,964][784615] Num frames 3500...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,040][784615] Num frames 3600...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,128][784615] Avg episode rewards: #0: 7.354, true rewards: #0: 5.211\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,130][784615] Avg episode reward: 7.354, avg true_objective: 5.211\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,174][784615] Num frames 3700...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,248][784615] Num frames 3800...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,316][784615] Num frames 3900...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,383][784615] Num frames 4000...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,475][784615] Avg episode rewards: #0: 7.205, true rewards: #0: 5.080\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,476][784615] Avg episode reward: 7.205, avg true_objective: 5.080\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,498][784615] Num frames 4100...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,556][784615] Num frames 4200...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,614][784615] Num frames 4300...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,670][784615] Num frames 4400...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,727][784615] Num frames 4500...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,790][784615] Num frames 4600...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,891][784615] Avg episode rewards: #0: 7.413, true rewards: #0: 5.191\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:43,891][784615] Avg episode reward: 7.413, avg true_objective: 5.191\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,912][784615] Num frames 4700...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:43,982][784615] Num frames 4800...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:44,049][784615] Num frames 4900...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:44,117][784615] Num frames 5000...\u001b[0m\n",
+ "\u001b[36m[2023-02-24 07:58:44,184][784615] Num frames 5100...\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:44,250][784615] Avg episode rewards: #0: 7.220, true rewards: #0: 5.120\u001b[0m\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:58:44,250][784615] Avg episode reward: 7.220, avg true_objective: 5.120\u001b[0m\n",
+ "ffmpeg version 4.3 Copyright (c) 2000-2020 the FFmpeg developers\n",
+ " built with gcc 7.3.0 (crosstool-NG 1.23.0.449-a04d0)\n",
+ " configuration: --prefix=/opt/conda/conda-bld/ffmpeg_1597178665428/_h_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placeh --cc=/opt/conda/conda-bld/ffmpeg_1597178665428/_build_env/bin/x86_64-conda_cos6-linux-gnu-cc --disable-doc --disable-openssl --enable-avresample --enable-gnutls --enable-hardcoded-tables --enable-libfreetype --enable-libopenh264 --enable-pic --enable-pthreads --enable-shared --disable-static --enable-version3 --enable-zlib --enable-libmp3lame\n",
+ " libavutil 56. 51.100 / 56. 51.100\n",
+ " libavcodec 58. 91.100 / 58. 91.100\n",
+ " libavformat 58. 45.100 / 58. 45.100\n",
+ " libavdevice 58. 10.100 / 58. 10.100\n",
+ " libavfilter 7. 85.100 / 7. 85.100\n",
+ " libavresample 4. 0. 0 / 4. 0. 0\n",
+ " libswscale 5. 7.100 / 5. 7.100\n",
+ " libswresample 3. 7.100 / 3. 7.100\n",
+ "Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '/tmp/sf2_chqma/replay.mp4':\n",
+ " Metadata:\n",
+ " major_brand : isom\n",
+ " minor_version : 512\n",
+ " compatible_brands: isomiso2mp41\n",
+ " encoder : Lavf59.27.100\n",
+ " Duration: 00:02:26.57, start: 0.000000, bitrate: 1521 kb/s\n",
+ " Stream #0:0(und): Video: mpeg4 (Simple Profile) (mp4v / 0x7634706D), yuv420p, 240x180 [SAR 1:1 DAR 4:3], 1519 kb/s, 35 fps, 35 tbr, 17920 tbn, 35 tbc (default)\n",
+ " Metadata:\n",
+ " handler_name : VideoHandler\n",
+ "Unknown encoder 'libx264'\n",
+ "\u001b[36m[2023-02-24 07:58:46,584][784615] Replay video saved to /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!\u001b[0m\n",
+ "/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_commit_api.py:493: UserWarning: About to commit an empty file: 'git.diff'. Are you sure this is intended?\n",
+ " warnings.warn(\n",
+ "\n",
+ "\u001b[A\n",
+ "\n",
+ "best_000000254_1040384_reward_6.010.pth: 0%| | 0.00/34.9M [00:00, ?B/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 0%| | 123k/34.9M [00:00<01:35, 364kB/s] \n",
+ "best_000000254_1040384_reward_6.010.pth: 1%| | 279k/34.9M [00:00<01:00, 576kB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 4%|▍ | 1.42M/34.9M [00:00<00:10, 3.29MB/s]\n",
+ "events.out.tfevents.1677225358.pop-os: 100%|██████████| 72.8k/72.8k [00:01<00:00, 65.3kB/s]s]\n",
+ "\n",
+ "\n",
+ "\u001b[A\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 11%|█ | 3.80M/34.9M [00:01<00:11, 2.67MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 13%|█▎ | 4.55M/34.9M [00:02<00:11, 2.55MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 15%|█▌ | 5.37M/34.9M [00:02<00:13, 2.27MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 21%|██ | 7.19M/34.9M [00:03<00:11, 2.47MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 23%|██▎ | 7.95M/34.9M [00:03<00:11, 2.41MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 27%|██▋ | 9.45M/34.9M [00:04<00:10, 2.32MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 29%|██▉ | 10.2M/34.9M [00:04<00:10, 2.30MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 34%|███▎ | 11.8M/34.9M [00:05<00:09, 2.33MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 36%|███▌ | 12.6M/34.9M [00:05<00:09, 2.35MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 38%|███▊ | 13.4M/34.9M [00:05<00:09, 2.36MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 41%|████ | 14.2M/34.9M [00:06<00:08, 2.34MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 43%|████▎ | 14.9M/34.9M [00:06<00:08, 2.33MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 47%|████▋ | 16.4M/34.9M [00:07<00:08, 2.28MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 49%|████▉ | 17.2M/34.9M [00:07<00:07, 2.29MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 51%|█████▏ | 17.9M/34.9M [00:07<00:07, 2.27MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 53%|█████▎ | 18.7M/34.9M [00:08<00:07, 2.27MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 58%|█████▊ | 20.2M/34.9M [00:08<00:06, 2.24MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 60%|█████▉ | 20.9M/34.9M [00:09<00:06, 2.22MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 62%|██████▏ | 21.8M/34.9M [00:09<00:06, 2.20MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 64%|██████▍ | 22.5M/34.9M [00:10<00:05, 2.09MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 67%|██████▋ | 23.3M/34.9M [00:10<00:05, 2.04MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 69%|██████▉ | 24.0M/34.9M [00:10<00:05, 1.98MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 71%|███████ | 24.8M/34.9M [00:11<00:05, 1.94MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 73%|███████▎ | 25.5M/34.9M [00:11<00:04, 1.90MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 75%|███████▌ | 26.3M/34.9M [00:12<00:04, 1.84MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 77%|███████▋ | 27.0M/34.9M [00:12<00:04, 1.81MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 80%|███████▉ | 27.8M/34.9M [00:12<00:03, 1.79MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 82%|████████▏ | 28.5M/34.9M [00:13<00:03, 1.76MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 84%|████████▍ | 29.3M/34.9M [00:13<00:03, 1.75MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 86%|████████▌ | 30.0M/34.9M [00:14<00:02, 1.74MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 88%|████████▊ | 30.8M/34.9M [00:14<00:02, 1.73MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 90%|█████████ | 31.6M/34.9M [00:15<00:01, 1.72MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 93%|█████████▎| 32.3M/34.9M [00:15<00:01, 1.71MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 95%|█████████▍| 33.1M/34.9M [00:16<00:01, 1.70MB/s]\n",
+ "best_000000254_1040384_reward_6.010.pth: 97%|█████████▋| 33.8M/34.9M [00:16<00:00, 1.70MB/s]\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 99%|█████████▉| 34.6M/34.9M [00:16<00:00, 1.70MB/s]\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "best_000000254_1040384_reward_6.010.pth: 100%|██████████| 34.9M/34.9M [00:18<00:00, 1.89MB/s]\n",
+ "\n",
+ "\n",
+ "\u001b[A\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "\u001b[A\n",
+ "checkpoint_000000268_1097728.pth: 100%|██████████| 34.9M/34.9M [00:23<00:00, 1.49MB/s]\n",
+ "\n",
+ "\n",
+ "Upload 3 LFS files: 100%|██████████| 3/3 [00:23<00:00, 7.81s/it]\n",
+ "\u001b[37m\u001b[1m[2023-02-24 07:59:12,402][784615] The model has been pushed to https://huggingface.co/chqmatteo/rl_course_vizdoom_health_gathering_supreme\u001b[0m\n"
+ ]
+ }
+ ],
"source": [
"from sample_factory.enjoy import enjoy\n",
"\n",
- "hf_username = \"ThomasSimonini\" # insert your HuggingFace username here\n",
+ "hf_username = \"chqmatteo\" # insert your HuggingFace username here\n",
"\n",
"cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=1\", \"--save_video\", \"--no_render\", \"--max_num_episodes=10\", \"--max_num_frames=100000\", \"--push_to_hub\", f\"--hf_repository={hf_username}/rl_course_vizdoom_health_gathering_supreme\"], evaluation=True)\n",
"status = enjoy(cfg)"
@@ -493,14 +969,14 @@
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "9PzeXx-qxVvw"
+ },
"source": [
"## Let's load another model\n",
"\n",
"\n"
- ],
- "metadata": {
- "id": "9PzeXx-qxVvw"
- }
+ ]
},
{
"cell_type": "markdown",
@@ -566,16 +1042,16 @@
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "ie5YWC3NyKO8"
+ },
"source": [
"## Some additional challenges 🏆: Doom Deathmatch\n",
"\n",
"Training an agent to play a Doom deathmatch **takes many hours on a more beefy machine than is available in Colab**. \n",
"\n",
"Fortunately, we have have **already trained an agent in this scenario and it is available in the 🤗 Hub!** Let’s download the model and visualize the agent’s performance."
- ],
- "metadata": {
- "id": "ie5YWC3NyKO8"
- }
+ ]
},
{
"cell_type": "code",
@@ -591,12 +1067,12 @@
},
{
"cell_type": "markdown",
- "source": [
- "Given the agent plays for a long time the video generation can take **10 minutes**."
- ],
"metadata": {
"id": "7AX_LwxR2FQ0"
- }
+ },
+ "source": [
+ "Given the agent plays for a long time the video generation can take **10 minutes**."
+ ]
},
{
"cell_type": "code",
@@ -623,17 +1099,20 @@
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "N6mEC-4zyihx"
+ },
"source": [
"\n",
"You **can try to train your agent in this environment** using the code above, but not on colab.\n",
"**Good luck 🤞**"
- ],
- "metadata": {
- "id": "N6mEC-4zyihx"
- }
+ ]
},
{
"cell_type": "markdown",
+ "metadata": {
+ "id": "YnDAngN6zeeI"
+ },
"source": [
"If you prefer an easier scenario, **why not try training in another ViZDoom scenario such as `doom_deadly_corridor` or `doom_defend_the_center`.**\n",
"\n",
@@ -645,34 +1124,46 @@
"This concludes the last unit. But we are not finished yet! 🤗 The following **bonus section include some of the most interesting, advanced and cutting edge work in Deep Reinforcement Learning**.\n",
"\n",
"## Keep learning, stay awesome 🤗"
- ],
- "metadata": {
- "id": "YnDAngN6zeeI"
- }
+ ]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
- "provenance": [],
"collapsed_sections": [
"PU4FVzaoM6fC",
"nB68Eb9UgC94",
"ez5UhUtYcWXF",
"sgRy6wnrgnij"
],
+ "include_colab_link": true,
"private_outputs": true,
- "include_colab_link": true
+ "provenance": []
},
"gpuClass": "standard",
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "deep-rl-class",
+ "language": "python",
"name": "python3"
},
"language_info": {
- "name": "python"
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.9"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "da4ecdf31b09708386948f91c5b725d7113689587e88c28098219103c44ec57b"
+ }
}
},
"nbformat": 4,
"nbformat_minor": 0
-}
\ No newline at end of file
+}