chqmatteo committed on
Commit
c81c30d
1 Parent(s): c45c0f9

Upload . with huggingface_hub

.summary/0/events.out.tfevents.1677225751.pop-os ADDED
File without changes
.summary/0/events.out.tfevents.1677225789.pop-os ADDED
File without changes
.summary/0/events.out.tfevents.1677225844.pop-os ADDED
File without changes
.summary/0/events.out.tfevents.1677225926.pop-os ADDED
File without changes
.summary/0/events.out.tfevents.1677225979.pop-os ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da32336adbc8b063c28050a14266b1d5270cfaf6e167b384f8a25317ea9d801e
+ size 36926
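
Each of the files added above is stored through Git LFS: the repository itself only tracks a small pointer file whose three lines record the spec version, the sha256 object id, and the byte size of the real artifact. A minimal sketch of reading such a pointer (a hypothetical helper, not part of this repo):

# parse_lfs_pointer.py - sketch only; assumes the standard 3-line "key value" pointer format shown above
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],   # e.g. https://git-lfs.github.com/spec/v1
        "oid": fields["oid"],           # e.g. sha256:da32336adbc8...
        "size": int(fields["size"]),    # payload size in bytes (36926 here)
    }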
README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
   type: doom_health_gathering_supreme
   metrics:
   - type: mean_reward
- value: 5.12 +/- 1.18
+ value: 8.70 +/- 5.10
   name: mean_reward
   verified: false
  ---
checkpoint_p0/best_000000389_1593344_reward_13.989.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45dc1f8b4841de7b74b7b7e54da8194d1454e88edf7c65acff4b373abfbb15ca
+ size 34928806
checkpoint_p0/checkpoint_000000403_1650688.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f81321e38ba47c35d8fb2fcc7d27b5979c7e799aaa693a854fbd7a5a77202564
+ size 34929220
config.json CHANGED
@@ -65,7 +65,7 @@
  "summaries_use_frameskip": true,
  "heartbeat_interval": 20,
  "heartbeat_reporting_interval": 600,
- "train_for_env_steps": 4000000,
+ "train_for_env_steps": 40000000,
  "train_for_seconds": 10000000000,
  "save_every_sec": 120,
  "keep_checkpoints": 2,
git.diff CHANGED
@@ -0,0 +1,759 @@
1
+ diff --git a/notebooks/unit8/unit8_part2.ipynb b/notebooks/unit8/unit8_part2.ipynb
2
+ index b36924a..d7fd26d 100644
3
+ --- a/notebooks/unit8/unit8_part2.ipynb
4
+ +++ b/notebooks/unit8/unit8_part2.ipynb
5
+ @@ -3,8 +3,8 @@
6
+ {
7
+ "cell_type": "markdown",
8
+ "metadata": {
9
+ - "id": "view-in-github",
10
+ - "colab_type": "text"
11
+ + "colab_type": "text",
12
+ + "id": "view-in-github"
13
+ },
14
+ "source": [
15
+ "<a href=\"https://colab.research.google.com/github/huggingface/deep-rl-class/blob/EdBeeching%2FPPOPart2/notebooks/unit8/unit8_part2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
16
+ @@ -202,11 +202,26 @@
17
+ },
18
+ {
19
+ "cell_type": "code",
20
+ - "execution_count": null,
21
+ + "execution_count": 1,
22
+ "metadata": {
23
+ "id": "RJMxkaldwIVx"
24
+ },
25
+ - "outputs": [],
26
+ + "outputs": [
27
+ + {
28
+ + "ename": "CalledProcessError",
29
+ + "evalue": "Command 'b'# Install ViZDoom deps from \\n# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\\n\\napt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \\\\\\nnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \\\\\\nlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\\n\\n# Boost libraries\\napt-get install libboost-all-dev\\n\\n# Lua binding dependencies\\napt-get install liblua5.1-dev\\n'' returned non-zero exit status 100.",
30
+ + "output_type": "error",
31
+ + "traceback": [
32
+ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
33
+ + "\u001b[0;31mCalledProcessError\u001b[0m Traceback (most recent call last)",
34
+ + "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m get_ipython()\u001b[39m.\u001b[39;49mrun_cell_magic(\u001b[39m'\u001b[39;49m\u001b[39mbash\u001b[39;49m\u001b[39m'\u001b[39;49m, \u001b[39m'\u001b[39;49m\u001b[39m'\u001b[39;49m, \u001b[39m'\u001b[39;49m\u001b[39m# Install ViZDoom deps from \u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \u001b[39;49m\u001b[39m\\\\\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \u001b[39;49m\u001b[39m\\\\\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39mlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# Boost libraries\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install libboost-all-dev\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39m# Lua binding dependencies\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39mapt-get install liblua5.1-dev\u001b[39;49m\u001b[39m\\n\u001b[39;49;00m\u001b[39m'\u001b[39;49m)\n",
35
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/interactiveshell.py:2430\u001b[0m, in \u001b[0;36mInteractiveShell.run_cell_magic\u001b[0;34m(self, magic_name, line, cell)\u001b[0m\n\u001b[1;32m 2428\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mbuiltin_trap:\n\u001b[1;32m 2429\u001b[0m args \u001b[39m=\u001b[39m (magic_arg_s, cell)\n\u001b[0;32m-> 2430\u001b[0m result \u001b[39m=\u001b[39m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 2432\u001b[0m \u001b[39m# The code below prevents the output from being displayed\u001b[39;00m\n\u001b[1;32m 2433\u001b[0m \u001b[39m# when using magics with decodator @output_can_be_silenced\u001b[39;00m\n\u001b[1;32m 2434\u001b[0m \u001b[39m# when the last Python token in the expression is a ';'.\u001b[39;00m\n\u001b[1;32m 2435\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mgetattr\u001b[39m(fn, magic\u001b[39m.\u001b[39mMAGIC_OUTPUT_CAN_BE_SILENCED, \u001b[39mFalse\u001b[39;00m):\n",
36
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/magics/script.py:153\u001b[0m, in \u001b[0;36mScriptMagics._make_script_magic.<locals>.named_script_magic\u001b[0;34m(line, cell)\u001b[0m\n\u001b[1;32m 151\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 152\u001b[0m line \u001b[39m=\u001b[39m script\n\u001b[0;32m--> 153\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mshebang(line, cell)\n",
37
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/IPython/core/magics/script.py:305\u001b[0m, in \u001b[0;36mScriptMagics.shebang\u001b[0;34m(self, line, cell)\u001b[0m\n\u001b[1;32m 300\u001b[0m \u001b[39mif\u001b[39;00m args\u001b[39m.\u001b[39mraise_error \u001b[39mand\u001b[39;00m p\u001b[39m.\u001b[39mreturncode \u001b[39m!=\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[1;32m 301\u001b[0m \u001b[39m# If we get here and p.returncode is still None, we must have\u001b[39;00m\n\u001b[1;32m 302\u001b[0m \u001b[39m# killed it but not yet seen its return code. We don't wait for it,\u001b[39;00m\n\u001b[1;32m 303\u001b[0m \u001b[39m# in case it's stuck in uninterruptible sleep. -9 = SIGKILL\u001b[39;00m\n\u001b[1;32m 304\u001b[0m rc \u001b[39m=\u001b[39m p\u001b[39m.\u001b[39mreturncode \u001b[39mor\u001b[39;00m \u001b[39m-\u001b[39m\u001b[39m9\u001b[39m\n\u001b[0;32m--> 305\u001b[0m \u001b[39mraise\u001b[39;00m CalledProcessError(rc, cell)\n",
38
+ + "\u001b[0;31mCalledProcessError\u001b[0m: Command 'b'# Install ViZDoom deps from \\n# https://github.com/mwydmuch/ViZDoom/blob/master/doc/Building.md#-linux\\n\\napt-get install build-essential zlib1g-dev libsdl2-dev libjpeg-dev \\\\\\nnasm tar libbz2-dev libgtk2.0-dev cmake git libfluidsynth-dev libgme-dev \\\\\\nlibopenal-dev timidity libwildmidi-dev unzip ffmpeg\\n\\n# Boost libraries\\napt-get install libboost-all-dev\\n\\n# Lua binding dependencies\\napt-get install liblua5.1-dev\\n'' returned non-zero exit status 100."
39
+ + ]
40
+ + }
41
+ + ],
42
+ "source": [
43
+ "%%capture\n",
44
+ "%%bash\n",
45
+ @@ -236,11 +251,162 @@
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ - "execution_count": null,
50
+ + "execution_count": 2,
51
+ "metadata": {
52
+ "id": "bbqfPZnIsvA6"
53
+ },
54
+ - "outputs": [],
55
+ + "outputs": [
56
+ + {
57
+ + "name": "stdout",
58
+ + "output_type": "stream",
59
+ + "text": [
60
+ + "Collecting sample-factory\n",
61
+ + " Downloading sample_factory-2.0.3-py3-none-any.whl (9.0 MB)\n",
62
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.0/9.0 MB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0mm\n",
63
+ + "\u001b[?25hCollecting huggingface-hub<1.0,>=0.10.0\n",
64
+ + " Downloading huggingface_hub-0.12.1-py3-none-any.whl (190 kB)\n",
65
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m190.3/190.3 kB\u001b[0m \u001b[31m19.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
66
+ + "\u001b[?25hCollecting gym<1.0,>=0.26.1\n",
67
+ + " Downloading gym-0.26.2.tar.gz (721 kB)\n",
68
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m721.7/721.7 kB\u001b[0m \u001b[31m26.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
69
+ + "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
70
+ + "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
71
+ + "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
72
+ + "\u001b[?25hCollecting colorlog\n",
73
+ + " Downloading colorlog-6.7.0-py2.py3-none-any.whl (11 kB)\n",
74
+ + "Collecting filelock\n",
75
+ + " Using cached filelock-3.9.0-py3-none-any.whl (9.7 kB)\n",
76
+ + "Requirement already satisfied: psutil>=5.7.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (5.9.0)\n",
77
+ + "Collecting faster-fifo<2.0,>=1.4.2\n",
78
+ + " Downloading faster-fifo-1.4.2.tar.gz (84 kB)\n",
79
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.6/84.6 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
80
+ + "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
81
+ + "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
82
+ + "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
83
+ + "\u001b[?25hCollecting pyglet\n",
84
+ + " Downloading pyglet-2.0.4-py3-none-any.whl (831 kB)\n",
85
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m831.0/831.0 kB\u001b[0m \u001b[31m39.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
86
+ + "\u001b[?25hRequirement already satisfied: torch!=1.13.0,<2.0,>=1.9 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (1.13.1)\n",
87
+ + "Collecting threadpoolctl>=2.0.0\n",
88
+ + " Using cached threadpoolctl-3.1.0-py3-none-any.whl (14 kB)\n",
89
+ + "Collecting signal-slot-mp<2.0,>=1.0.3\n",
90
+ + " Downloading signal_slot_mp-1.0.3-py3-none-any.whl (11 kB)\n",
91
+ + "Collecting opencv-python!=3.4.18.65\n",
92
+ + " Downloading opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (61.8 MB)\n",
93
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.8/61.8 MB\u001b[0m \u001b[31m24.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
94
+ + "\u001b[?25hCollecting wandb>=0.12.9\n",
95
+ + " Downloading wandb-0.13.10-py3-none-any.whl (2.0 MB)\n",
96
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n",
97
+ + "\u001b[?25hRequirement already satisfied: numpy<2.0,>=1.18.1 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from sample-factory) (1.23.5)\n",
98
+ + "Collecting tensorboard>=1.15.0\n",
99
+ + " Using cached tensorboard-2.12.0-py3-none-any.whl (5.6 MB)\n",
100
+ + "Collecting tensorboardx>=2.0\n",
101
+ + " Downloading tensorboardX-2.6-py2.py3-none-any.whl (114 kB)\n",
102
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.5/114.5 kB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
103
+ + "\u001b[?25hRequirement already satisfied: setuptools>=45.2.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from faster-fifo<2.0,>=1.4.2->sample-factory) (65.6.3)\n",
104
+ + "Collecting cython>=0.29\n",
105
+ + " Using cached Cython-0.29.33-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (1.9 MB)\n",
106
+ + "Collecting gym-notices>=0.0.4\n",
107
+ + " Downloading gym_notices-0.0.8-py3-none-any.whl (3.0 kB)\n",
108
+ + "Collecting cloudpickle>=1.2.0\n",
109
+ + " Downloading cloudpickle-2.2.1-py3-none-any.whl (25 kB)\n",
110
+ + "Collecting tqdm>=4.42.1\n",
111
+ + " Using cached tqdm-4.64.1-py2.py3-none-any.whl (78 kB)\n",
112
+ + "Collecting pyyaml>=5.1\n",
113
+ + " Using cached PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (682 kB)\n",
114
+ + "Requirement already satisfied: packaging>=20.9 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (22.0)\n",
115
+ + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (4.4.0)\n",
116
+ + "Requirement already satisfied: requests in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.10.0->sample-factory) (2.28.1)\n",
117
+ + "Collecting absl-py>=0.4\n",
118
+ + " Using cached absl_py-1.4.0-py3-none-any.whl (126 kB)\n",
119
+ + "Collecting tensorboard-plugin-wit>=1.6.0\n",
120
+ + " Using cached tensorboard_plugin_wit-1.8.1-py3-none-any.whl (781 kB)\n",
121
+ + "Collecting markdown>=2.6.8\n",
122
+ + " Using cached Markdown-3.4.1-py3-none-any.whl (93 kB)\n",
123
+ + "Requirement already satisfied: wheel>=0.26 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from tensorboard>=1.15.0->sample-factory) (0.38.4)\n",
124
+ + "Collecting google-auth-oauthlib<0.5,>=0.4.1\n",
125
+ + " Using cached google_auth_oauthlib-0.4.6-py2.py3-none-any.whl (18 kB)\n",
126
+ + "Collecting google-auth<3,>=1.6.3\n",
127
+ + " Downloading google_auth-2.16.1-py2.py3-none-any.whl (177 kB)\n",
128
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.2/177.2 kB\u001b[0m \u001b[31m19.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
129
+ + "\u001b[?25hCollecting tensorboard-data-server<0.8.0,>=0.7.0\n",
130
+ + " Using cached tensorboard_data_server-0.7.0-py3-none-manylinux2014_x86_64.whl (6.6 MB)\n",
131
+ + "Collecting grpcio>=1.48.2\n",
132
+ + " Downloading grpcio-1.51.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.8 MB)\n",
133
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.8/4.8 MB\u001b[0m \u001b[31m28.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n",
134
+ + "\u001b[?25hCollecting werkzeug>=1.0.1\n",
135
+ + " Downloading Werkzeug-2.2.3-py3-none-any.whl (233 kB)\n",
136
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m233.6/233.6 kB\u001b[0m \u001b[31m20.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
137
+ + "\u001b[?25hCollecting protobuf>=3.19.6\n",
138
+ + " Downloading protobuf-4.22.0-cp37-abi3-manylinux2014_x86_64.whl (302 kB)\n",
139
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m302.4/302.4 kB\u001b[0m \u001b[31m23.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
140
+ + "\u001b[?25h Using cached protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (1.1 MB)\n",
141
+ + "Collecting GitPython>=1.0.0\n",
142
+ + " Downloading GitPython-3.1.31-py3-none-any.whl (184 kB)\n",
143
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m184.3/184.3 kB\u001b[0m \u001b[31m18.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
144
+ + "\u001b[?25hCollecting docker-pycreds>=0.4.0\n",
145
+ + " Using cached docker_pycreds-0.4.0-py2.py3-none-any.whl (9.0 kB)\n",
146
+ + "Collecting pathtools\n",
147
+ + " Using cached pathtools-0.1.2-py3-none-any.whl\n",
148
+ + "Collecting sentry-sdk>=1.0.0\n",
149
+ + " Downloading sentry_sdk-1.15.0-py2.py3-none-any.whl (181 kB)\n",
150
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m181.3/181.3 kB\u001b[0m \u001b[31m20.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
151
+ + "\u001b[?25hCollecting setproctitle\n",
152
+ + " Using cached setproctitle-1.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (30 kB)\n",
153
+ + "Collecting appdirs>=1.4.3\n",
154
+ + " Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB)\n",
155
+ + "Collecting Click!=8.0.0,>=7.0\n",
156
+ + " Using cached click-8.1.3-py3-none-any.whl (96 kB)\n",
157
+ + "Requirement already satisfied: six>=1.4.0 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from docker-pycreds>=0.4.0->wandb>=0.12.9->sample-factory) (1.16.0)\n",
158
+ + "Collecting gitdb<5,>=4.0.1\n",
159
+ + " Using cached gitdb-4.0.10-py3-none-any.whl (62 kB)\n",
160
+ + "Collecting pyasn1-modules>=0.2.1\n",
161
+ + " Using cached pyasn1_modules-0.2.8-py2.py3-none-any.whl (155 kB)\n",
162
+ + "Collecting rsa<5,>=3.1.4\n",
163
+ + " Using cached rsa-4.9-py3-none-any.whl (34 kB)\n",
164
+ + "Collecting cachetools<6.0,>=2.0.0\n",
165
+ + " Using cached cachetools-5.3.0-py3-none-any.whl (9.3 kB)\n",
166
+ + "Collecting requests-oauthlib>=0.7.0\n",
167
+ + " Using cached requests_oauthlib-1.3.1-py2.py3-none-any.whl (23 kB)\n",
168
+ + "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (1.26.14)\n",
169
+ + "Requirement already satisfied: certifi>=2017.4.17 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (2022.12.7)\n",
170
+ + "Requirement already satisfied: charset-normalizer<3,>=2 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (2.0.4)\n",
171
+ + "Requirement already satisfied: idna<4,>=2.5 in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from requests->huggingface-hub<1.0,>=0.10.0->sample-factory) (3.4)\n",
172
+ + "Collecting MarkupSafe>=2.1.1\n",
173
+ + " Using cached MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (25 kB)\n",
174
+ + "Collecting smmap<6,>=3.0.1\n",
175
+ + " Using cached smmap-5.0.0-py3-none-any.whl (24 kB)\n",
176
+ + "Collecting pyasn1<0.5.0,>=0.4.6\n",
177
+ + " Using cached pyasn1-0.4.8-py2.py3-none-any.whl (77 kB)\n",
178
+ + "Collecting oauthlib>=3.0.0\n",
179
+ + " Using cached oauthlib-3.2.2-py3-none-any.whl (151 kB)\n",
180
+ + "Building wheels for collected packages: faster-fifo, gym\n",
181
+ + " Building wheel for faster-fifo (pyproject.toml) ... \u001b[?25ldone\n",
182
+ + "\u001b[?25h Created wheel for faster-fifo: filename=faster_fifo-1.4.2-cp310-cp310-linux_x86_64.whl size=78864 sha256=db802db0bb9e4e639957e0cc929a5055871d73dac2a78aab10dd613331d4e086\n",
183
+ + " Stored in directory: /home/chqma/.cache/pip/wheels/46/57/35/44590621055121fe1a2f1ae60846e531621498f6d6e48c8975\n",
184
+ + " Building wheel for gym (pyproject.toml) ... \u001b[?25ldone\n",
185
+ + "\u001b[?25h Created wheel for gym: filename=gym-0.26.2-py3-none-any.whl size=827634 sha256=2ecaeda2a512edc3ad62eb999df1ba486447df53cdcd38d3cd5acc3b0b013d44\n",
186
+ + " Stored in directory: /home/chqma/.cache/pip/wheels/ae/5f/67/64914473eb34e9ba89dbc7eefe7e9be8f6673fbc6f0273b29f\n",
187
+ + "Successfully built faster-fifo gym\n",
188
+ + "Installing collected packages: tensorboard-plugin-wit, pyglet, pyasn1, pathtools, gym-notices, appdirs, tqdm, threadpoolctl, tensorboard-data-server, smmap, setproctitle, sentry-sdk, rsa, pyyaml, pyasn1-modules, protobuf, opencv-python, oauthlib, MarkupSafe, markdown, grpcio, filelock, docker-pycreds, cython, colorlog, cloudpickle, Click, cachetools, absl-py, werkzeug, tensorboardx, requests-oauthlib, huggingface-hub, gym, google-auth, gitdb, faster-fifo, signal-slot-mp, google-auth-oauthlib, GitPython, wandb, tensorboard, sample-factory\n",
189
+ + "Successfully installed Click-8.1.3 GitPython-3.1.31 MarkupSafe-2.1.2 absl-py-1.4.0 appdirs-1.4.4 cachetools-5.3.0 cloudpickle-2.2.1 colorlog-6.7.0 cython-0.29.33 docker-pycreds-0.4.0 faster-fifo-1.4.2 filelock-3.9.0 gitdb-4.0.10 google-auth-2.16.1 google-auth-oauthlib-0.4.6 grpcio-1.51.3 gym-0.26.2 gym-notices-0.0.8 huggingface-hub-0.12.1 markdown-3.4.1 oauthlib-3.2.2 opencv-python-4.7.0.72 pathtools-0.1.2 protobuf-3.20.3 pyasn1-0.4.8 pyasn1-modules-0.2.8 pyglet-2.0.4 pyyaml-6.0 requests-oauthlib-1.3.1 rsa-4.9 sample-factory-2.0.3 sentry-sdk-1.15.0 setproctitle-1.3.2 signal-slot-mp-1.0.3 smmap-5.0.0 tensorboard-2.12.0 tensorboard-data-server-0.7.0 tensorboard-plugin-wit-1.8.1 tensorboardx-2.6 threadpoolctl-3.1.0 tqdm-4.64.1 wandb-0.13.10 werkzeug-2.2.3\n",
190
+ + "Collecting vizdoom\n",
191
+ + " Downloading vizdoom-1.1.14.tar.gz (15.7 MB)\n",
192
+ + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
193
+ + "\u001b[?25h Installing build dependencies ... \u001b[?25ldone\n",
194
+ + "\u001b[?25h Getting requirements to build wheel ... \u001b[?25ldone\n",
195
+ + "\u001b[?25h Preparing metadata (pyproject.toml) ... \u001b[?25ldone\n",
196
+ + "\u001b[?25hRequirement already satisfied: numpy in /home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages (from vizdoom) (1.23.5)\n",
197
+ + "Building wheels for collected packages: vizdoom\n",
198
+ + " Building wheel for vizdoom (pyproject.toml) ... \u001b[?25ldone\n",
199
+ + "\u001b[?25h Created wheel for vizdoom: filename=vizdoom-1.1.14-cp310-cp310-linux_x86_64.whl size=14192416 sha256=732a3631973e8da574807abc6da03d63e48238fdf6024c6f34c11e5c4dcf2056\n",
200
+ + " Stored in directory: /home/chqma/.cache/pip/wheels/a4/13/80/6927dae582137aef0836f48491051c797a5de184891b8ca6c5\n",
201
+ + "Successfully built vizdoom\n",
202
+ + "Installing collected packages: vizdoom\n",
203
+ + "Successfully installed vizdoom-1.1.14\n"
204
+ + ]
205
+ + }
206
+ + ],
207
+ "source": [
208
+ "# install python libraries\n",
209
+ "!pip install sample-factory\n",
210
+ @@ -258,24 +424,24 @@
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ - "execution_count": null,
215
+ + "execution_count": 6,
216
+ "metadata": {
217
+ "id": "bCgZbeiavcDU"
218
+ },
219
+ "outputs": [],
220
+ "source": [
221
+ "import functools\n",
222
+ - "\n",
223
+ + "from encoder import make_vizdoom_encoder\n",
224
+ "from sample_factory.algo.utils.context import global_model_factory\n",
225
+ "from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args\n",
226
+ "from sample_factory.envs.env_utils import register_env\n",
227
+ "from sample_factory.train import run_rl\n",
228
+ "\n",
229
+ - "from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder\n",
230
+ "from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults\n",
231
+ "from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec\n",
232
+ "\n",
233
+ "\n",
234
+ + "\n",
235
+ "# Registers all the ViZDoom environments\n",
236
+ "def register_vizdoom_envs():\n",
237
+ " for env_spec in DOOM_ENVS:\n",
238
+ @@ -346,11 +512,88 @@
239
+ },
240
+ {
241
+ "cell_type": "code",
242
+ - "execution_count": null,
243
+ + "execution_count": 1,
244
+ + "metadata": {},
245
+ + "outputs": [
246
+ + {
247
+ + "name": "stdout",
248
+ + "output_type": "stream",
249
+ + "text": [
250
+ + "env: CUDA_VISIBLE_DEVICES=1\n"
251
+ + ]
252
+ + }
253
+ + ],
254
+ + "source": [
255
+ + "%env CUDA_VISIBLE_DEVICES=1"
256
+ + ]
257
+ + },
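246
+ + (Note: the newly added cell above pins the run to the second GPU by setting CUDA_VISIBLE_DEVICES=1 before anything initializes CUDA. Outside IPython the equivalent is a plain environment assignment, a sketch of which is:
+ +     import os
+ +     os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # expose only physical GPU 1; inside the process it appears as cuda:0
+ + This must run before torch/CUDA is first touched.)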
258
+ + {
259
+ + "cell_type": "code",
260
+ + "execution_count": 5,
261
+ "metadata": {
262
+ "id": "y_TeicMvyKHP"
263
+ },
264
+ - "outputs": [],
265
+ + "outputs": [
266
+ + {
267
+ + "name": "stderr",
268
+ + "output_type": "stream",
269
+ + "text": [
270
+ + "\u001b[33m[2023-02-24 08:05:26,614][795538] Environment doom_basic already registered, overwriting...\u001b[0m\n",
271
+ + "\u001b[33m[2023-02-24 08:05:26,616][795538] Environment doom_two_colors_easy already registered, overwriting...\u001b[0m\n",
272
+ + "\u001b[33m[2023-02-24 08:05:26,617][795538] Environment doom_two_colors_hard already registered, overwriting...\u001b[0m\n",
273
+ + "\u001b[33m[2023-02-24 08:05:26,619][795538] Environment doom_dm already registered, overwriting...\u001b[0m\n",
274
+ + "\u001b[33m[2023-02-24 08:05:26,619][795538] Environment doom_dwango5 already registered, overwriting...\u001b[0m\n",
275
+ + "\u001b[33m[2023-02-24 08:05:26,620][795538] Environment doom_my_way_home_flat_actions already registered, overwriting...\u001b[0m\n",
276
+ + "\u001b[33m[2023-02-24 08:05:26,621][795538] Environment doom_defend_the_center_flat_actions already registered, overwriting...\u001b[0m\n",
277
+ + "\u001b[33m[2023-02-24 08:05:26,621][795538] Environment doom_my_way_home already registered, overwriting...\u001b[0m\n",
278
+ + "\u001b[33m[2023-02-24 08:05:26,623][795538] Environment doom_deadly_corridor already registered, overwriting...\u001b[0m\n",
279
+ + "\u001b[33m[2023-02-24 08:05:26,623][795538] Environment doom_defend_the_center already registered, overwriting...\u001b[0m\n",
280
+ + "\u001b[33m[2023-02-24 08:05:26,624][795538] Environment doom_defend_the_line already registered, overwriting...\u001b[0m\n",
281
+ + "\u001b[33m[2023-02-24 08:05:26,624][795538] Environment doom_health_gathering already registered, overwriting...\u001b[0m\n",
282
+ + "\u001b[33m[2023-02-24 08:05:26,625][795538] Environment doom_health_gathering_supreme already registered, overwriting...\u001b[0m\n",
283
+ + "\u001b[33m[2023-02-24 08:05:26,625][795538] Environment doom_battle already registered, overwriting...\u001b[0m\n",
284
+ + "\u001b[33m[2023-02-24 08:05:26,626][795538] Environment doom_battle2 already registered, overwriting...\u001b[0m\n",
285
+ + "\u001b[33m[2023-02-24 08:05:26,626][795538] Environment doom_duel_bots already registered, overwriting...\u001b[0m\n",
286
+ + "\u001b[33m[2023-02-24 08:05:26,627][795538] Environment doom_deathmatch_bots already registered, overwriting...\u001b[0m\n",
287
+ + "\u001b[33m[2023-02-24 08:05:26,628][795538] Environment doom_duel already registered, overwriting...\u001b[0m\n",
288
+ + "\u001b[33m[2023-02-24 08:05:26,628][795538] Environment doom_deathmatch_full already registered, overwriting...\u001b[0m\n",
289
+ + "\u001b[33m[2023-02-24 08:05:26,629][795538] Environment doom_benchmark already registered, overwriting...\u001b[0m\n",
290
+ + "\u001b[36m[2023-02-24 08:05:26,629][795538] register_encoder_factory: <function make_vizdoom_encoder at 0x7efc41caae60>\u001b[0m\n",
291
+ + "\u001b[33m[2023-02-24 08:05:26,640][795538] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json\u001b[0m\n",
292
+ + "\u001b[36m[2023-02-24 08:05:26,640][795538] Overriding arg 'train_for_env_steps' with value 40000000 passed from command line\u001b[0m\n",
293
+ + "\u001b[36m[2023-02-24 08:05:26,645][795538] Experiment dir /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment already exists!\u001b[0m\n",
294
+ + "\u001b[36m[2023-02-24 08:05:26,646][795538] Resuming existing experiment from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment...\u001b[0m\n",
295
+ + "\u001b[36m[2023-02-24 08:05:26,646][795538] Weights and Biases integration disabled\u001b[0m\n",
296
+ + "\u001b[37m\u001b[1m[2023-02-24 08:05:26,649][795538] Environment var CUDA_VISIBLE_DEVICES is 1\u001b[0m\n",
297
+ + "Traceback (most recent call last):\n",
298
+ + " File \"<string>\", line 1, in <module>\n",
299
+ + " File \"/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/spawn.py\", line 116, in spawn_main\n",
300
+ + " exitcode = _main(fd, parent_sentinel)\n",
301
+ + " File \"/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/spawn.py\", line 126, in _main\n",
302
+ + " self = reduction.pickle.load(from_parent)\n",
303
+ + "AttributeError: Can't get attribute 'make_vizdoom_encoder' on <module '__main__' (built-in)>\n"
304
+ + ]
305
+ + },
306
+ + {
307
+ + "ename": "KeyboardInterrupt",
308
+ + "evalue": "",
309
+ + "output_type": "error",
310
+ + "traceback": [
311
+ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
312
+ + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
313
+ + "Cell \u001b[0;32mIn[5], line 9\u001b[0m\n\u001b[1;32m 6\u001b[0m env \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mdoom_health_gathering_supreme\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 7\u001b[0m cfg \u001b[39m=\u001b[39m parse_vizdoom_cfg(argv\u001b[39m=\u001b[39m[\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m--env=\u001b[39m\u001b[39m{\u001b[39;00menv\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--num_workers=8\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--num_envs_per_worker=4\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39m--train_for_env_steps=40000000\u001b[39m\u001b[39m\"\u001b[39m])\n\u001b[0;32m----> 9\u001b[0m status \u001b[39m=\u001b[39m run_rl(cfg)\n",
314
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/train.py:37\u001b[0m, in \u001b[0;36mrun_rl\u001b[0;34m(cfg)\u001b[0m\n\u001b[1;32m 32\u001b[0m cfg, runner \u001b[39m=\u001b[39m make_runner(cfg)\n\u001b[1;32m 34\u001b[0m \u001b[39m# here we can register additional message or summary handlers\u001b[39;00m\n\u001b[1;32m 35\u001b[0m \u001b[39m# see sf_examples/dmlab/train_dmlab.py for example\u001b[39;00m\n\u001b[0;32m---> 37\u001b[0m status \u001b[39m=\u001b[39m runner\u001b[39m.\u001b[39;49minit()\n\u001b[1;32m 38\u001b[0m \u001b[39mif\u001b[39;00m status \u001b[39m==\u001b[39m ExperimentStatus\u001b[39m.\u001b[39mSUCCESS:\n\u001b[1;32m 39\u001b[0m status \u001b[39m=\u001b[39m runner\u001b[39m.\u001b[39mrun()\n",
315
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/runners/runner_parallel.py:21\u001b[0m, in \u001b[0;36mParallelRunner.init\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minit\u001b[39m(\u001b[39mself\u001b[39m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m StatusCode:\n\u001b[0;32m---> 21\u001b[0m status \u001b[39m=\u001b[39m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49minit()\n\u001b[1;32m 22\u001b[0m \u001b[39mif\u001b[39;00m status \u001b[39m!=\u001b[39m ExperimentStatus\u001b[39m.\u001b[39mSUCCESS:\n\u001b[1;32m 23\u001b[0m \u001b[39mreturn\u001b[39;00m status\n",
316
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/runners/runner.py:542\u001b[0m, in \u001b[0;36mRunner.init\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 540\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39minit\u001b[39m(\u001b[39mself\u001b[39m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m StatusCode:\n\u001b[1;32m 541\u001b[0m set_global_cuda_envvars(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcfg)\n\u001b[0;32m--> 542\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39menv_info \u001b[39m=\u001b[39m obtain_env_info_in_a_separate_process(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mcfg)\n\u001b[1;32m 544\u001b[0m \u001b[39mfor\u001b[39;00m policy_id \u001b[39min\u001b[39;00m \u001b[39mrange\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcfg\u001b[39m.\u001b[39mnum_policies):\n\u001b[1;32m 545\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mreward_shaping[policy_id] \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39menv_info\u001b[39m.\u001b[39mreward_shaping_scheme\n",
317
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/sample_factory/algo/utils/env_info.py:127\u001b[0m, in \u001b[0;36mobtain_env_info_in_a_separate_process\u001b[0;34m(cfg)\u001b[0m\n\u001b[1;32m 124\u001b[0m p \u001b[39m=\u001b[39m ctx\u001b[39m.\u001b[39mProcess(target\u001b[39m=\u001b[39mspawn_tmp_env_and_get_info, args\u001b[39m=\u001b[39m(sf_context, q, cfg))\n\u001b[1;32m 125\u001b[0m p\u001b[39m.\u001b[39mstart()\n\u001b[0;32m--> 127\u001b[0m env_info \u001b[39m=\u001b[39m q\u001b[39m.\u001b[39;49mget()\n\u001b[1;32m 128\u001b[0m p\u001b[39m.\u001b[39mjoin()\n\u001b[1;32m 130\u001b[0m \u001b[39mif\u001b[39;00m cfg\u001b[39m.\u001b[39muse_env_info_cache:\n",
318
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/queues.py:103\u001b[0m, in \u001b[0;36mQueue.get\u001b[0;34m(self, block, timeout)\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[39mif\u001b[39;00m block \u001b[39mand\u001b[39;00m timeout \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 102\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_rlock:\n\u001b[0;32m--> 103\u001b[0m res \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv_bytes()\n\u001b[1;32m 104\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_sem\u001b[39m.\u001b[39mrelease()\n\u001b[1;32m 105\u001b[0m \u001b[39melse\u001b[39;00m:\n",
319
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:216\u001b[0m, in \u001b[0;36m_ConnectionBase.recv_bytes\u001b[0;34m(self, maxlength)\u001b[0m\n\u001b[1;32m 214\u001b[0m \u001b[39mif\u001b[39;00m maxlength \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m maxlength \u001b[39m<\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[1;32m 215\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\u001b[39m\"\u001b[39m\u001b[39mnegative maxlength\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m--> 216\u001b[0m buf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv_bytes(maxlength)\n\u001b[1;32m 217\u001b[0m \u001b[39mif\u001b[39;00m buf \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 218\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_bad_message_length()\n",
320
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:414\u001b[0m, in \u001b[0;36mConnection._recv_bytes\u001b[0;34m(self, maxsize)\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m_recv_bytes\u001b[39m(\u001b[39mself\u001b[39m, maxsize\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m):\n\u001b[0;32m--> 414\u001b[0m buf \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_recv(\u001b[39m4\u001b[39;49m)\n\u001b[1;32m 415\u001b[0m size, \u001b[39m=\u001b[39m struct\u001b[39m.\u001b[39munpack(\u001b[39m\"\u001b[39m\u001b[39m!i\u001b[39m\u001b[39m\"\u001b[39m, buf\u001b[39m.\u001b[39mgetvalue())\n\u001b[1;32m 416\u001b[0m \u001b[39mif\u001b[39;00m size \u001b[39m==\u001b[39m \u001b[39m-\u001b[39m\u001b[39m1\u001b[39m:\n",
321
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/multiprocessing/connection.py:379\u001b[0m, in \u001b[0;36mConnection._recv\u001b[0;34m(self, size, read)\u001b[0m\n\u001b[1;32m 377\u001b[0m remaining \u001b[39m=\u001b[39m size\n\u001b[1;32m 378\u001b[0m \u001b[39mwhile\u001b[39;00m remaining \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[0;32m--> 379\u001b[0m chunk \u001b[39m=\u001b[39m read(handle, remaining)\n\u001b[1;32m 380\u001b[0m n \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(chunk)\n\u001b[1;32m 381\u001b[0m \u001b[39mif\u001b[39;00m n \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m:\n",
322
+ + "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
323
+ + ]
324
+ + }
325
+ + ],
326
+ "source": [
327
+ "## Start the training, this should take around 15 minutes\n",
328
+ "register_vizdoom_components()\n",
329
+ @@ -358,7 +601,7 @@
330
+ "# The scenario we train on today is health gathering\n",
331
+ "# other scenarios include \"doom_basic\", \"doom_two_colors_easy\", \"doom_dm\", \"doom_dwango5\", \"doom_my_way_home\", \"doom_deadly_corridor\", \"doom_defend_the_center\", \"doom_defend_the_line\"\n",
332
+ "env = \"doom_health_gathering_supreme\"\n",
333
+ - "cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=8\", \"--num_envs_per_worker=4\", \"--train_for_env_steps=4000000\"])\n",
334
+ + "cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=8\", \"--num_envs_per_worker=4\", \"--train_for_env_steps=40000000\"])\n",
335
+ "\n",
336
+ "status = run_rl(cfg)"
337
+ ]
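
(The traceback in this cell's output ends with AttributeError: Can't get attribute 'make_vizdoom_encoder' on <module '__main__' (built-in)>. Sample Factory obtains environment info in a spawned subprocess, and spawn-started workers can only unpickle references to objects importable from a real module, so an encoder factory defined directly in the notebook's __main__ cannot be resolved. That is presumably the motivation for the "from encoder import make_vizdoom_encoder" change earlier in this diff: keep the factory in a small local module. A minimal sketch of such a module, assuming it just re-exports the stock factory:

# encoder.py -- assumed layout, sketch only: keep the encoder factory importable so
# multiprocessing's spawn workers can unpickle references to it instead of looking in __main__.
from sf_examples.vizdoom.doom.doom_model import make_vizdoom_encoder  # stock ViZDoom encoder factory

__all__ = ["make_vizdoom_encoder"]

Registration in the notebook then stays as the log above shows, via global_model_factory().register_encoder_factory(make_vizdoom_encoder).)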
338
+ @@ -416,12 +659,12 @@
339
+ },
340
+ {
341
+ "cell_type": "markdown",
342
+ - "source": [
343
+ - "The agent has learned something, but its performance could be better. We would clearly need to train for longer. But let's upload this model to the Hub."
344
+ - ],
345
+ "metadata": {
346
+ "id": "2A4pf_1VwPqR"
347
+ - }
348
+ + },
349
+ + "source": [
350
+ + "The agent has learned something, but its performance could be better. We would clearly need to train for longer. But let's upload this model to the Hub."
351
+ + ]
352
+ },
353
+ {
354
+ "cell_type": "markdown",
355
+ @@ -464,11 +707,28 @@
356
+ },
357
+ {
358
+ "cell_type": "code",
359
+ - "execution_count": null,
360
+ + "execution_count": 6,
361
+ "metadata": {
362
+ "id": "GoQm_jYSOts0"
363
+ },
364
+ - "outputs": [],
365
+ + "outputs": [
366
+ + {
367
+ + "ename": "ImportError",
368
+ + "evalue": "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the `ipywidgets` module: `pip install ipywidgets`.",
369
+ + "output_type": "error",
370
+ + "traceback": [
371
+ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
372
+ + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
373
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_login.py:188\u001b[0m, in \u001b[0;36mnotebook_login\u001b[0;34m()\u001b[0m\n\u001b[1;32m 187\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 188\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mipywidgets\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mwidgets\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mwidgets\u001b[39;00m \u001b[39m# type: ignore\u001b[39;00m\n\u001b[1;32m 189\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mIPython\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdisplay\u001b[39;00m \u001b[39mimport\u001b[39;00m clear_output, display \u001b[39m# type: ignore\u001b[39;00m\n",
374
+ + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'ipywidgets'",
375
+ + "\nDuring handling of the above exception, another exception occurred:\n",
376
+ + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
377
+ + "Cell \u001b[0;32mIn[6], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mhuggingface_hub\u001b[39;00m \u001b[39mimport\u001b[39;00m notebook_login\n\u001b[0;32m----> 2\u001b[0m notebook_login()\n\u001b[1;32m 3\u001b[0m get_ipython()\u001b[39m.\u001b[39msystem(\u001b[39m'\u001b[39m\u001b[39mgit config --global credential.helper store\u001b[39m\u001b[39m'\u001b[39m)\n",
378
+ + "File \u001b[0;32m~/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_login.py:191\u001b[0m, in \u001b[0;36mnotebook_login\u001b[0;34m()\u001b[0m\n\u001b[1;32m 189\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mIPython\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdisplay\u001b[39;00m \u001b[39mimport\u001b[39;00m clear_output, display \u001b[39m# type: ignore\u001b[39;00m\n\u001b[1;32m 190\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mImportError\u001b[39;00m:\n\u001b[0;32m--> 191\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mImportError\u001b[39;00m(\n\u001b[1;32m 192\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mThe `notebook_login` function can only be used in a notebook (Jupyter or\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 193\u001b[0m \u001b[39m\"\u001b[39m\u001b[39m Colab) and you need the `ipywidgets` module: `pip install ipywidgets`.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 194\u001b[0m )\n\u001b[1;32m 196\u001b[0m box_layout \u001b[39m=\u001b[39m widgets\u001b[39m.\u001b[39mLayout(\n\u001b[1;32m 197\u001b[0m display\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mflex\u001b[39m\u001b[39m\"\u001b[39m, flex_flow\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcolumn\u001b[39m\u001b[39m\"\u001b[39m, align_items\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mcenter\u001b[39m\u001b[39m\"\u001b[39m, width\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m50\u001b[39m\u001b[39m%\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 198\u001b[0m )\n\u001b[1;32m 200\u001b[0m token_widget \u001b[39m=\u001b[39m widgets\u001b[39m.\u001b[39mPassword(description\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mToken:\u001b[39m\u001b[39m\"\u001b[39m)\n",
379
+ + "\u001b[0;31mImportError\u001b[0m: The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the `ipywidgets` module: `pip install ipywidgets`."
380
+ + ]
381
+ + }
382
+ + ],
383
+ "source": [
384
+ "from huggingface_hub import notebook_login\n",
385
+ "notebook_login()\n",
386
+ @@ -477,15 +737,231 @@
387
+ },
388
+ {
389
+ "cell_type": "code",
390
+ - "execution_count": null,
391
+ + "execution_count": 8,
392
+ "metadata": {
393
+ "id": "sEawW_i0OvJV"
394
+ },
395
+ - "outputs": [],
396
+ + "outputs": [
397
+ + {
398
+ + "name": "stderr",
399
+ + "output_type": "stream",
400
+ + "text": [
401
+ + "\u001b[33m[2023-02-24 07:58:39,896][784615] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json\u001b[0m\n",
402
+ + "\u001b[36m[2023-02-24 07:58:39,896][784615] Overriding arg 'num_workers' with value 1 passed from command line\u001b[0m\n",
403
+ + "\u001b[36m[2023-02-24 07:58:39,897][784615] Adding new argument 'no_render'=True that is not in the saved config file!\u001b[0m\n",
404
+ + "\u001b[36m[2023-02-24 07:58:39,897][784615] Adding new argument 'save_video'=True that is not in the saved config file!\u001b[0m\n",
405
+ + "\u001b[36m[2023-02-24 07:58:39,898][784615] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!\u001b[0m\n",
406
+ + "\u001b[36m[2023-02-24 07:58:39,898][784615] Adding new argument 'video_name'=None that is not in the saved config file!\u001b[0m\n",
407
+ + "\u001b[36m[2023-02-24 07:58:39,899][784615] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!\u001b[0m\n",
408
+ + "\u001b[36m[2023-02-24 07:58:39,899][784615] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!\u001b[0m\n",
409
+ + "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'push_to_hub'=True that is not in the saved config file!\u001b[0m\n",
410
+ + "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'hf_repository'='chqmatteo/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!\u001b[0m\n",
411
+ + "\u001b[36m[2023-02-24 07:58:39,900][784615] Adding new argument 'policy_index'=0 that is not in the saved config file!\u001b[0m\n",
412
+ + "\u001b[36m[2023-02-24 07:58:39,901][784615] Adding new argument 'eval_deterministic'=False that is not in the saved config file!\u001b[0m\n",
413
+ + "\u001b[36m[2023-02-24 07:58:39,901][784615] Adding new argument 'train_script'=None that is not in the saved config file!\u001b[0m\n",
414
+ + "\u001b[36m[2023-02-24 07:58:39,902][784615] Adding new argument 'enjoy_script'=None that is not in the saved config file!\u001b[0m\n",
415
+ + "\u001b[36m[2023-02-24 07:58:39,902][784615] Using frameskip 1 and render_action_repeat=4 for evaluation\u001b[0m\n",
416
+ + "\u001b[36m[2023-02-24 07:58:39,911][784615] RunningMeanStd input shape: (3, 72, 128)\u001b[0m\n",
417
+ + "\u001b[36m[2023-02-24 07:58:39,912][784615] RunningMeanStd input shape: (1,)\u001b[0m\n",
418
+ + "\u001b[36m[2023-02-24 07:58:39,919][784615] ConvEncoder: input_channels=3\u001b[0m\n",
419
+ + "\u001b[36m[2023-02-24 07:58:39,943][784615] Conv encoder output size: 512\u001b[0m\n",
420
+ + "\u001b[36m[2023-02-24 07:58:39,944][784615] Policy head output size: 512\u001b[0m\n",
421
+ + "\u001b[33m[2023-02-24 07:58:39,980][784615] Loading state from checkpoint /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000268_1097728.pth...\u001b[0m\n",
422
+ + "\u001b[36m[2023-02-24 07:58:40,400][784615] Num frames 100...\u001b[0m\n",
423
+ + "\u001b[36m[2023-02-24 07:58:40,470][784615] Num frames 200...\u001b[0m\n",
424
+ + "\u001b[36m[2023-02-24 07:58:40,530][784615] Num frames 300...\u001b[0m\n",
425
+ + "\u001b[36m[2023-02-24 07:58:40,596][784615] Num frames 400...\u001b[0m\n",
426
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:40,684][784615] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480\u001b[0m\n",
427
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:40,685][784615] Avg episode reward: 5.480, avg true_objective: 4.480\u001b[0m\n",
428
+ + "\u001b[36m[2023-02-24 07:58:40,719][784615] Num frames 500...\u001b[0m\n",
429
+ + "\u001b[36m[2023-02-24 07:58:40,787][784615] Num frames 600...\u001b[0m\n",
430
+ + "\u001b[36m[2023-02-24 07:58:40,850][784615] Num frames 700...\u001b[0m\n",
431
+ + "\u001b[36m[2023-02-24 07:58:40,918][784615] Num frames 800...\u001b[0m\n",
432
+ + "\u001b[36m[2023-02-24 07:58:41,005][784615] Num frames 900...\u001b[0m\n",
433
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:41,078][784615] Avg episode rewards: #0: 6.640, true rewards: #0: 4.640\u001b[0m\n",
434
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:41,079][784615] Avg episode reward: 6.640, avg true_objective: 4.640\u001b[0m\n",
435
+ + "\u001b[36m[2023-02-24 07:58:41,129][784615] Num frames 1000...\u001b[0m\n",
436
+ + "\u001b[36m[2023-02-24 07:58:41,202][784615] Num frames 1100...\u001b[0m\n",
437
+ + "\u001b[36m[2023-02-24 07:58:41,276][784615] Num frames 1200...\u001b[0m\n",
438
+ + "\u001b[36m[2023-02-24 07:58:41,360][784615] Num frames 1300...\u001b[0m\n",
439
+ + "\u001b[36m[2023-02-24 07:58:41,429][784615] Num frames 1400...\u001b[0m\n",
440
+ + "\u001b[36m[2023-02-24 07:58:41,493][784615] Num frames 1500...\u001b[0m\n",
441
+ + "\u001b[36m[2023-02-24 07:58:41,554][784615] Num frames 1600...\u001b[0m\n",
442
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:41,667][784615] Avg episode rewards: #0: 8.653, true rewards: #0: 5.653\u001b[0m\n",
443
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:41,668][784615] Avg episode reward: 8.653, avg true_objective: 5.653\u001b[0m\n",
444
+ + "\u001b[36m[2023-02-24 07:58:41,673][784615] Num frames 1700...\u001b[0m\n",
445
+ + "\u001b[36m[2023-02-24 07:58:41,734][784615] Num frames 1800...\u001b[0m\n",
446
+ + "\u001b[36m[2023-02-24 07:58:41,791][784615] Num frames 1900...\u001b[0m\n",
447
+ + "\u001b[36m[2023-02-24 07:58:41,848][784615] Num frames 2000...\u001b[0m\n",
448
+ + "\u001b[36m[2023-02-24 07:58:41,905][784615] Num frames 2100...\u001b[0m\n",
449
+ + "\u001b[36m[2023-02-24 07:58:41,961][784615] Num frames 2200...\u001b[0m\n",
450
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,055][784615] Avg episode rewards: #0: 8.680, true rewards: #0: 5.680\u001b[0m\n",
451
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,056][784615] Avg episode reward: 8.680, avg true_objective: 5.680\u001b[0m\n",
452
+ + "\u001b[36m[2023-02-24 07:58:42,077][784615] Num frames 2300...\u001b[0m\n",
453
+ + "\u001b[36m[2023-02-24 07:58:42,141][784615] Num frames 2400...\u001b[0m\n",
454
+ + "\u001b[36m[2023-02-24 07:58:42,203][784615] Num frames 2500...\u001b[0m\n",
455
+ + "\u001b[36m[2023-02-24 07:58:42,260][784615] Num frames 2600...\u001b[0m\n",
456
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,346][784615] Avg episode rewards: #0: 7.712, true rewards: #0: 5.312\u001b[0m\n",
457
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,348][784615] Avg episode reward: 7.712, avg true_objective: 5.312\u001b[0m\n",
458
+ + "\u001b[36m[2023-02-24 07:58:42,385][784615] Num frames 2700...\u001b[0m\n",
459
+ + "\u001b[36m[2023-02-24 07:58:42,449][784615] Num frames 2800...\u001b[0m\n",
460
+ + "\u001b[36m[2023-02-24 07:58:42,506][784615] Num frames 2900...\u001b[0m\n",
461
+ + "\u001b[36m[2023-02-24 07:58:42,563][784615] Num frames 3000...\u001b[0m\n",
462
+ + "\u001b[36m[2023-02-24 07:58:42,621][784615] Num frames 3100...\u001b[0m\n",
463
+ + "\u001b[36m[2023-02-24 07:58:42,688][784615] Num frames 3200...\u001b[0m\n",
464
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,788][784615] Avg episode rewards: #0: 7.940, true rewards: #0: 5.440\u001b[0m\n",
465
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:42,788][784615] Avg episode reward: 7.940, avg true_objective: 5.440\u001b[0m\n",
466
+ + "\u001b[36m[2023-02-24 07:58:42,817][784615] Num frames 3300...\u001b[0m\n",
467
+ + "\u001b[36m[2023-02-24 07:58:42,890][784615] Num frames 3400...\u001b[0m\n",
468
+ + "\u001b[36m[2023-02-24 07:58:42,964][784615] Num frames 3500...\u001b[0m\n",
469
+ + "\u001b[36m[2023-02-24 07:58:43,040][784615] Num frames 3600...\u001b[0m\n",
470
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,128][784615] Avg episode rewards: #0: 7.354, true rewards: #0: 5.211\u001b[0m\n",
471
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,130][784615] Avg episode reward: 7.354, avg true_objective: 5.211\u001b[0m\n",
472
+ + "\u001b[36m[2023-02-24 07:58:43,174][784615] Num frames 3700...\u001b[0m\n",
473
+ + "\u001b[36m[2023-02-24 07:58:43,248][784615] Num frames 3800...\u001b[0m\n",
474
+ + "\u001b[36m[2023-02-24 07:58:43,316][784615] Num frames 3900...\u001b[0m\n",
475
+ + "\u001b[36m[2023-02-24 07:58:43,383][784615] Num frames 4000...\u001b[0m\n",
476
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,475][784615] Avg episode rewards: #0: 7.205, true rewards: #0: 5.080\u001b[0m\n",
477
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,476][784615] Avg episode reward: 7.205, avg true_objective: 5.080\u001b[0m\n",
478
+ + "\u001b[36m[2023-02-24 07:58:43,498][784615] Num frames 4100...\u001b[0m\n",
479
+ + "\u001b[36m[2023-02-24 07:58:43,556][784615] Num frames 4200...\u001b[0m\n",
480
+ + "\u001b[36m[2023-02-24 07:58:43,614][784615] Num frames 4300...\u001b[0m\n",
481
+ + "\u001b[36m[2023-02-24 07:58:43,670][784615] Num frames 4400...\u001b[0m\n",
482
+ + "\u001b[36m[2023-02-24 07:58:43,727][784615] Num frames 4500...\u001b[0m\n",
483
+ + "\u001b[36m[2023-02-24 07:58:43,790][784615] Num frames 4600...\u001b[0m\n",
484
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,891][784615] Avg episode rewards: #0: 7.413, true rewards: #0: 5.191\u001b[0m\n",
485
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:43,891][784615] Avg episode reward: 7.413, avg true_objective: 5.191\u001b[0m\n",
486
+ + "\u001b[36m[2023-02-24 07:58:43,912][784615] Num frames 4700...\u001b[0m\n",
487
+ + "\u001b[36m[2023-02-24 07:58:43,982][784615] Num frames 4800...\u001b[0m\n",
488
+ + "\u001b[36m[2023-02-24 07:58:44,049][784615] Num frames 4900...\u001b[0m\n",
489
+ + "\u001b[36m[2023-02-24 07:58:44,117][784615] Num frames 5000...\u001b[0m\n",
490
+ + "\u001b[36m[2023-02-24 07:58:44,184][784615] Num frames 5100...\u001b[0m\n",
491
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:44,250][784615] Avg episode rewards: #0: 7.220, true rewards: #0: 5.120\u001b[0m\n",
492
+ + "\u001b[37m\u001b[1m[2023-02-24 07:58:44,250][784615] Avg episode reward: 7.220, avg true_objective: 5.120\u001b[0m\n",
493
+ + "ffmpeg version 4.3 Copyright (c) 2000-2020 the FFmpeg developers\n",
494
+ + " built with gcc 7.3.0 (crosstool-NG 1.23.0.449-a04d0)\n",
495
+ + " configuration: --prefix=/opt/conda/conda-bld/ffmpeg_1597178665428/_h_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placeh --cc=/opt/conda/conda-bld/ffmpeg_1597178665428/_build_env/bin/x86_64-conda_cos6-linux-gnu-cc --disable-doc --disable-openssl --enable-avresample --enable-gnutls --enable-hardcoded-tables --enable-libfreetype --enable-libopenh264 --enable-pic --enable-pthreads --enable-shared --disable-static --enable-version3 --enable-zlib --enable-libmp3lame\n",
496
+ + " libavutil 56. 51.100 / 56. 51.100\n",
497
+ + " libavcodec 58. 91.100 / 58. 91.100\n",
498
+ + " libavformat 58. 45.100 / 58. 45.100\n",
499
+ + " libavdevice 58. 10.100 / 58. 10.100\n",
500
+ + " libavfilter 7. 85.100 / 7. 85.100\n",
501
+ + " libavresample 4. 0. 0 / 4. 0. 0\n",
502
+ + " libswscale 5. 7.100 / 5. 7.100\n",
503
+ + " libswresample 3. 7.100 / 3. 7.100\n",
504
+ + "Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '/tmp/sf2_chqma/replay.mp4':\n",
505
+ + " Metadata:\n",
506
+ + " major_brand : isom\n",
507
+ + " minor_version : 512\n",
508
+ + " compatible_brands: isomiso2mp41\n",
509
+ + " encoder : Lavf59.27.100\n",
510
+ + " Duration: 00:02:26.57, start: 0.000000, bitrate: 1521 kb/s\n",
511
+ + " Stream #0:0(und): Video: mpeg4 (Simple Profile) (mp4v / 0x7634706D), yuv420p, 240x180 [SAR 1:1 DAR 4:3], 1519 kb/s, 35 fps, 35 tbr, 17920 tbn, 35 tbc (default)\n",
512
+ + " Metadata:\n",
513
+ + " handler_name : VideoHandler\n",
514
+ + "Unknown encoder 'libx264'\n",
515
+ + "\u001b[36m[2023-02-24 07:58:46,584][784615] Replay video saved to /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!\u001b[0m\n",
516
+ + "/home/chqma/miniconda3/envs/deep-rl-class/lib/python3.10/site-packages/huggingface_hub/_commit_api.py:493: UserWarning: About to commit an empty file: 'git.diff'. Are you sure this is intended?\n",
517
+ + " warnings.warn(\n",
518
+ + "\n",
519
+ + "\u001b[A\n",
520
+ + "\n",
521
+ + "best_000000254_1040384_reward_6.010.pth: 0%| | 0.00/34.9M [00:00<?, ?B/s]\n",
522
+ + "best_000000254_1040384_reward_6.010.pth: 0%| | 123k/34.9M [00:00<01:35, 364kB/s] \n",
523
+ + "best_000000254_1040384_reward_6.010.pth: 1%| | 279k/34.9M [00:00<01:00, 576kB/s]\n",
524
+ + "best_000000254_1040384_reward_6.010.pth: 4%|▍ | 1.42M/34.9M [00:00<00:10, 3.29MB/s]\n",
525
+ + "events.out.tfevents.1677225358.pop-os: 100%|██████████| 72.8k/72.8k [00:01<00:00, 65.3kB/s]s]\n",
526
+ + "\n",
527
+ + "\n",
528
+ + "\u001b[A\u001b[A\n",
529
+ + "best_000000254_1040384_reward_6.010.pth: 11%|█ | 3.80M/34.9M [00:01<00:11, 2.67MB/s]\n",
530
+ + "best_000000254_1040384_reward_6.010.pth: 13%|█▎ | 4.55M/34.9M [00:02<00:11, 2.55MB/s]\n",
531
+ + "best_000000254_1040384_reward_6.010.pth: 15%|█▌ | 5.37M/34.9M [00:02<00:13, 2.27MB/s]\n",
532
+ + "best_000000254_1040384_reward_6.010.pth: 21%|██ | 7.19M/34.9M [00:03<00:11, 2.47MB/s]\n",
533
+ + "best_000000254_1040384_reward_6.010.pth: 23%|██▎ | 7.95M/34.9M [00:03<00:11, 2.41MB/s]\n",
534
+ + "best_000000254_1040384_reward_6.010.pth: 27%|██▋ | 9.45M/34.9M [00:04<00:10, 2.32MB/s]\n",
535
+ + "best_000000254_1040384_reward_6.010.pth: 29%|██▉ | 10.2M/34.9M [00:04<00:10, 2.30MB/s]\n",
536
+ + "best_000000254_1040384_reward_6.010.pth: 34%|███▎ | 11.8M/34.9M [00:05<00:09, 2.33MB/s]\n",
537
+ + "best_000000254_1040384_reward_6.010.pth: 36%|███▌ | 12.6M/34.9M [00:05<00:09, 2.35MB/s]\n",
538
+ + "best_000000254_1040384_reward_6.010.pth: 38%|███▊ | 13.4M/34.9M [00:05<00:09, 2.36MB/s]\n",
539
+ + "best_000000254_1040384_reward_6.010.pth: 41%|████ | 14.2M/34.9M [00:06<00:08, 2.34MB/s]\n",
540
+ + "best_000000254_1040384_reward_6.010.pth: 43%|████▎ | 14.9M/34.9M [00:06<00:08, 2.33MB/s]\n",
541
+ + "best_000000254_1040384_reward_6.010.pth: 47%|████▋ | 16.4M/34.9M [00:07<00:08, 2.28MB/s]\n",
542
+ + "best_000000254_1040384_reward_6.010.pth: 49%|████▉ | 17.2M/34.9M [00:07<00:07, 2.29MB/s]\n",
543
+ + "best_000000254_1040384_reward_6.010.pth: 51%|█████▏ | 17.9M/34.9M [00:07<00:07, 2.27MB/s]\n",
544
+ + "best_000000254_1040384_reward_6.010.pth: 53%|█████▎ | 18.7M/34.9M [00:08<00:07, 2.27MB/s]\n",
545
+ + "best_000000254_1040384_reward_6.010.pth: 58%|█████▊ | 20.2M/34.9M [00:08<00:06, 2.24MB/s]\n",
546
+ + "best_000000254_1040384_reward_6.010.pth: 60%|█████▉ | 20.9M/34.9M [00:09<00:06, 2.22MB/s]\n",
547
+ + "best_000000254_1040384_reward_6.010.pth: 62%|██████▏ | 21.8M/34.9M [00:09<00:06, 2.20MB/s]\n",
548
+ + "\u001b[A\n",
549
+ + "best_000000254_1040384_reward_6.010.pth: 64%|██████▍ | 22.5M/34.9M [00:10<00:05, 2.09MB/s]\n",
550
+ + "best_000000254_1040384_reward_6.010.pth: 67%|██████▋ | 23.3M/34.9M [00:10<00:05, 2.04MB/s]\n",
551
+ + "best_000000254_1040384_reward_6.010.pth: 69%|██████▉ | 24.0M/34.9M [00:10<00:05, 1.98MB/s]\n",
552
+ + "\u001b[A\n",
553
+ + "best_000000254_1040384_reward_6.010.pth: 71%|███████ | 24.8M/34.9M [00:11<00:05, 1.94MB/s]\n",
554
+ + "best_000000254_1040384_reward_6.010.pth: 73%|███████▎ | 25.5M/34.9M [00:11<00:04, 1.90MB/s]\n",
555
+ + "\u001b[A\n",
556
+ + "best_000000254_1040384_reward_6.010.pth: 75%|███████▌ | 26.3M/34.9M [00:12<00:04, 1.84MB/s]\n",
557
+ + "best_000000254_1040384_reward_6.010.pth: 77%|███████▋ | 27.0M/34.9M [00:12<00:04, 1.81MB/s]\n",
558
+ + "\u001b[A\n",
559
+ + "best_000000254_1040384_reward_6.010.pth: 80%|███████▉ | 27.8M/34.9M [00:12<00:03, 1.79MB/s]\n",
560
+ + "\u001b[A\n",
561
+ + "best_000000254_1040384_reward_6.010.pth: 82%|████████▏ | 28.5M/34.9M [00:13<00:03, 1.76MB/s]\n",
562
+ + "best_000000254_1040384_reward_6.010.pth: 84%|████████▍ | 29.3M/34.9M [00:13<00:03, 1.75MB/s]\n",
563
+ + "\u001b[A\n",
564
+ + "best_000000254_1040384_reward_6.010.pth: 86%|████████▌ | 30.0M/34.9M [00:14<00:02, 1.74MB/s]\n",
565
+ + "\u001b[A\n",
566
+ + "best_000000254_1040384_reward_6.010.pth: 88%|████████▊ | 30.8M/34.9M [00:14<00:02, 1.73MB/s]\n",
567
+ + "best_000000254_1040384_reward_6.010.pth: 90%|█████████ | 31.6M/34.9M [00:15<00:01, 1.72MB/s]\n",
568
+ + "\u001b[A\n",
569
+ + "best_000000254_1040384_reward_6.010.pth: 93%|█████████▎| 32.3M/34.9M [00:15<00:01, 1.71MB/s]\n",
570
+ + "\u001b[A\n",
571
+ + "best_000000254_1040384_reward_6.010.pth: 95%|█████████▍| 33.1M/34.9M [00:16<00:01, 1.70MB/s]\n",
572
+ + "best_000000254_1040384_reward_6.010.pth: 97%|█████████▋| 33.8M/34.9M [00:16<00:00, 1.70MB/s]\n",
573
+ + "\u001b[A\n",
574
+ + "best_000000254_1040384_reward_6.010.pth: 99%|█████████▉| 34.6M/34.9M [00:16<00:00, 1.70MB/s]\n",
575
+ + "\u001b[A\n",
576
+ + "\u001b[A\n",
577
+ + "\u001b[A\n",
578
+ + "\u001b[A\n",
579
+ + "\u001b[A\n",
580
+ + "\u001b[A\n",
581
+ + "best_000000254_1040384_reward_6.010.pth: 100%|██████████| 34.9M/34.9M [00:18<00:00, 1.89MB/s]\n",
582
+ + "\n",
583
+ + "\n",
584
+ + "\u001b[A\u001b[A\n",
585
+ + "\u001b[A\n",
586
+ + "\u001b[A\n",
587
+ + "\u001b[A\n",
588
+ + "\u001b[A\n",
589
+ + "\u001b[A\n",
590
+ + "\u001b[A\n",
591
+ + "\u001b[A\n",
592
+ + "\u001b[A\n",
593
+ + "\u001b[A\n",
594
+ + "\u001b[A\n",
595
+ + "\u001b[A\n",
596
+ + "\u001b[A\n",
597
+ + "\u001b[A\n",
598
+ + "\u001b[A\n",
599
+ + "\u001b[A\n",
600
+ + "\u001b[A\n",
601
+ + "\u001b[A\n",
602
+ + "\u001b[A\n",
603
+ + "\u001b[A\n",
604
+ + "\u001b[A\n",
605
+ + "checkpoint_000000268_1097728.pth: 100%|██████████| 34.9M/34.9M [00:23<00:00, 1.49MB/s]\n",
606
+ + "\n",
607
+ + "\n",
608
+ + "Upload 3 LFS files: 100%|██████████| 3/3 [00:23<00:00, 7.81s/it]\n",
609
+ + "\u001b[37m\u001b[1m[2023-02-24 07:59:12,402][784615] The model has been pushed to https://huggingface.co/chqmatteo/rl_course_vizdoom_health_gathering_supreme\u001b[0m\n"
610
+ + ]
611
+ + }
612
+ + ],
613
+ "source": [
614
+ "from sample_factory.enjoy import enjoy\n",
615
+ "\n",
616
+ - "hf_username = \"ThomasSimonini\" # insert your HuggingFace username here\n",
617
+ + "hf_username = \"chqmatteo\" # insert your HuggingFace username here\n",
618
+ "\n",
619
+ "cfg = parse_vizdoom_cfg(argv=[f\"--env={env}\", \"--num_workers=1\", \"--save_video\", \"--no_render\", \"--max_num_episodes=10\", \"--max_num_frames=100000\", \"--push_to_hub\", f\"--hf_repository={hf_username}/rl_course_vizdoom_health_gathering_supreme\"], evaluation=True)\n",
620
+ "status = enjoy(cfg)"
621
+ @@ -493,14 +969,14 @@
622
+ },
623
+ {
624
+ "cell_type": "markdown",
625
+ + "metadata": {
626
+ + "id": "9PzeXx-qxVvw"
627
+ + },
628
+ "source": [
629
+ "## Let's load another model\n",
630
+ "\n",
631
+ "\n"
632
+ - ],
633
+ - "metadata": {
634
+ - "id": "9PzeXx-qxVvw"
635
+ - }
636
+ + ]
637
+ },
638
+ {
639
+ "cell_type": "markdown",
640
+ @@ -566,16 +1042,16 @@
641
+ },
642
+ {
643
+ "cell_type": "markdown",
644
+ + "metadata": {
645
+ + "id": "ie5YWC3NyKO8"
646
+ + },
647
+ "source": [
648
+ "## Some additional challenges 🏆: Doom Deathmatch\n",
649
+ "\n",
650
+ "Training an agent to play a Doom deathmatch **takes many hours on a more beefy machine than is available in Colab**. \n",
651
+ "\n",
652
+ "Fortunately, we have have **already trained an agent in this scenario and it is available in the 🤗 Hub!** Let’s download the model and visualize the agent’s performance."
653
+ - ],
654
+ - "metadata": {
655
+ - "id": "ie5YWC3NyKO8"
656
+ - }
657
+ + ]
658
+ },
659
+ {
660
+ "cell_type": "code",
661
+ @@ -591,12 +1067,12 @@
662
+ },
663
+ {
664
+ "cell_type": "markdown",
665
+ - "source": [
666
+ - "Given the agent plays for a long time the video generation can take **10 minutes**."
667
+ - ],
668
+ "metadata": {
669
+ "id": "7AX_LwxR2FQ0"
670
+ - }
671
+ + },
672
+ + "source": [
673
+ + "Given the agent plays for a long time the video generation can take **10 minutes**."
674
+ + ]
675
+ },
676
+ {
677
+ "cell_type": "code",
678
+ @@ -623,17 +1099,20 @@
679
+ },
680
+ {
681
+ "cell_type": "markdown",
682
+ + "metadata": {
683
+ + "id": "N6mEC-4zyihx"
684
+ + },
685
+ "source": [
686
+ "\n",
687
+ "You **can try to train your agent in this environment** using the code above, but not on colab.\n",
688
+ "**Good luck 🤞**"
689
+ - ],
690
+ - "metadata": {
691
+ - "id": "N6mEC-4zyihx"
692
+ - }
693
+ + ]
694
+ },
695
+ {
696
+ "cell_type": "markdown",
697
+ + "metadata": {
698
+ + "id": "YnDAngN6zeeI"
699
+ + },
700
+ "source": [
701
+ "If you prefer an easier scenario, **why not try training in another ViZDoom scenario such as `doom_deadly_corridor` or `doom_defend_the_center`.**\n",
702
+ "\n",
703
+ @@ -645,34 +1124,46 @@
704
+ "This concludes the last unit. But we are not finished yet! 🤗 The following **bonus section include some of the most interesting, advanced and cutting edge work in Deep Reinforcement Learning**.\n",
705
+ "\n",
706
+ "## Keep learning, stay awesome 🤗"
707
+ - ],
708
+ - "metadata": {
709
+ - "id": "YnDAngN6zeeI"
710
+ - }
711
+ + ]
712
+ }
713
+ ],
714
+ "metadata": {
715
+ "accelerator": "GPU",
716
+ "colab": {
717
+ - "provenance": [],
718
+ "collapsed_sections": [
719
+ "PU4FVzaoM6fC",
720
+ "nB68Eb9UgC94",
721
+ "ez5UhUtYcWXF",
722
+ "sgRy6wnrgnij"
723
+ ],
724
+ + "include_colab_link": true,
725
+ "private_outputs": true,
726
+ - "include_colab_link": true
727
+ + "provenance": []
728
+ },
729
+ "gpuClass": "standard",
730
+ "kernelspec": {
731
+ - "display_name": "Python 3",
732
+ + "display_name": "deep-rl-class",
733
+ + "language": "python",
734
+ "name": "python3"
735
+ },
736
+ "language_info": {
737
+ - "name": "python"
738
+ + "codemirror_mode": {
739
+ + "name": "ipython",
740
+ + "version": 3
741
+ + },
742
+ + "file_extension": ".py",
743
+ + "mimetype": "text/x-python",
744
+ + "name": "python",
745
+ + "nbconvert_exporter": "python",
746
+ + "pygments_lexer": "ipython3",
747
+ + "version": "3.10.9"
748
+ + },
749
+ + "vscode": {
750
+ + "interpreter": {
751
+ + "hash": "da4ecdf31b09708386948f91c5b725d7113689587e88c28098219103c44ec57b"
752
+ + }
753
+ }
754
+ },
755
+ "nbformat": 4,
756
+ "nbformat_minor": 0
757
+ -}
758
+
759
+ +}
sf_log.txt CHANGED
@@ -426,3 +426,408 @@ main_loop: 82.2510
426
  [2023-02-24 07:58:44,250][784615] Avg episode rewards: #0: 7.220, true rewards: #0: 5.120
427
  [2023-02-24 07:58:44,250][784615] Avg episode reward: 7.220, avg true_objective: 5.120
428
  [2023-02-24 07:58:46,584][784615] Replay video saved to /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!
429
+ [2023-02-24 07:59:12,402][784615] The model has been pushed to https://huggingface.co/chqmatteo/rl_course_vizdoom_health_gathering_supreme
430
+ [2023-02-24 08:02:31,136][784615] Environment doom_basic already registered, overwriting...
431
+ [2023-02-24 08:02:31,137][784615] Environment doom_two_colors_easy already registered, overwriting...
432
+ [2023-02-24 08:02:31,137][784615] Environment doom_two_colors_hard already registered, overwriting...
433
+ [2023-02-24 08:02:31,138][784615] Environment doom_dm already registered, overwriting...
434
+ [2023-02-24 08:02:31,138][784615] Environment doom_dwango5 already registered, overwriting...
435
+ [2023-02-24 08:02:31,139][784615] Environment doom_my_way_home_flat_actions already registered, overwriting...
436
+ [2023-02-24 08:02:31,139][784615] Environment doom_defend_the_center_flat_actions already registered, overwriting...
437
+ [2023-02-24 08:02:31,139][784615] Environment doom_my_way_home already registered, overwriting...
438
+ [2023-02-24 08:02:31,140][784615] Environment doom_deadly_corridor already registered, overwriting...
439
+ [2023-02-24 08:02:31,140][784615] Environment doom_defend_the_center already registered, overwriting...
440
+ [2023-02-24 08:02:31,140][784615] Environment doom_defend_the_line already registered, overwriting...
441
+ [2023-02-24 08:02:31,141][784615] Environment doom_health_gathering already registered, overwriting...
442
+ [2023-02-24 08:02:31,141][784615] Environment doom_health_gathering_supreme already registered, overwriting...
443
+ [2023-02-24 08:02:31,142][784615] Environment doom_battle already registered, overwriting...
444
+ [2023-02-24 08:02:31,142][784615] Environment doom_battle2 already registered, overwriting...
445
+ [2023-02-24 08:02:31,142][784615] Environment doom_duel_bots already registered, overwriting...
446
+ [2023-02-24 08:02:31,142][784615] Environment doom_deathmatch_bots already registered, overwriting...
447
+ [2023-02-24 08:02:31,143][784615] Environment doom_duel already registered, overwriting...
448
+ [2023-02-24 08:02:31,143][784615] Environment doom_deathmatch_full already registered, overwriting...
449
+ [2023-02-24 08:02:31,143][784615] Environment doom_benchmark already registered, overwriting...
450
+ [2023-02-24 08:02:31,144][784615] register_encoder_factory: <function make_vizdoom_encoder at 0x7f9fbe7ae680>
451
+ [2023-02-24 08:02:31,153][784615] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
452
+ [2023-02-24 08:02:31,154][784615] Overriding arg 'train_for_env_steps' with value 40000000 passed from command line
453
+ [2023-02-24 08:02:31,157][784615] Experiment dir /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment already exists!
454
+ [2023-02-24 08:02:31,158][784615] Resuming existing experiment from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment...
455
+ [2023-02-24 08:02:31,158][784615] Weights and Biases integration disabled
456
+ [2023-02-24 08:02:31,159][784615] Environment var CUDA_VISIBLE_DEVICES is 1
457
+ [2023-02-24 08:03:09,472][784615] Environment doom_basic already registered, overwriting...
458
+ [2023-02-24 08:03:09,474][784615] Environment doom_two_colors_easy already registered, overwriting...
459
+ [2023-02-24 08:03:09,475][784615] Environment doom_two_colors_hard already registered, overwriting...
460
+ [2023-02-24 08:03:09,476][784615] Environment doom_dm already registered, overwriting...
461
+ [2023-02-24 08:03:09,477][784615] Environment doom_dwango5 already registered, overwriting...
462
+ [2023-02-24 08:03:09,477][784615] Environment doom_my_way_home_flat_actions already registered, overwriting...
463
+ [2023-02-24 08:03:09,478][784615] Environment doom_defend_the_center_flat_actions already registered, overwriting...
464
+ [2023-02-24 08:03:09,478][784615] Environment doom_my_way_home already registered, overwriting...
465
+ [2023-02-24 08:03:09,479][784615] Environment doom_deadly_corridor already registered, overwriting...
466
+ [2023-02-24 08:03:09,479][784615] Environment doom_defend_the_center already registered, overwriting...
467
+ [2023-02-24 08:03:09,480][784615] Environment doom_defend_the_line already registered, overwriting...
468
+ [2023-02-24 08:03:09,480][784615] Environment doom_health_gathering already registered, overwriting...
469
+ [2023-02-24 08:03:09,481][784615] Environment doom_health_gathering_supreme already registered, overwriting...
470
+ [2023-02-24 08:03:09,481][784615] Environment doom_battle already registered, overwriting...
471
+ [2023-02-24 08:03:09,482][784615] Environment doom_battle2 already registered, overwriting...
472
+ [2023-02-24 08:03:09,482][784615] Environment doom_duel_bots already registered, overwriting...
473
+ [2023-02-24 08:03:09,483][784615] Environment doom_deathmatch_bots already registered, overwriting...
474
+ [2023-02-24 08:03:09,483][784615] Environment doom_duel already registered, overwriting...
475
+ [2023-02-24 08:03:09,483][784615] Environment doom_deathmatch_full already registered, overwriting...
476
+ [2023-02-24 08:03:09,484][784615] Environment doom_benchmark already registered, overwriting...
477
+ [2023-02-24 08:03:09,484][784615] register_encoder_factory: <function make_vizdoom_encoder at 0x7f9fbe7ae680>
478
+ [2023-02-24 08:03:09,494][784615] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
479
+ [2023-02-24 08:03:09,495][784615] Overriding arg 'train_for_env_steps' with value 40000000 passed from command line
480
+ [2023-02-24 08:03:09,498][784615] Experiment dir /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment already exists!
481
+ [2023-02-24 08:03:09,499][784615] Resuming existing experiment from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment...
482
+ [2023-02-24 08:03:09,499][784615] Weights and Biases integration disabled
483
+ [2023-02-24 08:03:09,500][784615] Environment var CUDA_VISIBLE_DEVICES is 1
484
+ [2023-02-24 08:06:20,559][795538] Saving configuration to /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json...
485
+ [2023-02-24 08:06:20,725][795538] Rollout worker 0 uses device cpu
486
+ [2023-02-24 08:06:20,726][795538] Rollout worker 1 uses device cpu
487
+ [2023-02-24 08:06:20,726][795538] Rollout worker 2 uses device cpu
488
+ [2023-02-24 08:06:20,727][795538] Rollout worker 3 uses device cpu
489
+ [2023-02-24 08:06:20,727][795538] Rollout worker 4 uses device cpu
490
+ [2023-02-24 08:06:20,727][795538] Rollout worker 5 uses device cpu
491
+ [2023-02-24 08:06:20,728][795538] Rollout worker 6 uses device cpu
492
+ [2023-02-24 08:06:20,728][795538] Rollout worker 7 uses device cpu
493
+ [2023-02-24 08:06:20,767][795538] Using GPUs [0] for process 0 (actually maps to GPUs [1])
494
+ [2023-02-24 08:06:20,768][795538] InferenceWorker_p0-w0: min num requests: 2
495
+ [2023-02-24 08:06:20,819][795538] Starting all processes...
496
+ [2023-02-24 08:06:20,820][795538] Starting process learner_proc0
497
+ [2023-02-24 08:06:20,869][795538] Starting all processes...
498
+ [2023-02-24 08:06:20,872][795538] Starting process inference_proc0-0
499
+ [2023-02-24 08:06:20,873][795538] Starting process rollout_proc0
500
+ [2023-02-24 08:06:20,873][795538] Starting process rollout_proc1
501
+ [2023-02-24 08:06:20,873][795538] Starting process rollout_proc2
502
+ [2023-02-24 08:06:20,873][795538] Starting process rollout_proc3
503
+ [2023-02-24 08:06:20,874][795538] Starting process rollout_proc4
504
+ [2023-02-24 08:06:20,874][795538] Starting process rollout_proc5
505
+ [2023-02-24 08:06:20,874][795538] Starting process rollout_proc6
506
+ [2023-02-24 08:06:20,875][795538] Starting process rollout_proc7
507
+ [2023-02-24 08:06:22,179][795628] Worker 1 uses CPU cores [1]
508
+ [2023-02-24 08:06:22,214][795634] Worker 6 uses CPU cores [6]
509
+ [2023-02-24 08:06:22,319][795626] Low niceness requires sudo!
510
+ [2023-02-24 08:06:22,319][795626] Using GPUs [0] for process 0 (actually maps to GPUs [1])
511
+ [2023-02-24 08:06:22,319][795626] Set environment var CUDA_VISIBLE_DEVICES to '1' (GPU indices [0]) for inference process 0
512
+ [2023-02-24 08:06:22,336][795626] Num visible devices: 1
513
+ [2023-02-24 08:06:22,343][795613] Low niceness requires sudo!
514
+ [2023-02-24 08:06:22,343][795613] Using GPUs [0] for process 0 (actually maps to GPUs [1])
515
+ [2023-02-24 08:06:22,344][795613] Set environment var CUDA_VISIBLE_DEVICES to '1' (GPU indices [0]) for learning process 0
516
+ [2023-02-24 08:06:22,361][795613] Num visible devices: 1
517
+ [2023-02-24 08:06:22,366][795632] Worker 5 uses CPU cores [5]
518
+ [2023-02-24 08:06:22,394][795613] Starting seed is not provided
519
+ [2023-02-24 08:06:22,394][795613] Using GPUs [0] for process 0 (actually maps to GPUs [1])
520
+ [2023-02-24 08:06:22,394][795613] Initializing actor-critic model on device cuda:0
521
+ [2023-02-24 08:06:22,395][795613] RunningMeanStd input shape: (3, 72, 128)
522
+ [2023-02-24 08:06:22,395][795613] RunningMeanStd input shape: (1,)
523
+ [2023-02-24 08:06:22,409][795613] ConvEncoder: input_channels=3
524
+ [2023-02-24 08:06:22,497][795613] Conv encoder output size: 512
525
+ [2023-02-24 08:06:22,497][795613] Policy head output size: 512
526
+ [2023-02-24 08:06:22,506][795613] Created Actor Critic model with architecture:
527
+ [2023-02-24 08:06:22,507][795613] ActorCriticSharedWeights(
528
+ (obs_normalizer): ObservationNormalizer(
529
+ (running_mean_std): RunningMeanStdDictInPlace(
530
+ (running_mean_std): ModuleDict(
531
+ (obs): RunningMeanStdInPlace()
532
+ )
533
+ )
534
+ )
535
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
536
+ (encoder): VizdoomEncoder(
537
+ (basic_encoder): ConvEncoder(
538
+ (enc): RecursiveScriptModule(
539
+ original_name=ConvEncoderImpl
540
+ (conv_head): RecursiveScriptModule(
541
+ original_name=Sequential
542
+ (0): RecursiveScriptModule(original_name=Conv2d)
543
+ (1): RecursiveScriptModule(original_name=ELU)
544
+ (2): RecursiveScriptModule(original_name=Conv2d)
545
+ (3): RecursiveScriptModule(original_name=ELU)
546
+ (4): RecursiveScriptModule(original_name=Conv2d)
547
+ (5): RecursiveScriptModule(original_name=ELU)
548
+ )
549
+ (mlp_layers): RecursiveScriptModule(
550
+ original_name=Sequential
551
+ (0): RecursiveScriptModule(original_name=Linear)
552
+ (1): RecursiveScriptModule(original_name=ELU)
553
+ )
554
+ )
555
+ )
556
+ )
557
+ (core): ModelCoreRNN(
558
+ (core): GRU(512, 512)
559
+ )
560
+ (decoder): MlpDecoder(
561
+ (mlp): Identity()
562
+ )
563
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
564
+ (action_parameterization): ActionParameterizationDefault(
565
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
566
+ )
567
+ )
568
+ [2023-02-24 08:06:22,514][795631] Worker 4 uses CPU cores [4]
569
+ [2023-02-24 08:06:22,542][795633] Worker 7 uses CPU cores [7]
570
+ [2023-02-24 08:06:22,548][795630] Worker 3 uses CPU cores [3]
571
+ [2023-02-24 08:06:22,590][795629] Worker 2 uses CPU cores [2]
572
+ [2023-02-24 08:06:22,604][795627] Worker 0 uses CPU cores [0]
573
+ [2023-02-24 08:06:25,136][795613] Using optimizer <class 'torch.optim.adam.Adam'>
574
+ [2023-02-24 08:06:25,137][795613] Loading state from checkpoint /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000268_1097728.pth...
575
+ [2023-02-24 08:06:25,196][795613] Loading model from checkpoint
576
+ [2023-02-24 08:06:25,198][795613] Loaded experiment state at self.train_step=268, self.env_steps=1097728
577
+ [2023-02-24 08:06:25,198][795613] Initialized policy 0 weights for model version 268
578
+ [2023-02-24 08:06:25,199][795613] LearnerWorker_p0 finished initialization!
579
+ [2023-02-24 08:06:25,199][795613] Using GPUs [0] for process 0 (actually maps to GPUs [1])
580
+ [2023-02-24 08:06:26,272][795626] RunningMeanStd input shape: (3, 72, 128)
581
+ [2023-02-24 08:06:26,272][795626] RunningMeanStd input shape: (1,)
582
+ [2023-02-24 08:06:26,280][795626] ConvEncoder: input_channels=3
583
+ [2023-02-24 08:06:26,341][795626] Conv encoder output size: 512
584
+ [2023-02-24 08:06:26,342][795626] Policy head output size: 512
585
+ [2023-02-24 08:06:27,341][795538] Inference worker 0-0 is ready!
586
+ [2023-02-24 08:06:27,341][795538] All inference workers are ready! Signal rollout workers to start!
587
+ [2023-02-24 08:06:27,358][795628] Doom resolution: 160x120, resize resolution: (128, 72)
588
+ [2023-02-24 08:06:27,358][795631] Doom resolution: 160x120, resize resolution: (128, 72)
589
+ [2023-02-24 08:06:27,358][795627] Doom resolution: 160x120, resize resolution: (128, 72)
590
+ [2023-02-24 08:06:27,359][795629] Doom resolution: 160x120, resize resolution: (128, 72)
591
+ [2023-02-24 08:06:27,364][795634] Doom resolution: 160x120, resize resolution: (128, 72)
592
+ [2023-02-24 08:06:27,381][795632] Doom resolution: 160x120, resize resolution: (128, 72)
593
+ [2023-02-24 08:06:27,380][795630] Doom resolution: 160x120, resize resolution: (128, 72)
594
+ [2023-02-24 08:06:27,394][795633] Doom resolution: 160x120, resize resolution: (128, 72)
595
+ [2023-02-24 08:06:27,569][795627] Decorrelating experience for 0 frames...
596
+ [2023-02-24 08:06:27,619][795634] Decorrelating experience for 0 frames...
597
+ [2023-02-24 08:06:27,650][795629] Decorrelating experience for 0 frames...
598
+ [2023-02-24 08:06:27,665][795630] Decorrelating experience for 0 frames...
599
+ [2023-02-24 08:06:27,805][795634] Decorrelating experience for 32 frames...
600
+ [2023-02-24 08:06:27,881][795630] Decorrelating experience for 32 frames...
601
+ [2023-02-24 08:06:27,882][795627] Decorrelating experience for 32 frames...
602
+ [2023-02-24 08:06:27,882][795632] Decorrelating experience for 0 frames...
603
+ [2023-02-24 08:06:28,090][795630] Decorrelating experience for 64 frames...
604
+ [2023-02-24 08:06:28,092][795632] Decorrelating experience for 32 frames...
605
+ [2023-02-24 08:06:28,106][795627] Decorrelating experience for 64 frames...
606
+ [2023-02-24 08:06:28,308][795632] Decorrelating experience for 64 frames...
607
+ [2023-02-24 08:06:28,376][795628] Decorrelating experience for 0 frames...
608
+ [2023-02-24 08:06:28,383][795630] Decorrelating experience for 96 frames...
609
+ [2023-02-24 08:06:28,383][795629] Decorrelating experience for 32 frames...
610
+ [2023-02-24 08:06:28,395][795627] Decorrelating experience for 96 frames...
611
+ [2023-02-24 08:06:28,625][795628] Decorrelating experience for 32 frames...
612
+ [2023-02-24 08:06:28,660][795633] Decorrelating experience for 0 frames...
613
+ [2023-02-24 08:06:28,720][795632] Decorrelating experience for 96 frames...
614
+ [2023-02-24 08:06:28,729][795631] Decorrelating experience for 0 frames...
615
+ [2023-02-24 08:06:28,907][795633] Decorrelating experience for 32 frames...
616
+ [2023-02-24 08:06:28,955][795628] Decorrelating experience for 64 frames...
617
+ [2023-02-24 08:06:28,975][795634] Decorrelating experience for 64 frames...
618
+ [2023-02-24 08:06:29,141][795633] Decorrelating experience for 64 frames...
619
+ [2023-02-24 08:06:29,169][795631] Decorrelating experience for 32 frames...
620
+ [2023-02-24 08:06:29,237][795628] Decorrelating experience for 96 frames...
621
+ [2023-02-24 08:06:29,253][795629] Decorrelating experience for 64 frames...
622
+ [2023-02-24 08:06:29,425][795631] Decorrelating experience for 64 frames...
623
+ [2023-02-24 08:06:29,531][795629] Decorrelating experience for 96 frames...
624
+ [2023-02-24 08:06:29,537][795634] Decorrelating experience for 96 frames...
625
+ [2023-02-24 08:06:29,541][795633] Decorrelating experience for 96 frames...
626
+ [2023-02-24 08:06:29,684][795631] Decorrelating experience for 96 frames...
627
+ [2023-02-24 08:06:29,698][795538] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 1097728. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
628
+ [2023-02-24 08:06:30,658][795613] Signal inference workers to stop experience collection...
629
+ [2023-02-24 08:06:30,662][795626] InferenceWorker_p0-w0: stopping experience collection
630
+ [2023-02-24 08:06:32,942][795613] Signal inference workers to resume experience collection...
631
+ [2023-02-24 08:06:32,942][795626] InferenceWorker_p0-w0: resuming experience collection
632
+ [2023-02-24 08:06:34,697][795538] Fps is (10 sec: 5734.4, 60 sec: 5734.4, 300 sec: 5734.4). Total num frames: 1126400. Throughput: 0: 628.8. Samples: 3144. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
633
+ [2023-02-24 08:06:34,699][795538] Avg episode reward: [(0, '5.223')]
634
+ [2023-02-24 08:06:35,433][795626] Updated weights for policy 0, policy_version 278 (0.0234)
635
+ [2023-02-24 08:06:38,065][795626] Updated weights for policy 0, policy_version 288 (0.0006)
636
+ [2023-02-24 08:06:39,698][795538] Fps is (10 sec: 10649.4, 60 sec: 10649.4, 300 sec: 10649.4). Total num frames: 1204224. Throughput: 0: 2626.3. Samples: 26264. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
637
+ [2023-02-24 08:06:39,699][795538] Avg episode reward: [(0, '6.797')]
638
+ [2023-02-24 08:06:39,702][795613] Saving new best policy, reward=6.797!
639
+ [2023-02-24 08:06:40,702][795626] Updated weights for policy 0, policy_version 298 (0.0006)
640
+ [2023-02-24 08:06:40,762][795538] Heartbeat connected on Batcher_0
641
+ [2023-02-24 08:06:40,764][795538] Heartbeat connected on LearnerWorker_p0
642
+ [2023-02-24 08:06:40,771][795538] Heartbeat connected on InferenceWorker_p0-w0
643
+ [2023-02-24 08:06:40,775][795538] Heartbeat connected on RolloutWorker_w0
644
+ [2023-02-24 08:06:40,777][795538] Heartbeat connected on RolloutWorker_w2
645
+ [2023-02-24 08:06:40,778][795538] Heartbeat connected on RolloutWorker_w1
646
+ [2023-02-24 08:06:40,779][795538] Heartbeat connected on RolloutWorker_w3
647
+ [2023-02-24 08:06:40,783][795538] Heartbeat connected on RolloutWorker_w4
648
+ [2023-02-24 08:06:40,784][795538] Heartbeat connected on RolloutWorker_w5
649
+ [2023-02-24 08:06:40,819][795538] Heartbeat connected on RolloutWorker_w7
650
+ [2023-02-24 08:06:40,825][795538] Heartbeat connected on RolloutWorker_w6
651
+ [2023-02-24 08:06:43,242][795626] Updated weights for policy 0, policy_version 308 (0.0007)
652
+ [2023-02-24 08:06:44,698][795538] Fps is (10 sec: 15564.8, 60 sec: 12288.0, 300 sec: 12288.0). Total num frames: 1282048. Throughput: 0: 2536.4. Samples: 38046. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
653
+ [2023-02-24 08:06:44,698][795538] Avg episode reward: [(0, '7.215')]
654
+ [2023-02-24 08:06:44,700][795613] Saving new best policy, reward=7.215!
655
+ [2023-02-24 08:06:45,889][795626] Updated weights for policy 0, policy_version 318 (0.0007)
656
+ [2023-02-24 08:06:48,495][795626] Updated weights for policy 0, policy_version 328 (0.0007)
657
+ [2023-02-24 08:06:49,698][795538] Fps is (10 sec: 15565.1, 60 sec: 13107.2, 300 sec: 13107.2). Total num frames: 1359872. Throughput: 0: 3079.6. Samples: 61592. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
658
+ [2023-02-24 08:06:49,699][795538] Avg episode reward: [(0, '8.493')]
659
+ [2023-02-24 08:06:49,702][795613] Saving new best policy, reward=8.493!
660
+ [2023-02-24 08:06:51,107][795626] Updated weights for policy 0, policy_version 338 (0.0007)
661
+ [2023-02-24 08:06:53,724][795626] Updated weights for policy 0, policy_version 348 (0.0007)
662
+ [2023-02-24 08:06:54,698][795538] Fps is (10 sec: 15564.7, 60 sec: 13598.7, 300 sec: 13598.7). Total num frames: 1437696. Throughput: 0: 3404.3. Samples: 85108. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
663
+ [2023-02-24 08:06:54,699][795538] Avg episode reward: [(0, '10.669')]
664
+ [2023-02-24 08:06:54,700][795613] Saving new best policy, reward=10.669!
665
+ [2023-02-24 08:06:56,371][795626] Updated weights for policy 0, policy_version 358 (0.0007)
666
+ [2023-02-24 08:06:58,972][795626] Updated weights for policy 0, policy_version 368 (0.0007)
667
+ [2023-02-24 08:06:59,697][795538] Fps is (10 sec: 15564.8, 60 sec: 13926.4, 300 sec: 13926.4). Total num frames: 1515520. Throughput: 0: 3226.5. Samples: 96796. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
668
+ [2023-02-24 08:06:59,698][795538] Avg episode reward: [(0, '13.259')]
669
+ [2023-02-24 08:06:59,701][795613] Saving new best policy, reward=13.259!
670
+ [2023-02-24 08:07:01,613][795626] Updated weights for policy 0, policy_version 378 (0.0007)
671
+ [2023-02-24 08:07:04,206][795626] Updated weights for policy 0, policy_version 388 (0.0007)
672
+ [2023-02-24 08:07:04,698][795538] Fps is (10 sec: 15564.9, 60 sec: 14160.5, 300 sec: 14160.5). Total num frames: 1593344. Throughput: 0: 3437.5. Samples: 120312. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
673
+ [2023-02-24 08:07:04,698][795538] Avg episode reward: [(0, '13.989')]
674
+ [2023-02-24 08:07:04,699][795613] Saving new best policy, reward=13.989!
675
+ [2023-02-24 08:07:06,831][795626] Updated weights for policy 0, policy_version 398 (0.0007)
676
+ [2023-02-24 08:07:08,206][795538] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 795538], exiting...
677
+ [2023-02-24 08:07:08,207][795538] Runner profile tree view:
678
+ main_loop: 47.3880
679
+ [2023-02-24 08:07:08,208][795613] Stopping Batcher_0...
680
+ [2023-02-24 08:07:08,208][795613] Loop batcher_evt_loop terminating...
681
+ [2023-02-24 08:07:08,208][795538] Collected {0: 1650688}, FPS: 11668.8
682
+ [2023-02-24 08:07:08,209][795613] Saving /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000403_1650688.pth...
683
+ [2023-02-24 08:07:08,216][795632] Stopping RolloutWorker_w5...
684
+ [2023-02-24 08:07:08,216][795632] Loop rollout_proc5_evt_loop terminating...
685
+ [2023-02-24 08:07:08,218][795630] Stopping RolloutWorker_w3...
686
+ [2023-02-24 08:07:08,218][795630] Loop rollout_proc3_evt_loop terminating...
687
+ [2023-02-24 08:07:08,218][795628] Stopping RolloutWorker_w1...
688
+ [2023-02-24 08:07:08,218][795633] Stopping RolloutWorker_w7...
689
+ [2023-02-24 08:07:08,219][795628] Loop rollout_proc1_evt_loop terminating...
690
+ [2023-02-24 08:07:08,219][795633] Loop rollout_proc7_evt_loop terminating...
691
+ [2023-02-24 08:07:08,221][795631] Stopping RolloutWorker_w4...
692
+ [2023-02-24 08:07:08,221][795631] Loop rollout_proc4_evt_loop terminating...
693
+ [2023-02-24 08:07:08,228][795629] Stopping RolloutWorker_w2...
694
+ [2023-02-24 08:07:08,228][795629] Loop rollout_proc2_evt_loop terminating...
695
+ [2023-02-24 08:07:08,230][795627] Stopping RolloutWorker_w0...
696
+ [2023-02-24 08:07:08,230][795634] Stopping RolloutWorker_w6...
697
+ [2023-02-24 08:07:08,230][795634] Loop rollout_proc6_evt_loop terminating...
698
+ [2023-02-24 08:07:08,230][795627] Loop rollout_proc0_evt_loop terminating...
699
+ [2023-02-24 08:07:08,252][795626] Weights refcount: 2 0
700
+ [2023-02-24 08:07:08,256][795626] Stopping InferenceWorker_p0-w0...
701
+ [2023-02-24 08:07:08,260][795626] Loop inference_proc0-0_evt_loop terminating...
702
+ [2023-02-24 08:07:08,376][795613] Stopping LearnerWorker_p0...
703
+ [2023-02-24 08:07:08,377][795613] Loop learner_proc0_evt_loop terminating...
704
+ [2023-02-24 08:07:38,064][795538] Loading existing experiment configuration from /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/config.json
705
+ [2023-02-24 08:07:38,065][795538] Overriding arg 'num_workers' with value 1 passed from command line
706
+ [2023-02-24 08:07:38,065][795538] Adding new argument 'no_render'=True that is not in the saved config file!
707
+ [2023-02-24 08:07:38,065][795538] Adding new argument 'save_video'=True that is not in the saved config file!
708
+ [2023-02-24 08:07:38,066][795538] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
709
+ [2023-02-24 08:07:38,066][795538] Adding new argument 'video_name'=None that is not in the saved config file!
710
+ [2023-02-24 08:07:38,066][795538] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
711
+ [2023-02-24 08:07:38,067][795538] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
712
+ [2023-02-24 08:07:38,067][795538] Adding new argument 'push_to_hub'=True that is not in the saved config file!
713
+ [2023-02-24 08:07:38,068][795538] Adding new argument 'hf_repository'='chqmatteo/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
714
+ [2023-02-24 08:07:38,068][795538] Adding new argument 'policy_index'=0 that is not in the saved config file!
715
+ [2023-02-24 08:07:38,068][795538] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
716
+ [2023-02-24 08:07:38,068][795538] Adding new argument 'train_script'=None that is not in the saved config file!
717
+ [2023-02-24 08:07:38,069][795538] Adding new argument 'enjoy_script'=None that is not in the saved config file!
718
+ [2023-02-24 08:07:38,069][795538] Using frameskip 1 and render_action_repeat=4 for evaluation
719
+ [2023-02-24 08:07:38,076][795538] Doom resolution: 160x120, resize resolution: (128, 72)
720
+ [2023-02-24 08:07:38,077][795538] RunningMeanStd input shape: (3, 72, 128)
721
+ [2023-02-24 08:07:38,078][795538] RunningMeanStd input shape: (1,)
722
+ [2023-02-24 08:07:38,086][795538] ConvEncoder: input_channels=3
723
+ [2023-02-24 08:07:38,169][795538] Conv encoder output size: 512
724
+ [2023-02-24 08:07:38,170][795538] Policy head output size: 512
725
+ [2023-02-24 08:07:40,757][795538] Loading state from checkpoint /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000403_1650688.pth...
726
+ [2023-02-24 08:07:42,930][795538] Num frames 100...
727
+ [2023-02-24 08:07:43,007][795538] Num frames 200...
728
+ [2023-02-24 08:07:43,075][795538] Num frames 300...
729
+ [2023-02-24 08:07:43,138][795538] Num frames 400...
730
+ [2023-02-24 08:07:43,207][795538] Num frames 500...
731
+ [2023-02-24 08:07:43,275][795538] Num frames 600...
732
+ [2023-02-24 08:07:43,349][795538] Num frames 700...
733
+ [2023-02-24 08:07:43,409][795538] Avg episode rewards: #0: 16.090, true rewards: #0: 7.090
734
+ [2023-02-24 08:07:43,410][795538] Avg episode reward: 16.090, avg true_objective: 7.090
735
+ [2023-02-24 08:07:43,472][795538] Num frames 800...
736
+ [2023-02-24 08:07:43,536][795538] Num frames 900...
737
+ [2023-02-24 08:07:43,605][795538] Num frames 1000...
738
+ [2023-02-24 08:07:43,689][795538] Num frames 1100...
739
+ [2023-02-24 08:07:43,790][795538] Avg episode rewards: #0: 10.785, true rewards: #0: 5.785
740
+ [2023-02-24 08:07:43,791][795538] Avg episode reward: 10.785, avg true_objective: 5.785
741
+ [2023-02-24 08:07:43,819][795538] Num frames 1200...
742
+ [2023-02-24 08:07:43,885][795538] Num frames 1300...
743
+ [2023-02-24 08:07:43,956][795538] Num frames 1400...
744
+ [2023-02-24 08:07:44,024][795538] Num frames 1500...
745
+ [2023-02-24 08:07:44,102][795538] Num frames 1600...
746
+ [2023-02-24 08:07:44,182][795538] Num frames 1700...
747
+ [2023-02-24 08:07:44,280][795538] Avg episode rewards: #0: 10.217, true rewards: #0: 5.883
748
+ [2023-02-24 08:07:44,280][795538] Avg episode reward: 10.217, avg true_objective: 5.883
749
+ [2023-02-24 08:07:44,305][795538] Num frames 1800...
750
+ [2023-02-24 08:07:44,373][795538] Num frames 1900...
751
+ [2023-02-24 08:07:44,443][795538] Num frames 2000...
752
+ [2023-02-24 08:07:44,519][795538] Num frames 2100...
753
+ [2023-02-24 08:07:44,607][795538] Num frames 2200...
754
+ [2023-02-24 08:07:44,678][795538] Num frames 2300...
755
+ [2023-02-24 08:07:44,753][795538] Num frames 2400...
756
+ [2023-02-24 08:07:44,838][795538] Num frames 2500...
757
+ [2023-02-24 08:07:44,919][795538] Num frames 2600...
758
+ [2023-02-24 08:07:45,012][795538] Avg episode rewards: #0: 12.653, true rewards: #0: 6.652
759
+ [2023-02-24 08:07:45,013][795538] Avg episode reward: 12.653, avg true_objective: 6.652
760
+ [2023-02-24 08:07:45,040][795538] Num frames 2700...
761
+ [2023-02-24 08:07:45,115][795538] Num frames 2800...
762
+ [2023-02-24 08:07:45,187][795538] Num frames 2900...
763
+ [2023-02-24 08:07:45,261][795538] Num frames 3000...
764
+ [2023-02-24 08:07:45,381][795538] Num frames 3100...
765
+ [2023-02-24 08:07:45,448][795538] Num frames 3200...
766
+ [2023-02-24 08:07:45,513][795538] Num frames 3300...
767
+ [2023-02-24 08:07:45,581][795538] Num frames 3400...
768
+ [2023-02-24 08:07:45,652][795538] Num frames 3500...
769
+ [2023-02-24 08:07:45,717][795538] Num frames 3600...
770
+ [2023-02-24 08:07:45,790][795538] Num frames 3700...
771
+ [2023-02-24 08:07:45,860][795538] Num frames 3800...
772
+ [2023-02-24 08:07:45,935][795538] Num frames 3900...
773
+ [2023-02-24 08:07:46,006][795538] Num frames 4000...
774
+ [2023-02-24 08:07:46,078][795538] Num frames 4100...
775
+ [2023-02-24 08:07:46,213][795538] Avg episode rewards: #0: 17.194, true rewards: #0: 8.394
776
+ [2023-02-24 08:07:46,214][795538] Avg episode reward: 17.194, avg true_objective: 8.394
777
+ [2023-02-24 08:07:46,216][795538] Num frames 4200...
778
+ [2023-02-24 08:07:46,291][795538] Num frames 4300...
779
+ [2023-02-24 08:07:46,369][795538] Num frames 4400...
780
+ [2023-02-24 08:07:46,449][795538] Num frames 4500...
781
+ [2023-02-24 08:07:46,522][795538] Num frames 4600...
782
+ [2023-02-24 08:07:46,591][795538] Num frames 4700...
783
+ [2023-02-24 08:07:46,662][795538] Num frames 4800...
784
+ [2023-02-24 08:07:46,732][795538] Num frames 4900...
785
+ [2023-02-24 08:07:46,831][795538] Avg episode rewards: #0: 16.942, true rewards: #0: 8.275
786
+ [2023-02-24 08:07:46,831][795538] Avg episode reward: 16.942, avg true_objective: 8.275
787
+ [2023-02-24 08:07:46,855][795538] Num frames 5000...
788
+ [2023-02-24 08:07:46,919][795538] Num frames 5100...
789
+ [2023-02-24 08:07:46,986][795538] Num frames 5200...
790
+ [2023-02-24 08:07:47,056][795538] Num frames 5300...
791
+ [2023-02-24 08:07:47,141][795538] Num frames 5400...
792
+ [2023-02-24 08:07:47,210][795538] Num frames 5500...
793
+ [2023-02-24 08:07:47,278][795538] Num frames 5600...
794
+ [2023-02-24 08:07:47,382][795538] Avg episode rewards: #0: 16.394, true rewards: #0: 8.109
795
+ [2023-02-24 08:07:47,383][795538] Avg episode reward: 16.394, avg true_objective: 8.109
796
+ [2023-02-24 08:07:47,406][795538] Num frames 5700...
797
+ [2023-02-24 08:07:47,483][795538] Num frames 5800...
798
+ [2023-02-24 08:07:47,567][795538] Num frames 5900...
799
+ [2023-02-24 08:07:47,636][795538] Num frames 6000...
800
+ [2023-02-24 08:07:47,706][795538] Num frames 6100...
801
+ [2023-02-24 08:07:47,783][795538] Num frames 6200...
802
+ [2023-02-24 08:07:47,861][795538] Num frames 6300...
803
+ [2023-02-24 08:07:47,935][795538] Num frames 6400...
804
+ [2023-02-24 08:07:48,009][795538] Num frames 6500...
805
+ [2023-02-24 08:07:48,093][795538] Num frames 6600...
806
+ [2023-02-24 08:07:48,166][795538] Num frames 6700...
807
+ [2023-02-24 08:07:48,237][795538] Num frames 6800...
808
+ [2023-02-24 08:07:48,313][795538] Num frames 6900...
809
+ [2023-02-24 08:07:48,383][795538] Num frames 7000...
810
+ [2023-02-24 08:07:48,450][795538] Num frames 7100...
811
+ [2023-02-24 08:07:48,516][795538] Num frames 7200...
812
+ [2023-02-24 08:07:48,594][795538] Num frames 7300...
813
+ [2023-02-24 08:07:48,669][795538] Num frames 7400...
814
+ [2023-02-24 08:07:48,742][795538] Num frames 7500...
815
+ [2023-02-24 08:07:48,814][795538] Num frames 7600...
816
+ [2023-02-24 08:07:48,895][795538] Num frames 7700...
817
+ [2023-02-24 08:07:48,996][795538] Avg episode rewards: #0: 21.345, true rewards: #0: 9.720
818
+ [2023-02-24 08:07:48,997][795538] Avg episode reward: 21.345, avg true_objective: 9.720
819
+ [2023-02-24 08:07:49,018][795538] Num frames 7800...
820
+ [2023-02-24 08:07:49,092][795538] Num frames 7900...
821
+ [2023-02-24 08:07:49,171][795538] Num frames 8000...
822
+ [2023-02-24 08:07:49,241][795538] Num frames 8100...
823
+ [2023-02-24 08:07:49,311][795538] Num frames 8200...
824
+ [2023-02-24 08:07:49,424][795538] Avg episode rewards: #0: 20.098, true rewards: #0: 9.209
825
+ [2023-02-24 08:07:49,425][795538] Avg episode reward: 20.098, avg true_objective: 9.209
826
+ [2023-02-24 08:07:49,434][795538] Num frames 8300...
827
+ [2023-02-24 08:07:49,502][795538] Num frames 8400...
828
+ [2023-02-24 08:07:49,568][795538] Num frames 8500...
829
+ [2023-02-24 08:07:49,643][795538] Num frames 8600...
830
+ [2023-02-24 08:07:49,707][795538] Num frames 8700...
831
+ [2023-02-24 08:07:49,763][795538] Avg episode rewards: #0: 18.704, true rewards: #0: 8.704
832
+ [2023-02-24 08:07:49,764][795538] Avg episode reward: 18.704, avg true_objective: 8.704
833
+ [2023-02-24 08:07:53,791][795538] Replay video saved to /mnt/chqma/data-ssd-01/dataset/oss/RWKV-LM/deep-rl-class/notebooks/unit8/train_dir/default_experiment/replay.mp4!
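Below is a minimal sketch, assuming only the huggingface_hub and sample_factory packages already used by this notebook, of how the checkpoint pushed above (repo id taken from the "model has been pushed" log line) could be pulled back from the Hub into a local sample_factory train_dir and replayed. The directory and experiment names are illustrative, not part of this commit.

import shutil
from pathlib import Path

from huggingface_hub import snapshot_download

# Repo id taken from the log line above; everything else below is illustrative.
repo_id = "chqmatteo/rl_course_vizdoom_health_gathering_supreme"

# Download the uploaded files (config.json, checkpoint_p0/*.pth, ...) into the HF cache,
# then copy them into the train_dir/<experiment> layout that sample_factory expects.
snapshot_path = snapshot_download(repo_id=repo_id)
experiment_dir = Path("train_dir") / "downloaded_experiment"  # hypothetical names
shutil.copytree(snapshot_path, experiment_dir, dirs_exist_ok=True)

# The checkpoint can then be replayed with the notebook's own enjoy cell, e.g.
# (parse_vizdoom_cfg is the helper defined earlier in the notebook, not repeated here):
#   cfg = parse_vizdoom_cfg(
#       argv=["--env=doom_health_gathering_supreme", "--num_workers=1", "--save_video",
#             "--no_render", "--max_num_episodes=10",
#             "--train_dir=train_dir", "--experiment=downloaded_experiment"],
#       evaluation=True)
#   status = enjoy(cfg)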