HilbertS committed
Commit 7d90a69 · 1 Parent(s): 8bd3758

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
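The added line tells Git LFS to store `replay.mp4` as a pointer file rather than a regular blob, matching the `*.zip`/`*.zst`/`*tfevents*` patterns above it. As a rough illustration (a hypothetical helper, not part of this commit), the LFS-tracked patterns in a `.gitattributes` file can be listed with the standard library alone:

```
# list_lfs_patterns.py -- hypothetical helper, not part of this repo.
# Prints every .gitattributes pattern whose filter is set to "lfs".
from pathlib import Path

def lfs_patterns(path: str = ".gitattributes") -> list[str]:
    patterns = []
    for line in Path(path).read_text().splitlines():
        parts = line.split()
        # A line looks like: "*.zip filter=lfs diff=lfs merge=lfs -text"
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

if __name__ == "__main__":
    print(lfs_patterns())  # e.g. ['*.zip', '*.zst', '*tfevents*', 'replay.mp4']
```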
.summary/0/events.out.tfevents.1688482360.4c41bf599422 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6e2c311d468e3240ab8832eed36f0db1db3b85b84163a6041d35b446347bd3a3
+ size 40
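Each of the small files added here is not the TensorBoard data itself but a Git LFS pointer: three text lines giving the LFS spec version, the SHA-256 of the real object, and its size in bytes. A minimal sketch of a parser for this pointer format (the function name is my own):

```
# parse_lfs_pointer.py -- illustrative sketch of the 3-line LFS pointer format.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Lines look like: "version <url>", "oid sha256:<hex>", "size <bytes>"
    fields = dict(line.split(" ", 1)
                  for line in Path(path).read_text().splitlines() if line)
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

# For the pointer above this returns size_bytes=40 and the sha256 shown.
```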
.summary/0/events.out.tfevents.1688482563.4c41bf599422 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25b950b8049f5c82f645139bf8f653c0a7613b33df1845b19ea84a06956d6252
+ size 68047
.summary/0/events.out.tfevents.1688482767.4c41bf599422 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1ad7eee8f02d44e079ad8faa000e046ba9f6551110bfefad139ba37a9e362fd
+ size 2409
.summary/0/events.out.tfevents.1688482948.4c41bf599422 ADDED
File without changes
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 4.56 +/- 0.74
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation on how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r HilbertS/rl_course_vizdoom_health_gathering_supreme
+ ```
+
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```
+
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
+
+ Note that you may need to set `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously stopped.
+
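Besides the `load_from_hub` helper shown in the README above, the files in this commit can also be fetched with the generic `huggingface_hub` client; a minimal sketch (the target directory is an arbitrary choice of mine):

```
# download_snapshot.py -- sketch; fetches this repo with huggingface_hub.
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    repo_id="HilbertS/rl_course_vizdoom_health_gathering_supreme",
    local_dir="./train_dir/rl_course_vizdoom_health_gathering_supreme",
)
print(local_path)  # cfg.json, the checkpoints and replay.mp4 now live here
```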
cfg.json ADDED
@@ -0,0 +1,141 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_health_gathering_supreme",
+     "experiment": "default_experiment",
+     "train_dir": "/content/train_dir",
+     "restart_behavior": "resume",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 1,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 25000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+     "cli_args": {
+         "env": "doom_health_gathering_supreme",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 4000000
+     },
+     "git_hash": "unknown",
+     "git_repo_name": "not a git repository",
+     "train_script": ".usr.local.lib.python3.10.dist-packages.ipykernel_launcher"
+ }
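`cfg.json` is plain JSON, so the run's hyperparameters can be inspected without installing Sample-Factory; a small sketch (file path as uploaded in this commit):

```
# inspect_cfg.py -- prints a few key training hyperparameters from cfg.json.
import json

with open("cfg.json") as f:
    cfg = json.load(f)

for key in ("algo", "env", "batch_size", "rollout",
            "learning_rate", "gamma", "train_for_env_steps"):
    print(key, "=", cfg[key])
# -> algo = APPO, env = doom_health_gathering_supreme, batch_size = 1024, ...
```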
checkpoint_p0/best_000000112_458752_reward_4.720.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa01e96f24316f451761602db0c9bee351f367917dc559e2f2adc23ad44385c1
+ size 34928614
checkpoint_p0/checkpoint_000000112_458752.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bf72ed4c23507a81c897a14d729e46b1fd1e2537398862b19aeed69530aad37
+ size 34929028
checkpoint_p0/checkpoint_000000113_462848.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce7d2579da71521b3a1fa1140d4c0c5ac3cbaaa71a65742ca09dbd881c28c0e4
+ size 34929220
config.json ADDED
@@ -0,0 +1,143 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_health_gathering_supreme",
+     "experiment": "default_experiment",
+     "train_dir": "/content/train_dir",
+     "restart_behavior": "resume",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 1,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 25000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+     "cli_args": {
+         "env": "doom_health_gathering_supreme",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 4000000
+     },
+     "git_hash": "unknown",
+     "git_repo_name": "not a git repository",
+     "train_script": ".usr.local.lib.python3.10.dist-packages.ipykernel_launcher",
+     "lr_adaptive_min": 1e-06,
+     "lr_adaptive_max": 0.01
+ }
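`config.json` repeats `cfg.json` verbatim and only appends the two `lr_adaptive_*` keys; a quick way to verify that with the standard library:

```
# diff_configs.py -- compares the two config files shipped in this commit.
import json

with open("cfg.json") as f:
    cfg = json.load(f)
with open("config.json") as f:
    config = json.load(f)

print("only in config.json:", sorted(set(config) - set(cfg)))
# -> ['lr_adaptive_max', 'lr_adaptive_min']
print("keys with different values:", [k for k in cfg if cfg[k] != config[k]])
# -> []
```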
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:463bde631ab069a76964585704799764fbe4fceee77e43db534eb6cbb14fa44b
+ size 7192687
sf_log.txt ADDED
@@ -0,0 +1,1183 @@
+ [2023-07-04 14:52:45,345][00220] Saving configuration to /content/train_dir/default_experiment/config.json...
+ [2023-07-04 14:52:45,348][00220] Rollout worker 0 uses device cpu
+ [2023-07-04 14:52:45,352][00220] Rollout worker 1 uses device cpu
+ [2023-07-04 14:52:45,355][00220] Rollout worker 2 uses device cpu
+ [2023-07-04 14:52:45,357][00220] Rollout worker 3 uses device cpu
+ [2023-07-04 14:52:45,358][00220] Rollout worker 4 uses device cpu
+ [2023-07-04 14:52:45,359][00220] Rollout worker 5 uses device cpu
+ [2023-07-04 14:52:45,363][00220] Rollout worker 6 uses device cpu
+ [2023-07-04 14:52:45,364][00220] Rollout worker 7 uses device cpu
+ [2023-07-04 14:52:45,565][00220] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:52:45,567][00220] InferenceWorker_p0-w0: min num requests: 2
+ [2023-07-04 14:52:45,611][00220] Starting all processes...
+ [2023-07-04 14:52:45,614][00220] Starting process learner_proc0
+ [2023-07-04 14:52:45,621][00220] EvtLoop [Runner_EvtLoop, process=main process 220] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=()
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
+     self._start_processes()
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
+     p.start()
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
+     self._process.start()
+   File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
+     self._popen = self._Popen(self)
+   File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
+     return Popen(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
+     super().__init__(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
+     self._launch(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
+     reduction.dump(process_obj, fp)
+   File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
+     ForkingPickler(file, protocol).dump(obj)
+ TypeError: cannot pickle 'TLSBuffer' object
+ [2023-07-04 14:52:45,628][00220] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop
+ [2023-07-04 14:52:45,631][00220] Uncaught exception in Runner evt loop
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run
+     evt_loop_status = self.event_loop.exec()
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec
+     raise exc
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec
+     while self._loop_iteration():
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration
+     self._process_signal(s)
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal
+     raise exc
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
+     self._start_processes()
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
+     p.start()
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
+     self._process.start()
+   File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
+     self._popen = self._Popen(self)
+   File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
+     return Popen(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
+     super().__init__(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
+     self._launch(process_obj)
+   File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
+     reduction.dump(process_obj, fp)
+   File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
+     ForkingPickler(file, protocol).dump(obj)
+ TypeError: cannot pickle 'TLSBuffer' object
+ [2023-07-04 14:52:45,636][00220] Runner profile tree view:
+ main_loop: 0.0248
+ [2023-07-04 14:52:45,637][00220] Collected {}, FPS: 0.0
+ [2023-07-04 14:56:08,351][13487] Saving configuration to /content/train_dir/default_experiment/cfg.json...
+ [2023-07-04 14:56:08,364][13487] Rollout worker 0 uses device cpu
+ [2023-07-04 14:56:08,368][13487] Rollout worker 1 uses device cpu
+ [2023-07-04 14:56:08,371][13487] Rollout worker 2 uses device cpu
+ [2023-07-04 14:56:08,378][13487] Rollout worker 3 uses device cpu
+ [2023-07-04 14:56:08,380][13487] Rollout worker 4 uses device cpu
+ [2023-07-04 14:56:08,381][13487] Rollout worker 5 uses device cpu
+ [2023-07-04 14:56:08,383][13487] Rollout worker 6 uses device cpu
+ [2023-07-04 14:56:08,384][13487] Rollout worker 7 uses device cpu
+ [2023-07-04 14:56:08,681][13487] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:56:08,686][13487] InferenceWorker_p0-w0: min num requests: 2
+ [2023-07-04 14:56:08,755][13487] Starting all processes...
+ [2023-07-04 14:56:08,764][13487] Starting process learner_proc0
+ [2023-07-04 14:56:08,849][13487] Starting all processes...
+ [2023-07-04 14:56:08,965][13487] Starting process inference_proc0-0
+ [2023-07-04 14:56:08,971][13487] Starting process rollout_proc0
+ [2023-07-04 14:56:08,971][13487] Starting process rollout_proc1
+ [2023-07-04 14:56:08,971][13487] Starting process rollout_proc2
+ [2023-07-04 14:56:08,971][13487] Starting process rollout_proc3
+ [2023-07-04 14:56:08,972][13487] Starting process rollout_proc4
+ [2023-07-04 14:56:08,972][13487] Starting process rollout_proc5
+ [2023-07-04 14:56:08,972][13487] Starting process rollout_proc6
+ [2023-07-04 14:56:08,972][13487] Starting process rollout_proc7
+ [2023-07-04 14:56:22,840][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:56:22,855][13825] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+ [2023-07-04 14:56:22,937][13825] Num visible devices: 1
+ [2023-07-04 14:56:22,984][13825] Starting seed is not provided
+ [2023-07-04 14:56:22,984][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:56:22,984][13825] Initializing actor-critic model on device cuda:0
+ [2023-07-04 14:56:22,985][13825] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 14:56:22,988][13825] RunningMeanStd input shape: (1,)
+ [2023-07-04 14:56:23,216][13825] ConvEncoder: input_channels=3
+ [2023-07-04 14:56:24,689][13838] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:56:24,689][13838] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+ [2023-07-04 14:56:24,866][13838] Num visible devices: 1
+ [2023-07-04 14:56:25,312][13825] Conv encoder output size: 512
+ [2023-07-04 14:56:25,328][13825] Policy head output size: 512
+ [2023-07-04 14:56:25,504][13846] Worker 7 uses CPU cores [1]
+ [2023-07-04 14:56:25,522][13825] Created Actor Critic model with architecture:
+ [2023-07-04 14:56:25,533][13825] ActorCriticSharedWeights(
+   (obs_normalizer): ObservationNormalizer(
+     (running_mean_std): RunningMeanStdDictInPlace(
+       (running_mean_std): ModuleDict(
+         (obs): RunningMeanStdInPlace()
+       )
+     )
+   )
+   (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+   (encoder): VizdoomEncoder(
+     (basic_encoder): ConvEncoder(
+       (enc): RecursiveScriptModule(
+         original_name=ConvEncoderImpl
+         (conv_head): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Conv2d)
+           (1): RecursiveScriptModule(original_name=ELU)
+           (2): RecursiveScriptModule(original_name=Conv2d)
+           (3): RecursiveScriptModule(original_name=ELU)
+           (4): RecursiveScriptModule(original_name=Conv2d)
+           (5): RecursiveScriptModule(original_name=ELU)
+         )
+         (mlp_layers): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Linear)
+           (1): RecursiveScriptModule(original_name=ELU)
+         )
+       )
+     )
+   )
+   (core): ModelCoreRNN(
+     (core): GRU(512, 512)
+   )
+   (decoder): MlpDecoder(
+     (mlp): Identity()
+   )
+   (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+   (action_parameterization): ActionParameterizationDefault(
+     (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+   )
+ )
+ [2023-07-04 14:56:25,542][13844] Worker 4 uses CPU cores [0]
+ [2023-07-04 14:56:25,565][13840] Worker 1 uses CPU cores [1]
+ [2023-07-04 14:56:25,624][13841] Worker 2 uses CPU cores [0]
+ [2023-07-04 14:56:25,641][13845] Worker 6 uses CPU cores [0]
+ [2023-07-04 14:56:25,811][13842] Worker 3 uses CPU cores [1]
+ [2023-07-04 14:56:25,832][13839] Worker 0 uses CPU cores [0]
+ [2023-07-04 14:56:25,886][13843] Worker 5 uses CPU cores [1]
+ [2023-07-04 14:56:28,667][13487] Heartbeat connected on Batcher_0
+ [2023-07-04 14:56:28,682][13487] Heartbeat connected on InferenceWorker_p0-w0
+ [2023-07-04 14:56:28,697][13487] Heartbeat connected on RolloutWorker_w0
+ [2023-07-04 14:56:28,707][13487] Heartbeat connected on RolloutWorker_w1
+ [2023-07-04 14:56:28,714][13487] Heartbeat connected on RolloutWorker_w2
+ [2023-07-04 14:56:28,720][13487] Heartbeat connected on RolloutWorker_w3
+ [2023-07-04 14:56:28,724][13487] Heartbeat connected on RolloutWorker_w4
+ [2023-07-04 14:56:28,729][13487] Heartbeat connected on RolloutWorker_w5
+ [2023-07-04 14:56:28,753][13487] Heartbeat connected on RolloutWorker_w6
+ [2023-07-04 14:56:28,759][13487] Heartbeat connected on RolloutWorker_w7
+ [2023-07-04 14:56:32,229][13825] Using optimizer <class 'torch.optim.adam.Adam'>
+ [2023-07-04 14:56:32,230][13825] No checkpoints found
+ [2023-07-04 14:56:32,230][13825] Did not load from checkpoint, starting from scratch!
+ [2023-07-04 14:56:32,230][13825] Initialized policy 0 weights for model version 0
+ [2023-07-04 14:56:32,233][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:56:32,240][13825] LearnerWorker_p0 finished initialization!
+ [2023-07-04 14:56:32,241][13487] Heartbeat connected on LearnerWorker_p0
+ [2023-07-04 14:56:32,459][13838] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 14:56:32,460][13838] RunningMeanStd input shape: (1,)
+ [2023-07-04 14:56:32,473][13838] ConvEncoder: input_channels=3
+ [2023-07-04 14:56:32,585][13838] Conv encoder output size: 512
+ [2023-07-04 14:56:32,586][13838] Policy head output size: 512
+ [2023-07-04 14:56:33,357][13487] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 14:56:33,926][13487] Inference worker 0-0 is ready!
+ [2023-07-04 14:56:33,928][13487] All inference workers are ready! Signal rollout workers to start!
+ [2023-07-04 14:56:34,046][13840] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,065][13841] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,066][13846] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,074][13842] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,075][13843] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,079][13845] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,077][13839] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:34,081][13844] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:56:35,868][13840] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:35,862][13843] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:35,860][13846] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:36,440][13844] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:36,449][13841] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:36,444][13839] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:36,450][13845] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:37,968][13842] Decorrelating experience for 0 frames...
+ [2023-07-04 14:56:37,979][13843] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:37,986][13840] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:38,089][13839] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:38,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 14:56:38,401][13846] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:39,856][13842] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:40,243][13843] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:40,337][13844] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:40,507][13841] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:40,623][13846] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:40,968][13839] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:42,209][13845] Decorrelating experience for 32 frames...
+ [2023-07-04 14:56:42,228][13840] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:42,476][13842] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:42,619][13844] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:42,739][13843] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:42,924][13841] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:43,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 14:56:43,972][13842] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:44,395][13840] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:45,050][13839] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:45,170][13845] Decorrelating experience for 64 frames...
+ [2023-07-04 14:56:45,431][13844] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:45,813][13846] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:46,956][13841] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:47,275][13845] Decorrelating experience for 96 frames...
+ [2023-07-04 14:56:48,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 2.1. Samples: 32. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 14:56:48,361][13487] Avg episode reward: [(0, '1.747')]
+ [2023-07-04 14:56:49,762][13825] Signal inference workers to stop experience collection...
+ [2023-07-04 14:56:49,786][13838] InferenceWorker_p0-w0: stopping experience collection
+ [2023-07-04 14:56:52,138][13825] Signal inference workers to resume experience collection...
+ [2023-07-04 14:56:52,138][13838] InferenceWorker_p0-w0: resuming experience collection
+ [2023-07-04 14:56:53,355][13487] Fps is (10 sec: 409.6, 60 sec: 204.8, 300 sec: 204.8). Total num frames: 4096. Throughput: 0: 120.3. Samples: 2406. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+ [2023-07-04 14:56:53,365][13487] Avg episode reward: [(0, '2.567')]
+ [2023-07-04 14:56:58,359][13487] Fps is (10 sec: 2048.0, 60 sec: 819.3, 300 sec: 819.3). Total num frames: 20480. Throughput: 0: 242.9. Samples: 6072. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+ [2023-07-04 14:56:58,364][13487] Avg episode reward: [(0, '3.442')]
+ [2023-07-04 14:57:03,356][13487] Fps is (10 sec: 2867.2, 60 sec: 1092.3, 300 sec: 1092.3). Total num frames: 32768. Throughput: 0: 266.5. Samples: 7996. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-07-04 14:57:03,363][13487] Avg episode reward: [(0, '3.844')]
+ [2023-07-04 14:57:05,411][13838] Updated weights for policy 0, policy_version 10 (0.0013)
+ [2023-07-04 14:57:08,358][13487] Fps is (10 sec: 2866.5, 60 sec: 1404.3, 300 sec: 1404.3). Total num frames: 49152. Throughput: 0: 341.9. Samples: 11968. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:57:08,362][13487] Avg episode reward: [(0, '4.352')]
+ [2023-07-04 14:57:13,355][13487] Fps is (10 sec: 3276.9, 60 sec: 1638.5, 300 sec: 1638.5). Total num frames: 65536. Throughput: 0: 424.9. Samples: 16996. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2023-07-04 14:57:13,364][13487] Avg episode reward: [(0, '4.580')]
+ [2023-07-04 14:57:17,518][13838] Updated weights for policy 0, policy_version 20 (0.0028)
+ [2023-07-04 14:57:18,355][13487] Fps is (10 sec: 3277.7, 60 sec: 1820.5, 300 sec: 1820.5). Total num frames: 81920. Throughput: 0: 438.8. Samples: 19746. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:57:18,362][13487] Avg episode reward: [(0, '4.505')]
+ [2023-07-04 14:57:23,355][13487] Fps is (10 sec: 3276.8, 60 sec: 1966.2, 300 sec: 1966.2). Total num frames: 98304. Throughput: 0: 544.0. Samples: 24482. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-07-04 14:57:23,364][13487] Avg episode reward: [(0, '4.418')]
+ [2023-07-04 14:57:28,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2010.8, 300 sec: 2010.8). Total num frames: 110592. Throughput: 0: 618.8. Samples: 27846. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-07-04 14:57:28,358][13487] Avg episode reward: [(0, '4.334')]
+ [2023-07-04 14:57:28,364][13825] Saving new best policy, reward=4.334!
+ [2023-07-04 14:57:33,355][13487] Fps is (10 sec: 2048.0, 60 sec: 1979.8, 300 sec: 1979.8). Total num frames: 118784. Throughput: 0: 653.3. Samples: 29430. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-07-04 14:57:33,365][13487] Avg episode reward: [(0, '4.259')]
+ [2023-07-04 14:57:33,917][13838] Updated weights for policy 0, policy_version 30 (0.0029)
+ [2023-07-04 14:57:38,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2321.1, 300 sec: 2142.6). Total num frames: 139264. Throughput: 0: 705.0. Samples: 34132. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-07-04 14:57:38,358][13487] Avg episode reward: [(0, '4.359')]
+ [2023-07-04 14:57:38,361][13825] Saving new best policy, reward=4.359!
+ [2023-07-04 14:57:43,355][13487] Fps is (10 sec: 3686.4, 60 sec: 2594.1, 300 sec: 2223.6). Total num frames: 155648. Throughput: 0: 743.9. Samples: 39548. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:57:43,359][13487] Avg episode reward: [(0, '4.429')]
+ [2023-07-04 14:57:43,367][13825] Saving new best policy, reward=4.429!
+ [2023-07-04 14:57:46,324][13838] Updated weights for policy 0, policy_version 40 (0.0014)
+ [2023-07-04 14:57:48,358][13487] Fps is (10 sec: 2866.3, 60 sec: 2798.8, 300 sec: 2239.1). Total num frames: 167936. Throughput: 0: 741.1. Samples: 41348. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:57:48,370][13487] Avg episode reward: [(0, '4.436')]
+ [2023-07-04 14:57:48,372][13825] Saving new best policy, reward=4.436!
+ [2023-07-04 14:57:53,358][13487] Fps is (10 sec: 2456.9, 60 sec: 2935.3, 300 sec: 2252.8). Total num frames: 180224. Throughput: 0: 727.6. Samples: 44708. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-07-04 14:57:53,364][13487] Avg episode reward: [(0, '4.338')]
+ [2023-07-04 14:57:58,356][13487] Fps is (10 sec: 2458.2, 60 sec: 2867.2, 300 sec: 2264.9). Total num frames: 192512. Throughput: 0: 701.2. Samples: 48550. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+ [2023-07-04 14:57:58,358][13487] Avg episode reward: [(0, '4.355')]
+ [2023-07-04 14:58:01,471][13838] Updated weights for policy 0, policy_version 50 (0.0025)
+ [2023-07-04 14:58:03,355][13487] Fps is (10 sec: 2868.0, 60 sec: 2935.5, 300 sec: 2321.1). Total num frames: 208896. Throughput: 0: 701.0. Samples: 51292. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:58:03,362][13487] Avg episode reward: [(0, '4.364')]
+ [2023-07-04 14:58:03,372][13825] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000051_208896.pth...
+ [2023-07-04 14:58:08,355][13487] Fps is (10 sec: 3277.0, 60 sec: 2935.6, 300 sec: 2371.4). Total num frames: 225280. Throughput: 0: 711.7. Samples: 56508. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:58:08,360][13487] Avg episode reward: [(0, '4.315')]
+ [2023-07-04 14:58:13,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2867.2, 300 sec: 2375.7). Total num frames: 237568. Throughput: 0: 720.0. Samples: 60244. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:13,360][13487] Avg episode reward: [(0, '4.179')]
+ [2023-07-04 14:58:15,912][13838] Updated weights for policy 0, policy_version 60 (0.0016)
+ [2023-07-04 14:58:18,355][13487] Fps is (10 sec: 2457.5, 60 sec: 2798.9, 300 sec: 2379.6). Total num frames: 249856. Throughput: 0: 727.3. Samples: 62160. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2023-07-04 14:58:18,360][13487] Avg episode reward: [(0, '4.397')]
+ [2023-07-04 14:58:23,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2798.9, 300 sec: 2420.4). Total num frames: 266240. Throughput: 0: 727.3. Samples: 66860. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:23,357][13487] Avg episode reward: [(0, '4.491')]
+ [2023-07-04 14:58:23,418][13825] Saving new best policy, reward=4.491!
+ [2023-07-04 14:58:27,577][13838] Updated weights for policy 0, policy_version 70 (0.0032)
+ [2023-07-04 14:58:28,355][13487] Fps is (10 sec: 3686.5, 60 sec: 2935.5, 300 sec: 2493.3). Total num frames: 286720. Throughput: 0: 734.4. Samples: 72596. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:58:28,358][13487] Avg episode reward: [(0, '4.461')]
+ [2023-07-04 14:58:33,355][13487] Fps is (10 sec: 3686.4, 60 sec: 3072.0, 300 sec: 2525.9). Total num frames: 303104. Throughput: 0: 752.8. Samples: 75222. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:33,358][13487] Avg episode reward: [(0, '4.361')]
+ [2023-07-04 14:58:38,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2935.5, 300 sec: 2523.2). Total num frames: 315392. Throughput: 0: 765.6. Samples: 79156. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:38,358][13487] Avg episode reward: [(0, '4.276')]
+ [2023-07-04 14:58:41,845][13838] Updated weights for policy 0, policy_version 80 (0.0032)
+ [2023-07-04 14:58:43,355][13487] Fps is (10 sec: 2457.6, 60 sec: 2867.2, 300 sec: 2520.7). Total num frames: 327680. Throughput: 0: 766.1. Samples: 83024. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:43,357][13487] Avg episode reward: [(0, '4.358')]
+ [2023-07-04 14:58:48,355][13487] Fps is (10 sec: 3276.8, 60 sec: 3003.9, 300 sec: 2579.0). Total num frames: 348160. Throughput: 0: 770.8. Samples: 85976. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-07-04 14:58:48,360][13487] Avg episode reward: [(0, '4.336')]
+ [2023-07-04 14:58:52,573][13838] Updated weights for policy 0, policy_version 90 (0.0013)
+ [2023-07-04 14:58:53,357][13487] Fps is (10 sec: 4095.3, 60 sec: 3140.3, 300 sec: 2633.1). Total num frames: 368640. Throughput: 0: 792.8. Samples: 92186. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-07-04 14:58:53,360][13487] Avg episode reward: [(0, '4.373')]
+ [2023-07-04 14:58:58,355][13487] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 2627.1). Total num frames: 380928. Throughput: 0: 807.2. Samples: 96570. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-07-04 14:58:58,358][13487] Avg episode reward: [(0, '4.562')]
+ [2023-07-04 14:58:58,364][13825] Saving new best policy, reward=4.562!
+ [2023-07-04 14:59:03,355][13487] Fps is (10 sec: 2867.7, 60 sec: 3140.3, 300 sec: 2648.8). Total num frames: 397312. Throughput: 0: 807.0. Samples: 98474. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2023-07-04 14:59:03,367][13487] Avg episode reward: [(0, '4.574')]
+ [2023-07-04 14:59:03,382][13825] Saving new best policy, reward=4.574!
+ [2023-07-04 14:59:07,557][13838] Updated weights for policy 0, policy_version 100 (0.0027)
+ [2023-07-04 14:59:08,355][13487] Fps is (10 sec: 2867.2, 60 sec: 3072.0, 300 sec: 2642.6). Total num frames: 409600. Throughput: 0: 791.1. Samples: 102458. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2023-07-04 14:59:08,363][13487] Avg episode reward: [(0, '4.634')]
+ [2023-07-04 14:59:08,369][13825] Saving new best policy, reward=4.634!
+ [2023-07-04 14:59:13,355][13487] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 2713.6). Total num frames: 434176. Throughput: 0: 800.0. Samples: 108596. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-07-04 14:59:13,357][13487] Avg episode reward: [(0, '4.654')]
+ [2023-07-04 14:59:13,371][13825] Saving new best policy, reward=4.654!
+ [2023-07-04 14:59:17,911][13838] Updated weights for policy 0, policy_version 110 (0.0022)
+ [2023-07-04 14:59:18,355][13487] Fps is (10 sec: 4096.0, 60 sec: 3345.1, 300 sec: 2730.7). Total num frames: 450560. Throughput: 0: 809.2. Samples: 111636. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-07-04 14:59:18,360][13487] Avg episode reward: [(0, '4.615')]
+ [2023-07-04 14:59:23,355][13487] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 2698.6). Total num frames: 458752. Throughput: 0: 800.2. Samples: 115164. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-07-04 14:59:23,363][13487] Avg episode reward: [(0, '4.720')]
+ [2023-07-04 14:59:23,384][13825] Saving new best policy, reward=4.720!
+ [2023-07-04 14:59:24,904][13487] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 13487], exiting...
+ [2023-07-04 14:59:24,912][13825] Stopping Batcher_0...
+ [2023-07-04 14:59:24,914][13825] Loop batcher_evt_loop terminating...
+ [2023-07-04 14:59:24,913][13825] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000112_458752.pth...
+ [2023-07-04 14:59:24,911][13487] Runner profile tree view:
+ main_loop: 196.1569
+ [2023-07-04 14:59:24,931][13487] Collected {0: 458752}, FPS: 2338.7
+ [2023-07-04 14:59:25,139][13845] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance6'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,197][13845] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc6_evt_loop
+ [2023-07-04 14:59:25,069][13841] EvtLoop [rollout_proc2_evt_loop, process=rollout_proc2] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance2'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,225][13838] Weights refcount: 2 0
+ [2023-07-04 14:59:25,335][13841] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc2_evt_loop
+ [2023-07-04 14:59:25,343][13838] Stopping InferenceWorker_p0-w0...
+ [2023-07-04 14:59:25,344][13838] Loop inference_proc0-0_evt_loop terminating...
+ [2023-07-04 14:59:25,319][13839] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance0'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,204][13840] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance1'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,386][13840] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc1_evt_loop
+ [2023-07-04 14:59:25,167][13843] EvtLoop [rollout_proc5_evt_loop, process=rollout_proc5] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance5'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,387][13843] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc5_evt_loop
+ [2023-07-04 14:59:25,188][13846] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance7'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-07-04 14:59:25,388][13846] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc7_evt_loop
+ [2023-07-04 14:59:25,235][13844] EvtLoop [rollout_proc4_evt_loop, process=rollout_proc4] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance4'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
502
+ new_obs, rewards, terminated, truncated, infos = e.step(actions)
503
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
504
+ return self.env.step(action)
505
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
506
+ obs, rew, terminated, truncated, info = self.env.step(action)
507
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
508
+ obs, rew, terminated, truncated, info = self.env.step(action)
509
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
510
+ observation, reward, terminated, truncated, info = self.env.step(action)
511
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
512
+ observation, reward, terminated, truncated, info = self.env.step(action)
513
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
514
+ obs, reward, terminated, truncated, info = self.env.step(action)
515
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
516
+ return self.env.step(action)
517
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
518
+ obs, reward, terminated, truncated, info = self.env.step(action)
519
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
520
+ reward = self.game.make_action(actions_flattened, self.skip_frames)
521
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
522
+ [2023-07-04 14:59:25,408][13844] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc4_evt_loop
523
+ [2023-07-04 14:59:25,248][13842] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance3'), args=(1, 0)
524
+ Traceback (most recent call last):
525
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
526
+ slot_callable(*args)
527
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
528
+ complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
529
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 632, in advance_rollouts
530
+ new_obs, rewards, terminated, truncated, infos = e.step(actions)
531
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
532
+ return self.env.step(action)
533
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
534
+ obs, rew, terminated, truncated, info = self.env.step(action)
535
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
536
+ obs, rew, terminated, truncated, info = self.env.step(action)
537
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
538
+ observation, reward, terminated, truncated, info = self.env.step(action)
539
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 384, in step
540
+ observation, reward, terminated, truncated, info = self.env.step(action)
541
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 88, in step
542
+ obs, reward, terminated, truncated, info = self.env.step(action)
543
+ File "/usr/local/lib/python3.10/dist-packages/gym/core.py", line 319, in step
544
+ return self.env.step(action)
545
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
546
+ obs, reward, terminated, truncated, info = self.env.step(action)
547
+ File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
548
+ reward = self.game.make_action(actions_flattened, self.skip_frames)
549
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
550
+ [2023-07-04 14:59:25,417][13842] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc3_evt_loop
551
+ [2023-07-04 14:59:25,365][13839] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc0_evt_loop
552
+ [2023-07-04 14:59:25,570][13825] Stopping LearnerWorker_p0...
553
+ [2023-07-04 14:59:25,571][13825] Loop learner_proc0_evt_loop terminating...
554
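Every rollout worker dies with the same stack: a SIGINT raised inside ViZDoom's `make_action` unwinds through the whole gym-style wrapper chain (reward shaping → generic env wrappers → multiplayer stats → the raw Doom env). A minimal sketch of that delegation pattern, with illustrative names rather than Sample-Factory's actual classes:

```python
# Sketch of the wrapper chain visible in the tracebacks above; every layer's
# step() just forwards to the wrapped env, so an exception thrown by the
# innermost game.make_action() call unwinds through all of them at once.
import gym

class GatheringRewardShaping(gym.Wrapper):  # illustrative, not the sf_examples class
    def step(self, action):
        observation, reward, terminated, truncated, info = self.env.step(action)
        reward += info.get("shaping_reward", 0.0)  # hypothetical info key
        return observation, reward, terminated, truncated, info
```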
+ [2023-07-04 14:59:27,388][13487] Environment doom_basic already registered, overwriting...
+ [2023-07-04 14:59:27,391][13487] Environment doom_two_colors_easy already registered, overwriting...
+ [2023-07-04 14:59:27,396][13487] Environment doom_two_colors_hard already registered, overwriting...
+ [2023-07-04 14:59:27,403][13487] Environment doom_dm already registered, overwriting...
+ [2023-07-04 14:59:27,409][13487] Environment doom_dwango5 already registered, overwriting...
+ [2023-07-04 14:59:27,416][13487] Environment doom_my_way_home_flat_actions already registered, overwriting...
+ [2023-07-04 14:59:27,428][13487] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+ [2023-07-04 14:59:27,431][13487] Environment doom_my_way_home already registered, overwriting...
+ [2023-07-04 14:59:27,432][13487] Environment doom_deadly_corridor already registered, overwriting...
+ [2023-07-04 14:59:27,436][13487] Environment doom_defend_the_center already registered, overwriting...
+ [2023-07-04 14:59:27,437][13487] Environment doom_defend_the_line already registered, overwriting...
+ [2023-07-04 14:59:27,441][13487] Environment doom_health_gathering already registered, overwriting...
+ [2023-07-04 14:59:27,445][13487] Environment doom_health_gathering_supreme already registered, overwriting...
+ [2023-07-04 14:59:27,447][13487] Environment doom_battle already registered, overwriting...
+ [2023-07-04 14:59:27,453][13487] Environment doom_battle2 already registered, overwriting...
+ [2023-07-04 14:59:27,454][13487] Environment doom_duel_bots already registered, overwriting...
+ [2023-07-04 14:59:27,458][13487] Environment doom_deathmatch_bots already registered, overwriting...
+ [2023-07-04 14:59:27,459][13487] Environment doom_duel already registered, overwriting...
+ [2023-07-04 14:59:27,462][13487] Environment doom_deathmatch_full already registered, overwriting...
+ [2023-07-04 14:59:27,463][13487] Environment doom_benchmark already registered, overwriting...
+ [2023-07-04 14:59:27,468][13487] register_encoder_factory: <function make_vizdoom_encoder at 0x7f4c7f7d0af0>
+ [2023-07-04 14:59:27,519][13487] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json
+ [2023-07-04 14:59:27,536][13487] Overriding arg 'train_for_env_steps' with value 25000 passed from command line
+ [2023-07-04 14:59:27,547][13487] Experiment dir /content/train_dir/default_experiment already exists!
+ [2023-07-04 14:59:27,559][13487] Resuming existing experiment from /content/train_dir/default_experiment...
+ [2023-07-04 14:59:27,562][13487] Weights and Biases integration disabled
+ [2023-07-04 14:59:27,575][13487] Environment var CUDA_VISIBLE_DEVICES is 0
+
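The wall of "already registered, overwriting" messages is harmless: it just means the registration cell ran again in the same kernel (pid 13487) after the interrupted run. A sketch of what such a cell does, assuming the course-notebook layout this log appears to come from; module paths may differ across Sample-Factory versions:

```python
import functools

from sample_factory.envs.env_utils import register_env
from sf_examples.vizdoom.doom.doom_utils import DOOM_ENVS, make_doom_env_from_spec

def register_vizdoom_envs():
    # Registering a name a second time logs "already registered, overwriting..."
    for env_spec in DOOM_ENVS:
        make_env_func = functools.partial(make_doom_env_from_spec, env_spec)
        register_env(env_spec.name, make_env_func)
```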
+ [2023-07-04 14:59:32,208][13487] Starting experiment with the following configuration:
+ help=False
+ algo=APPO
+ env=doom_health_gathering_supreme
+ experiment=default_experiment
+ train_dir=/content/train_dir
+ restart_behavior=resume
+ device=gpu
+ seed=None
+ num_policies=1
+ async_rl=True
+ serial_mode=False
+ batched_sampling=False
+ num_batches_to_accumulate=2
+ worker_num_splits=2
+ policy_workers_per_policy=1
+ max_policy_lag=1000
+ num_workers=8
+ num_envs_per_worker=4
+ batch_size=1024
+ num_batches_per_epoch=1
+ num_epochs=1
+ rollout=32
+ recurrence=32
+ shuffle_minibatches=False
+ gamma=0.99
+ reward_scale=1.0
+ reward_clip=1000.0
+ value_bootstrap=False
+ normalize_returns=True
+ exploration_loss_coeff=0.001
+ value_loss_coeff=0.5
+ kl_loss_coeff=0.0
+ exploration_loss=symmetric_kl
+ gae_lambda=0.95
+ ppo_clip_ratio=0.1
+ ppo_clip_value=0.2
+ with_vtrace=False
+ vtrace_rho=1.0
+ vtrace_c=1.0
+ optimizer=adam
+ adam_eps=1e-06
+ adam_beta1=0.9
+ adam_beta2=0.999
+ max_grad_norm=4.0
+ learning_rate=0.0001
+ lr_schedule=constant
+ lr_schedule_kl_threshold=0.008
+ obs_subtract_mean=0.0
+ obs_scale=255.0
+ normalize_input=True
+ normalize_input_keys=None
+ decorrelate_experience_max_seconds=0
+ decorrelate_envs_on_one_worker=True
+ actor_worker_gpus=[]
+ set_workers_cpu_affinity=True
+ force_envs_single_thread=False
+ default_niceness=0
+ log_to_file=True
+ experiment_summaries_interval=10
+ flush_summaries_interval=30
+ stats_avg=100
+ summaries_use_frameskip=True
+ heartbeat_interval=20
+ heartbeat_reporting_interval=600
+ train_for_env_steps=25000
+ train_for_seconds=10000000000
+ save_every_sec=120
+ keep_checkpoints=2
+ load_checkpoint_kind=latest
+ save_milestones_sec=-1
+ save_best_every_sec=5
+ save_best_metric=reward
+ save_best_after=100000
+ benchmark=False
+ encoder_mlp_layers=[512, 512]
+ encoder_conv_architecture=convnet_simple
+ encoder_conv_mlp_layers=[512]
+ use_rnn=True
+ rnn_size=512
+ rnn_type=gru
+ rnn_num_layers=1
+ decoder_mlp_layers=[]
+ nonlinearity=elu
+ policy_initialization=orthogonal
+ policy_init_gain=1.0
+ actor_critic_share_weights=True
+ adaptive_stddev=True
+ continuous_tanh_scale=0.0
+ initial_stddev=1.0
+ use_env_info_cache=False
+ env_gpu_actions=False
+ env_gpu_observations=True
+ env_frameskip=4
+ env_framestack=1
+ pixel_format=CHW
+ use_record_episode_statistics=False
+ with_wandb=False
+ wandb_user=None
+ wandb_project=sample_factory
+ wandb_group=None
+ wandb_job_type=SF
+ wandb_tags=[]
+ with_pbt=False
+ pbt_mix_policies_in_one_env=True
+ pbt_period_env_steps=5000000
+ pbt_start_mutation=20000000
+ pbt_replace_fraction=0.3
+ pbt_mutation_rate=0.15
+ pbt_replace_reward_gap=0.1
+ pbt_replace_reward_gap_absolute=1e-06
+ pbt_optimize_gamma=False
+ pbt_target_objective=true_objective
+ pbt_perturb_min=1.1
+ pbt_perturb_max=1.5
+ num_agents=-1
+ num_humans=0
+ num_bots=-1
+ start_bot_difficulty=None
+ timelimit=None
+ res_w=128
+ res_h=72
+ wide_aspect_ratio=False
+ eval_env_frameskip=1
+ fps=35
+ command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+ cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+ git_hash=unknown
+ git_repo_name=not a git repository
+ train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher
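The block above is the fully resolved experiment configuration. Two details worth noticing: `train_for_env_steps=25000` reflects the command-line override logged earlier, while `command_line`/`cli_args` still record the original 4000000-step invocation; and `train_script` points at `ipykernel_launcher`, confirming the run was launched from a notebook. The same values are persisted to `cfg.json` (assuming it stores the flat key/value pairs shown here), so they can be inspected with nothing but the standard library:

```python
import json

# Path taken from the log lines above.
with open("/content/train_dir/default_experiment/cfg.json") as f:
    cfg = json.load(f)

for key in ("env", "num_workers", "batch_size", "rollout", "train_for_env_steps"):
    print(f"{key} = {cfg[key]}")
```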
+ [2023-07-04 14:59:32,213][13487] Saving configuration to /content/train_dir/default_experiment/cfg.json...
+ [2023-07-04 14:59:32,216][13487] Rollout worker 0 uses device cpu
+ [2023-07-04 14:59:32,219][13487] Rollout worker 1 uses device cpu
+ [2023-07-04 14:59:32,222][13487] Rollout worker 2 uses device cpu
+ [2023-07-04 14:59:32,224][13487] Rollout worker 3 uses device cpu
+ [2023-07-04 14:59:32,225][13487] Rollout worker 4 uses device cpu
+ [2023-07-04 14:59:32,227][13487] Rollout worker 5 uses device cpu
+ [2023-07-04 14:59:32,229][13487] Rollout worker 6 uses device cpu
+ [2023-07-04 14:59:32,236][13487] Rollout worker 7 uses device cpu
+ [2023-07-04 14:59:32,377][13487] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:59:32,379][13487] InferenceWorker_p0-w0: min num requests: 2
+ [2023-07-04 14:59:32,419][13487] Starting all processes...
+ [2023-07-04 14:59:32,421][13487] Starting process learner_proc0
+ [2023-07-04 14:59:32,479][13487] Starting all processes...
+ [2023-07-04 14:59:32,488][13487] Starting process inference_proc0-0
+ [2023-07-04 14:59:32,488][13487] Starting process rollout_proc0
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc1
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc2
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc3
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc4
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc5
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc6
+ [2023-07-04 14:59:32,489][13487] Starting process rollout_proc7
+ [2023-07-04 14:59:44,353][19035] Worker 6 uses CPU cores [0]
+ [2023-07-04 14:59:44,366][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:59:44,371][19017] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+ [2023-07-04 14:59:44,475][19017] Num visible devices: 1
+ [2023-07-04 14:59:44,533][19017] Starting seed is not provided
+ [2023-07-04 14:59:44,534][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:59:44,535][19017] Initializing actor-critic model on device cuda:0
+ [2023-07-04 14:59:44,538][19017] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 14:59:44,539][19017] RunningMeanStd input shape: (1,)
+ [2023-07-04 14:59:44,575][19038] Worker 7 uses CPU cores [1]
+ [2023-07-04 14:59:44,606][19034] Worker 3 uses CPU cores [1]
+ [2023-07-04 14:59:44,636][19030] Worker 0 uses CPU cores [0]
+ [2023-07-04 14:59:44,687][19017] ConvEncoder: input_channels=3
+ [2023-07-04 14:59:44,717][19032] Worker 1 uses CPU cores [1]
+ [2023-07-04 14:59:44,738][19033] Worker 2 uses CPU cores [0]
+ [2023-07-04 14:59:44,748][19037] Worker 5 uses CPU cores [1]
+ [2023-07-04 14:59:44,792][19036] Worker 4 uses CPU cores [0]
+ [2023-07-04 14:59:44,916][19031] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:59:44,917][19031] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+ [2023-07-04 14:59:44,942][19031] Num visible devices: 1
+ [2023-07-04 14:59:45,039][19017] Conv encoder output size: 512
+ [2023-07-04 14:59:45,040][19017] Policy head output size: 512
+ [2023-07-04 14:59:45,066][19017] Created Actor Critic model with architecture:
+ [2023-07-04 14:59:45,067][19017] ActorCriticSharedWeights(
+ (obs_normalizer): ObservationNormalizer(
+ (running_mean_std): RunningMeanStdDictInPlace(
+ (running_mean_std): ModuleDict(
+ (obs): RunningMeanStdInPlace()
+ )
+ )
+ )
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+ (encoder): VizdoomEncoder(
+ (basic_encoder): ConvEncoder(
+ (enc): RecursiveScriptModule(
+ original_name=ConvEncoderImpl
+ (conv_head): RecursiveScriptModule(
+ original_name=Sequential
+ (0): RecursiveScriptModule(original_name=Conv2d)
+ (1): RecursiveScriptModule(original_name=ELU)
+ (2): RecursiveScriptModule(original_name=Conv2d)
+ (3): RecursiveScriptModule(original_name=ELU)
+ (4): RecursiveScriptModule(original_name=Conv2d)
+ (5): RecursiveScriptModule(original_name=ELU)
+ )
+ (mlp_layers): RecursiveScriptModule(
+ original_name=Sequential
+ (0): RecursiveScriptModule(original_name=Linear)
+ (1): RecursiveScriptModule(original_name=ELU)
+ )
+ )
+ )
+ )
+ (core): ModelCoreRNN(
+ (core): GRU(512, 512)
+ )
+ (decoder): MlpDecoder(
+ (mlp): Identity()
+ )
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+ (action_parameterization): ActionParameterizationDefault(
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+ )
+ )
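For readers who want to map that printout to code: below is a rough PyTorch equivalent, with sizes taken from the log (3×72×128 observations, 512-unit encoder output and GRU, 5 discrete actions). The conv kernel/stride values are an assumption based on Sample-Factory's `convnet_simple` preset, and the sketch omits the observation/returns normalizers; it is an illustration, not the actual `ActorCriticSharedWeights` class.

```python
import torch
from torch import nn

class TinyActorCritic(nn.Module):
    """Illustrative stand-in for the ActorCriticSharedWeights printout above."""

    def __init__(self, num_actions: int = 5):
        super().__init__()
        # conv_head: three Conv2d+ELU pairs (geometry assumed from convnet_simple)
        self.conv_head = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=8, stride=4), nn.ELU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ELU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2), nn.ELU(),
        )
        with torch.no_grad():
            conv_out = self.conv_head(torch.zeros(1, 3, 72, 128)).numel()
        self.mlp_layers = nn.Sequential(nn.Linear(conv_out, 512), nn.ELU())
        self.core = nn.GRU(512, 512)  # rnn_type=gru, rnn_size=512, one layer
        self.critic_linear = nn.Linear(512, 1)
        self.distribution_linear = nn.Linear(512, num_actions)

    def forward(self, obs, rnn_state):
        x = self.mlp_layers(self.conv_head(obs).flatten(1))
        x, rnn_state = self.core(x.unsqueeze(0), rnn_state)
        x = x.squeeze(0)
        return self.distribution_linear(x), self.critic_linear(x), rnn_state

# One forward pass: a batch of 4 observations with a fresh GRU state.
model = TinyActorCritic()
logits, value, h = model(torch.zeros(4, 3, 72, 128), torch.zeros(1, 4, 512))
```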
+ [2023-07-04 14:59:48,353][19017] Using optimizer <class 'torch.optim.adam.Adam'>
+ [2023-07-04 14:59:48,354][19017] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000112_458752.pth...
+ [2023-07-04 14:59:48,388][19017] Loading model from checkpoint
+ [2023-07-04 14:59:48,392][19017] Loaded experiment state at self.train_step=112, self.env_steps=458752
+ [2023-07-04 14:59:48,393][19017] Initialized policy 0 weights for model version 112
+ [2023-07-04 14:59:48,397][19017] LearnerWorker_p0 finished initialization!
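Resuming restores both the model weights and the step counters (train_step=112, env_steps=458752, also encoded in the checkpoint filename). A hedged sketch for poking at such a checkpoint offline; the key names inside the saved dict are an assumption here, so list them before relying on any:

```python
import torch

ckpt = torch.load(
    "/content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000112_458752.pth",
    map_location="cpu",
)
print(sorted(ckpt.keys()))  # discover the actual layout first
```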
+ [2023-07-04 14:59:48,398][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 14:59:48,601][19031] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 14:59:48,603][19031] RunningMeanStd input shape: (1,)
+ [2023-07-04 14:59:48,615][19031] ConvEncoder: input_channels=3
+ [2023-07-04 14:59:48,721][19031] Conv encoder output size: 512
+ [2023-07-04 14:59:48,721][19031] Policy head output size: 512
+ [2023-07-04 14:59:49,975][13487] Inference worker 0-0 is ready!
+ [2023-07-04 14:59:49,979][13487] All inference workers are ready! Signal rollout workers to start!
+ [2023-07-04 14:59:50,078][19035] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,080][19033] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,081][19030] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,076][19036] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,086][19034] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,080][19038] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,083][19037] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:50,085][19032] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 14:59:51,479][19033] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,483][19036] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,484][19035] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,907][19032] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,911][19034] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,913][19037] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:51,916][19038] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:52,369][13487] Heartbeat connected on Batcher_0
+ [2023-07-04 14:59:52,376][13487] Heartbeat connected on LearnerWorker_p0
+ [2023-07-04 14:59:52,429][13487] Heartbeat connected on InferenceWorker_p0-w0
+ [2023-07-04 14:59:52,575][13487] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 458752. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
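Note that `Total num frames` starts at 458752 rather than 0: the counter carries over from the checkpoint's env_steps, and it already exceeds the 25000-step target set on the command line, which explains why this resumed run trains for only a single batch before shutting down below. The 10/60/300-second figures are sliding-window FPS estimates and read nan until enough samples have arrived.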
+ [2023-07-04 14:59:52,797][19035] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:52,800][19036] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:52,889][19030] Decorrelating experience for 0 frames...
+ [2023-07-04 14:59:53,212][19037] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:53,221][19034] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:53,242][19038] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:54,084][19036] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:54,093][19035] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:54,305][19032] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:54,508][19034] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:54,545][19033] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:55,417][19037] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:55,562][19036] Decorrelating experience for 96 frames...
+ [2023-07-04 14:59:55,672][19035] Decorrelating experience for 96 frames...
+ [2023-07-04 14:59:55,815][13487] Heartbeat connected on RolloutWorker_w4
+ [2023-07-04 14:59:55,996][13487] Heartbeat connected on RolloutWorker_w6
+ [2023-07-04 14:59:56,018][19032] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:56,031][19030] Decorrelating experience for 32 frames...
+ [2023-07-04 14:59:56,215][19034] Decorrelating experience for 96 frames...
+ [2023-07-04 14:59:56,483][13487] Heartbeat connected on RolloutWorker_w3
+ [2023-07-04 14:59:57,431][19030] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:57,575][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 458752. Throughput: 0: 3.2. Samples: 16. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 14:59:57,581][13487] Avg episode reward: [(0, '2.010')]
+ [2023-07-04 14:59:57,876][19038] Decorrelating experience for 64 frames...
+ [2023-07-04 14:59:58,089][19037] Decorrelating experience for 96 frames...
+ [2023-07-04 14:59:58,653][13487] Heartbeat connected on RolloutWorker_w5
+ [2023-07-04 15:00:02,577][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 458752. Throughput: 0: 177.0. Samples: 1770. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-07-04 15:00:02,580][13487] Avg episode reward: [(0, '3.143')]
+ [2023-07-04 15:00:03,425][19017] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
+ [2023-07-04 15:00:03,437][13487] Component Batcher_0 stopped!
+ [2023-07-04 15:00:03,425][19017] Stopping Batcher_0...
+ [2023-07-04 15:00:03,445][19017] Loop batcher_evt_loop terminating...
+ [2023-07-04 15:00:03,506][19031] Weights refcount: 2 0
+ [2023-07-04 15:00:03,540][19031] Stopping InferenceWorker_p0-w0...
+ [2023-07-04 15:00:03,541][19031] Loop inference_proc0-0_evt_loop terminating...
+ [2023-07-04 15:00:03,542][13487] Component InferenceWorker_p0-w0 stopped!
+ [2023-07-04 15:00:03,574][19034] Stopping RolloutWorker_w3...
+ [2023-07-04 15:00:03,575][19034] Loop rollout_proc3_evt_loop terminating...
+ [2023-07-04 15:00:03,574][13487] Component RolloutWorker_w3 stopped!
+ [2023-07-04 15:00:03,591][13487] Component RolloutWorker_w6 stopped!
+ [2023-07-04 15:00:03,597][19035] Stopping RolloutWorker_w6...
+ [2023-07-04 15:00:03,598][19035] Loop rollout_proc6_evt_loop terminating...
+ [2023-07-04 15:00:03,601][19037] Stopping RolloutWorker_w5...
+ [2023-07-04 15:00:03,602][19037] Loop rollout_proc5_evt_loop terminating...
+ [2023-07-04 15:00:03,602][13487] Component RolloutWorker_w5 stopped!
+ [2023-07-04 15:00:03,564][19032] Decorrelating experience for 96 frames...
+ [2023-07-04 15:00:03,633][13487] Component RolloutWorker_w4 stopped!
+ [2023-07-04 15:00:03,636][19036] Stopping RolloutWorker_w4...
+ [2023-07-04 15:00:03,630][19017] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000051_208896.pth
+ [2023-07-04 15:00:03,637][19036] Loop rollout_proc4_evt_loop terminating...
+ [2023-07-04 15:00:03,665][19017] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
+ [2023-07-04 15:00:03,868][13487] Component LearnerWorker_p0 stopped!
+ [2023-07-04 15:00:03,871][19017] Stopping LearnerWorker_p0...
+ [2023-07-04 15:00:03,874][19017] Loop learner_proc0_evt_loop terminating...
+ [2023-07-04 15:00:03,941][19030] Decorrelating experience for 96 frames...
+ [2023-07-04 15:00:04,551][19033] Decorrelating experience for 64 frames...
+ [2023-07-04 15:00:04,784][13487] Component RolloutWorker_w1 stopped!
+ [2023-07-04 15:00:04,785][19032] Stopping RolloutWorker_w1...
+ [2023-07-04 15:00:04,798][19032] Loop rollout_proc1_evt_loop terminating...
+ [2023-07-04 15:00:05,354][13487] Component RolloutWorker_w0 stopped!
+ [2023-07-04 15:00:05,360][19030] Stopping RolloutWorker_w0...
+ [2023-07-04 15:00:05,372][19030] Loop rollout_proc0_evt_loop terminating...
+ [2023-07-04 15:00:07,815][19038] Decorrelating experience for 96 frames...
+ [2023-07-04 15:00:08,061][19038] Stopping RolloutWorker_w7...
+ [2023-07-04 15:00:08,062][19038] Loop rollout_proc7_evt_loop terminating...
+ [2023-07-04 15:00:08,061][13487] Component RolloutWorker_w7 stopped!
+ [2023-07-04 15:00:08,310][19033] Decorrelating experience for 96 frames...
+ [2023-07-04 15:00:08,535][13487] Component RolloutWorker_w2 stopped!
+ [2023-07-04 15:00:08,542][13487] Waiting for process learner_proc0 to stop...
+ [2023-07-04 15:00:08,535][19033] Stopping RolloutWorker_w2...
+ [2023-07-04 15:00:08,546][19033] Loop rollout_proc2_evt_loop terminating...
+ [2023-07-04 15:00:08,545][13487] Waiting for process inference_proc0-0 to join...
+ [2023-07-04 15:00:08,551][13487] Waiting for process rollout_proc0 to join...
+ [2023-07-04 15:00:08,606][13487] Waiting for process rollout_proc1 to join...
+ [2023-07-04 15:00:08,612][13487] Waiting for process rollout_proc2 to join...
+ [2023-07-04 15:00:09,048][13487] Waiting for process rollout_proc3 to join...
+ [2023-07-04 15:00:09,053][13487] Waiting for process rollout_proc4 to join...
+ [2023-07-04 15:00:09,056][13487] Waiting for process rollout_proc5 to join...
+ [2023-07-04 15:00:09,060][13487] Waiting for process rollout_proc6 to join...
+ [2023-07-04 15:00:09,063][13487] Waiting for process rollout_proc7 to join...
+ [2023-07-04 15:00:09,065][13487] Batcher 0 profile tree view:
+ batching: 0.0380, releasing_batches: 0.0000
+ [2023-07-04 15:00:09,068][13487] InferenceWorker_p0-w0 profile tree view:
+ wait_policy: 0.0015
+ wait_policy_total: 10.3235
+ update_model: 0.0217
+ weight_update: 0.0012
+ one_step: 0.0061
+ handle_policy_step: 2.8497
+ deserialize: 0.0506, stack: 0.0128, obs_to_device_normalize: 0.4272, forward: 1.8411, send_messages: 0.0826
+ prepare_outputs: 0.3033
+ to_cpu: 0.1717
+ [2023-07-04 15:00:09,070][13487] Learner 0 profile tree view:
+ misc: 0.0000, prepare_batch: 3.6049
+ train: 0.9956
+ epoch_init: 0.0000, minibatch_init: 0.0000, losses_postprocess: 0.0002, kl_divergence: 0.0003, after_optimizer: 0.0048
+ calculate_losses: 0.1297
+ losses_init: 0.0000, forward_head: 0.1186, bptt_initial: 0.0057, tail: 0.0008, advantages_returns: 0.0009, losses: 0.0021
+ bptt: 0.0014
+ bptt_forward_core: 0.0014
+ update: 0.8563
+ clip: 0.0018
+ [2023-07-04 15:00:09,071][13487] RolloutWorker_w0 profile tree view:
+ wait_for_trajectories: 0.0299, enqueue_policy_requests: 0.0007
+ [2023-07-04 15:00:09,074][13487] RolloutWorker_w7 profile tree view:
+ wait_for_trajectories: 0.0003, enqueue_policy_requests: 0.0026
+ [2023-07-04 15:00:09,075][13487] Loop Runner_EvtLoop terminating...
+ [2023-07-04 15:00:09,078][13487] Runner profile tree view:
+ main_loop: 36.6591
+ [2023-07-04 15:00:09,080][13487] Collected {0: 462848}, FPS: 111.7
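The closing figure is easy to verify from the log itself: the resumed run added 462848 − 458752 = 4096 frames, which is 1024 samples × env_frameskip 4, i.e. exactly one training batch, matching the single train_step increment from checkpoint 112 to 113. Over `main_loop` = 36.6591 s that gives 4096 / 36.6591 ≈ 111.7 FPS, a number dominated by startup and decorrelation rather than steady-state throughput.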
+ [2023-07-04 15:02:33,856][19799] Saving configuration to /content/train_dir/default_experiment/config.json...
+ [2023-07-04 15:02:33,868][19799] Rollout worker 0 uses device cpu
+ [2023-07-04 15:02:33,872][19799] Rollout worker 1 uses device cpu
+ [2023-07-04 15:02:33,874][19799] Rollout worker 2 uses device cpu
+ [2023-07-04 15:02:33,883][19799] Rollout worker 3 uses device cpu
+ [2023-07-04 15:02:33,889][19799] Rollout worker 4 uses device cpu
+ [2023-07-04 15:02:33,897][19799] Rollout worker 5 uses device cpu
+ [2023-07-04 15:02:33,898][19799] Rollout worker 6 uses device cpu
+ [2023-07-04 15:02:33,899][19799] Rollout worker 7 uses device cpu
+ [2023-07-04 15:02:34,113][19799] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-07-04 15:02:34,121][19799] InferenceWorker_p0-w0: min num requests: 2
+ [2023-07-04 15:02:34,169][19799] Starting all processes...
+ [2023-07-04 15:02:34,177][19799] Starting process learner_proc0
+ [2023-07-04 15:02:34,188][19799] EvtLoop [Runner_EvtLoop, process=main process 19799] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=()
+ Traceback (most recent call last):
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+ slot_callable(*args)
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
+ self._start_processes()
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
+ p.start()
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
+ self._process.start()
+ File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
+ self._popen = self._Popen(self)
+ File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
+ return Popen(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
+ super().__init__(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
+ self._launch(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
+ reduction.dump(process_obj, fp)
+ File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
+ ForkingPickler(file, protocol).dump(obj)
+ TypeError: cannot pickle 'TLSBuffer' object
+ [2023-07-04 15:02:34,196][19799] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop
+ [2023-07-04 15:02:34,203][19799] Uncaught exception in Runner evt loop
+ Traceback (most recent call last):
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run
+ evt_loop_status = self.event_loop.exec()
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec
+ raise exc
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec
+ while self._loop_iteration():
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration
+ self._process_signal(s)
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal
+ raise exc
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+ slot_callable(*args)
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
+ self._start_processes()
+ File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
+ p.start()
+ File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
+ self._process.start()
+ File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
+ self._popen = self._Popen(self)
+ File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
+ return Popen(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
+ super().__init__(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
+ self._launch(process_obj)
+ File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
+ reduction.dump(process_obj, fp)
+ File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
+ ForkingPickler(file, protocol).dump(obj)
+ TypeError: cannot pickle 'TLSBuffer' object
+ [2023-07-04 15:02:34,210][19799] Runner profile tree view:
+ main_loop: 0.0419
+ [2023-07-04 15:02:34,212][19799] Collected {}, FPS: 0.0
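This second training attempt dies before any worker starts: with the spawn start method, `multiprocessing` must pickle the process object, and something reachable from the Runner created inside the notebook kernel (recall `train_script=...ipykernel_launcher` in the config above) holds an unpicklable SSL `TLSBuffer`. A minimal analog of the failure mode, using a thread lock as the stand-in unpicklable handle:

```python
# Minimal analog of the "cannot pickle 'TLSBuffer' object" failure above:
# the spawn start method pickles the Process target, so any unpicklable
# handle reachable from it aborts process startup before the child exists.
import multiprocessing as mp
import threading

class Worker:
    def __init__(self):
        self.handle = threading.Lock()  # unpicklable, standing in for TLSBuffer

    def run(self):
        pass

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    proc = ctx.Process(target=Worker().run)
    proc.start()  # raises TypeError: cannot pickle '_thread.lock' object
```

Restarting the kernel, or launching training as a standalone script, is the usual way out; here the log simply moves on to evaluation, which appears to run the environment in the main process (pid 19799) and never hits the spawn path.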
+ [2023-07-04 15:02:34,410][19799] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+ [2023-07-04 15:02:34,413][19799] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-07-04 15:02:34,423][19799] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-07-04 15:02:34,428][19799] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-07-04 15:02:34,436][19799] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-07-04 15:02:34,442][19799] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-07-04 15:02:34,447][19799] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+ [2023-07-04 15:02:34,457][19799] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-07-04 15:02:34,459][19799] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+ [2023-07-04 15:02:34,460][19799] Adding new argument 'hf_repository'=None that is not in the saved config file!
+ [2023-07-04 15:02:34,461][19799] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-07-04 15:02:34,462][19799] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-07-04 15:02:34,463][19799] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-07-04 15:02:34,465][19799] Using frameskip 1 and render_action_repeat=4 for evaluation
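Training used `env_frameskip=4`; for evaluation the env runs at frameskip 1 while each chosen action is repeated 4 times (`render_action_repeat=4`), so the policy still acts at its training rate but the saved replay keeps every intermediate frame instead of every fourth one.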
+ [2023-07-04 15:02:34,528][19799] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-07-04 15:02:34,536][19799] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 15:02:34,539][19799] RunningMeanStd input shape: (1,)
+ [2023-07-04 15:02:34,576][19799] ConvEncoder: input_channels=3
+ [2023-07-04 15:02:34,894][19799] Conv encoder output size: 512
+ [2023-07-04 15:02:34,897][19799] Policy head output size: 512
+ [2023-07-04 15:02:42,149][19799] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
+ [2023-07-04 15:02:44,118][19799] Num frames 100...
+ [2023-07-04 15:02:44,316][19799] Num frames 200...
+ [2023-07-04 15:02:44,533][19799] Num frames 300...
+ [2023-07-04 15:02:44,758][19799] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+ [2023-07-04 15:02:44,761][19799] Avg episode reward: 3.840, avg true_objective: 3.840
+ [2023-07-04 15:02:44,802][19799] Num frames 400...
+ [2023-07-04 15:02:44,936][19799] Num frames 500...
+ [2023-07-04 15:02:45,089][19799] Num frames 600...
+ [2023-07-04 15:02:45,228][19799] Num frames 700...
+ [2023-07-04 15:02:45,372][19799] Num frames 800...
+ [2023-07-04 15:02:45,425][19799] Avg episode rewards: #0: 4.500, true rewards: #0: 4.000
+ [2023-07-04 15:02:45,427][19799] Avg episode reward: 4.500, avg true_objective: 4.000
+ [2023-07-04 15:02:45,569][19799] Num frames 900...
+ [2023-07-04 15:02:45,711][19799] Num frames 1000...
+ [2023-07-04 15:02:45,843][19799] Num frames 1100...
+ [2023-07-04 15:02:45,997][19799] Num frames 1200...
+ [2023-07-04 15:02:46,175][19799] Avg episode rewards: #0: 5.267, true rewards: #0: 4.267
+ [2023-07-04 15:02:46,177][19799] Avg episode reward: 5.267, avg true_objective: 4.267
+ [2023-07-04 15:02:46,213][19799] Num frames 1300...
+ [2023-07-04 15:02:46,350][19799] Num frames 1400...
+ [2023-07-04 15:02:46,484][19799] Num frames 1500...
+ [2023-07-04 15:02:46,631][19799] Num frames 1600...
+ [2023-07-04 15:02:46,781][19799] Num frames 1700...
+ [2023-07-04 15:02:46,874][19799] Avg episode rewards: #0: 5.320, true rewards: #0: 4.320
+ [2023-07-04 15:02:46,876][19799] Avg episode reward: 5.320, avg true_objective: 4.320
+ [2023-07-04 15:02:46,985][19799] Num frames 1800...
+ [2023-07-04 15:02:47,125][19799] Num frames 1900...
+ [2023-07-04 15:02:47,265][19799] Num frames 2000...
+ [2023-07-04 15:02:47,411][19799] Num frames 2100...
+ [2023-07-04 15:02:47,566][19799] Avg episode rewards: #0: 5.352, true rewards: #0: 4.352
+ [2023-07-04 15:02:47,569][19799] Avg episode reward: 5.352, avg true_objective: 4.352
+ [2023-07-04 15:02:47,610][19799] Num frames 2200...
+ [2023-07-04 15:02:47,744][19799] Num frames 2300...
+ [2023-07-04 15:02:47,891][19799] Num frames 2400...
+ [2023-07-04 15:02:48,040][19799] Num frames 2500...
+ [2023-07-04 15:02:48,182][19799] Avg episode rewards: #0: 5.100, true rewards: #0: 4.267
+ [2023-07-04 15:02:48,184][19799] Avg episode reward: 5.100, avg true_objective: 4.267
+ [2023-07-04 15:02:48,256][19799] Num frames 2600...
+ [2023-07-04 15:02:48,392][19799] Num frames 2700...
+ [2023-07-04 15:02:48,531][19799] Num frames 2800...
+ [2023-07-04 15:02:48,667][19799] Num frames 2900...
+ [2023-07-04 15:02:48,811][19799] Num frames 3000...
+ [2023-07-04 15:02:48,881][19799] Avg episode rewards: #0: 5.154, true rewards: #0: 4.297
+ [2023-07-04 15:02:48,883][19799] Avg episode reward: 5.154, avg true_objective: 4.297
+ [2023-07-04 15:02:49,008][19799] Num frames 3100...
+ [2023-07-04 15:02:49,141][19799] Num frames 3200...
+ [2023-07-04 15:02:49,277][19799] Num frames 3300...
+ [2023-07-04 15:02:49,472][19799] Avg episode rewards: #0: 4.990, true rewards: #0: 4.240
+ [2023-07-04 15:02:49,475][19799] Avg episode reward: 4.990, avg true_objective: 4.240
+ [2023-07-04 15:02:49,493][19799] Num frames 3400...
+ [2023-07-04 15:02:49,629][19799] Num frames 3500...
+ [2023-07-04 15:02:49,757][19799] Num frames 3600...
+ [2023-07-04 15:02:49,890][19799] Num frames 3700...
+ [2023-07-04 15:02:50,052][19799] Avg episode rewards: #0: 4.862, true rewards: #0: 4.196
+ [2023-07-04 15:02:50,054][19799] Avg episode reward: 4.862, avg true_objective: 4.196
+ [2023-07-04 15:02:50,097][19799] Num frames 3800...
+ [2023-07-04 15:02:50,234][19799] Num frames 3900...
+ [2023-07-04 15:02:50,385][19799] Num frames 4000...
+ [2023-07-04 15:02:50,510][19799] Num frames 4100...
+ [2023-07-04 15:02:50,696][19799] Avg episode rewards: #0: 4.792, true rewards: #0: 4.192
+ [2023-07-04 15:02:50,698][19799] Avg episode reward: 4.792, avg true_objective: 4.192
+ [2023-07-04 15:03:17,366][19799] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
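The `Avg episode rewards` lines are running means over completed episodes, so per-episode scores can be recovered: episode 1 scored 3.840, and a mean of 4.500 after two episodes implies episode 2 scored 2 × 4.500 − 3.840 = 5.160. The same unrolling for the whole run:

```python
# Running means reported after each of the 10 episodes above.
means = [3.840, 4.500, 5.267, 5.320, 5.352, 5.100, 5.154, 4.990, 4.862, 4.792]

# r_n = n * mean_n - (n - 1) * mean_{n-1} recovers individual episode rewards.
rewards = [round(n * m - (n - 1) * p, 3)
           for n, (p, m) in enumerate(zip([0.0] + means, means), start=1)]
print(rewards)  # [3.84, 5.16, 6.801, ...]
```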
+ [2023-07-04 15:05:23,598][19799] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+ [2023-07-04 15:05:23,604][19799] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-07-04 15:05:23,611][19799] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-07-04 15:05:23,613][19799] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-07-04 15:05:23,620][19799] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-07-04 15:05:23,627][19799] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-07-04 15:05:23,629][19799] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2023-07-04 15:05:23,630][19799] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-07-04 15:05:23,634][19799] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2023-07-04 15:05:23,635][19799] Adding new argument 'hf_repository'='HilbertS/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2023-07-04 15:05:23,640][19799] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-07-04 15:05:23,641][19799] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-07-04 15:05:23,643][19799] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-07-04 15:05:23,648][19799] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-07-04 15:05:23,691][19799] RunningMeanStd input shape: (3, 72, 128)
+ [2023-07-04 15:05:23,697][19799] RunningMeanStd input shape: (1,)
+ [2023-07-04 15:05:23,728][19799] ConvEncoder: input_channels=3
+ [2023-07-04 15:05:23,875][19799] Conv encoder output size: 512
+ [2023-07-04 15:05:23,894][19799] Policy head output size: 512
+ [2023-07-04 15:05:23,958][19799] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
+ [2023-07-04 15:05:25,102][19799] Num frames 100...
+ [2023-07-04 15:05:25,316][19799] Num frames 200...
+ [2023-07-04 15:05:25,517][19799] Num frames 300...
+ [2023-07-04 15:05:25,721][19799] Num frames 400...
+ [2023-07-04 15:05:25,882][19799] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+ [2023-07-04 15:05:25,888][19799] Avg episode reward: 5.480, avg true_objective: 4.480
+ [2023-07-04 15:05:26,109][19799] Num frames 500...
+ [2023-07-04 15:05:26,368][19799] Num frames 600...
+ [2023-07-04 15:05:26,570][19799] Num frames 700...
+ [2023-07-04 15:05:26,811][19799] Num frames 800...
+ [2023-07-04 15:05:26,961][19799] Avg episode rewards: #0: 4.660, true rewards: #0: 4.160
+ [2023-07-04 15:05:26,968][19799] Avg episode reward: 4.660, avg true_objective: 4.160
+ [2023-07-04 15:05:27,085][19799] Num frames 900...
+ [2023-07-04 15:05:27,210][19799] Num frames 1000...
+ [2023-07-04 15:05:27,342][19799] Num frames 1100...
+ [2023-07-04 15:05:27,468][19799] Num frames 1200...
+ [2023-07-04 15:05:27,605][19799] Num frames 1300...
+ [2023-07-04 15:05:27,749][19799] Num frames 1400...
+ [2023-07-04 15:05:27,836][19799] Avg episode rewards: #0: 6.070, true rewards: #0: 4.737
+ [2023-07-04 15:05:27,838][19799] Avg episode reward: 6.070, avg true_objective: 4.737
+ [2023-07-04 15:05:27,940][19799] Num frames 1500...
+ [2023-07-04 15:05:28,080][19799] Num frames 1600...
+ [2023-07-04 15:05:28,212][19799] Num frames 1700...
+ [2023-07-04 15:05:28,354][19799] Num frames 1800...
+ [2023-07-04 15:05:28,420][19799] Avg episode rewards: #0: 5.513, true rewards: #0: 4.512
+ [2023-07-04 15:05:28,422][19799] Avg episode reward: 5.513, avg true_objective: 4.512
+ [2023-07-04 15:05:28,553][19799] Num frames 1900...
+ [2023-07-04 15:05:28,681][19799] Num frames 2000...
+ [2023-07-04 15:05:28,815][19799] Num frames 2100...
+ [2023-07-04 15:05:28,982][19799] Avg episode rewards: #0: 5.178, true rewards: #0: 4.378
+ [2023-07-04 15:05:28,985][19799] Avg episode reward: 5.178, avg true_objective: 4.378
+ [2023-07-04 15:05:29,001][19799] Num frames 2200...
+ [2023-07-04 15:05:29,130][19799] Num frames 2300...
+ [2023-07-04 15:05:29,274][19799] Num frames 2400...
+ [2023-07-04 15:05:29,415][19799] Num frames 2500...
+ [2023-07-04 15:05:29,554][19799] Num frames 2600...
+ [2023-07-04 15:05:29,694][19799] Num frames 2700...
+ [2023-07-04 15:05:29,793][19799] Avg episode rewards: #0: 5.555, true rewards: #0: 4.555
+ [2023-07-04 15:05:29,795][19799] Avg episode reward: 5.555, avg true_objective: 4.555
+ [2023-07-04 15:05:29,900][19799] Num frames 2800...
+ [2023-07-04 15:05:30,041][19799] Num frames 2900...
+ [2023-07-04 15:05:30,169][19799] Num frames 3000...
+ [2023-07-04 15:05:30,302][19799] Num frames 3100...
+ [2023-07-04 15:05:30,471][19799] Avg episode rewards: #0: 5.544, true rewards: #0: 4.544
+ [2023-07-04 15:05:30,473][19799] Avg episode reward: 5.544, avg true_objective: 4.544
+ [2023-07-04 15:05:30,501][19799] Num frames 3200...
+ [2023-07-04 15:05:30,636][19799] Num frames 3300...
+ [2023-07-04 15:05:30,768][19799] Num frames 3400...
+ [2023-07-04 15:05:30,916][19799] Num frames 3500...
+ [2023-07-04 15:05:31,058][19799] Num frames 3600...
+ [2023-07-04 15:05:31,154][19799] Avg episode rewards: #0: 5.536, true rewards: #0: 4.536
+ [2023-07-04 15:05:31,156][19799] Avg episode reward: 5.536, avg true_objective: 4.536
+ [2023-07-04 15:05:31,255][19799] Num frames 3700...
+ [2023-07-04 15:05:31,392][19799] Num frames 3800...
+ [2023-07-04 15:05:31,527][19799] Num frames 3900...
+ [2023-07-04 15:05:31,669][19799] Num frames 4000...
+ [2023-07-04 15:05:31,744][19799] Avg episode rewards: #0: 5.348, true rewards: #0: 4.459
+ [2023-07-04 15:05:31,746][19799] Avg episode reward: 5.348, avg true_objective: 4.459
+ [2023-07-04 15:05:31,863][19799] Num frames 4100...
+ [2023-07-04 15:05:31,996][19799] Num frames 4200...
+ [2023-07-04 15:05:32,140][19799] Num frames 4300...
+ [2023-07-04 15:05:32,267][19799] Num frames 4400...
+ [2023-07-04 15:05:32,403][19799] Num frames 4500...
+ [2023-07-04 15:05:32,530][19799] Avg episode rewards: #0: 5.557, true rewards: #0: 4.557
+ [2023-07-04 15:05:32,531][19799] Avg episode reward: 5.557, avg true_objective: 4.557
+ [2023-07-04 15:06:01,713][19799] Replay video saved to /content/train_dir/default_experiment/replay.mp4!