CloXD committed on
Commit 3170c45
1 Parent(s): 5458904

Upload . with huggingface_hub

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1677853404.tensorbook ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01b1fd2c3a6cca025337db424b62d3eb0eebbdce0590845238c3c657550cc254
+ size 120207
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 10.42 +/- 4.53
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
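+ If Sample-Factory is not installed yet, one minimal setup sketch (assuming a pip-based environment; the `vizdoom` package is assumed to be needed for the doom_* environments, and the exact install extras may differ, see the Sample-Factory docs):
+ ```
+ # Install Sample-Factory and the ViZDoom environment package (assumed PyPI names)
+ pip install sample-factory
+ pip install vizdoom
+ ```
+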
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r CloXD/rl_course_vizdoom_health_gathering_supreme
+ ```
+
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```
+
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
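+ For example, pushing an evaluation of this model back to the Hub under your own account could look like the sketch below (it reuses the `--push_to_hub`, `--hf_repository`, and `--max_num_episodes` arguments recorded in the evaluation log in this repository; `<path.to.enjoy.module>` and `<your_hf_username>` are placeholders):
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --max_num_episodes=10 --push_to_hub --hf_repository=<your_hf_username>/rl_course_vizdoom_health_gathering_supreme
+ ```
+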
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
+
+ Note that you may need to set `--train_for_env_steps` to a suitably high value, since the experiment resumes from the step count at which it previously stopped.
+
checkpoint_p0/best_000000978_4005888_reward_25.653.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd1ef51ce8f01b11425d9fb455d0aba29e50257528817539b3b63568f0cbff0c
+ size 34924044
checkpoint_p0/checkpoint_000000901_3690496.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f0fbb0422d9825cab561ea49be68e6930959a93aae80eda3b3b8a74a4420c2d
+ size 34924044
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd1ef51ce8f01b11425d9fb455d0aba29e50257528817539b3b63568f0cbff0c
+ size 34924044
config.json ADDED
@@ -0,0 +1,142 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_health_gathering_supreme",
+     "experiment": "default_experiment",
+     "train_dir": "/home/lorencl/git/ReinforcementLearning/Lesson8/train_dir",
+     "restart_behavior": "resume",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 1,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "lr_adaptive_min": 1e-06,
+     "lr_adaptive_max": 0.01,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 4000000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+     "cli_args": {
+         "env": "doom_health_gathering_supreme",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 4000000
+     },
+     "git_hash": "unknown",
+     "git_repo_name": "not a git repository"
+ }
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b820257022fe6981cbe347f275a157ab0dae11690d9c0d120172c84fc470ac5a
+ size 20440267
sf_log.txt ADDED
@@ -0,0 +1,1000 @@
1
+ [2023-03-03 15:23:26,156][90258] Saving configuration to /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/config.json...
2
+ [2023-03-03 15:23:26,157][90258] Rollout worker 0 uses device cpu
3
+ [2023-03-03 15:23:26,157][90258] Rollout worker 1 uses device cpu
4
+ [2023-03-03 15:23:26,158][90258] Rollout worker 2 uses device cpu
5
+ [2023-03-03 15:23:26,158][90258] Rollout worker 3 uses device cpu
6
+ [2023-03-03 15:23:26,159][90258] Rollout worker 4 uses device cpu
7
+ [2023-03-03 15:23:26,159][90258] Rollout worker 5 uses device cpu
8
+ [2023-03-03 15:23:26,159][90258] Rollout worker 6 uses device cpu
9
+ [2023-03-03 15:23:26,160][90258] Rollout worker 7 uses device cpu
10
+ [2023-03-03 15:23:26,186][90258] Using GPUs [0] for process 0 (actually maps to GPUs [0])
11
+ [2023-03-03 15:23:26,187][90258] InferenceWorker_p0-w0: min num requests: 2
12
+ [2023-03-03 15:23:26,203][90258] Starting all processes...
13
+ [2023-03-03 15:23:26,203][90258] Starting process learner_proc0
14
+ [2023-03-03 15:23:26,253][90258] Starting all processes...
15
+ [2023-03-03 15:23:26,257][90258] Starting process inference_proc0-0
16
+ [2023-03-03 15:23:26,257][90258] Starting process rollout_proc0
17
+ [2023-03-03 15:23:26,257][90258] Starting process rollout_proc1
18
+ [2023-03-03 15:23:26,258][90258] Starting process rollout_proc2
19
+ [2023-03-03 15:23:26,258][90258] Starting process rollout_proc3
20
+ [2023-03-03 15:23:26,259][90258] Starting process rollout_proc4
21
+ [2023-03-03 15:23:26,259][90258] Starting process rollout_proc5
22
+ [2023-03-03 15:23:26,260][90258] Starting process rollout_proc6
23
+ [2023-03-03 15:23:26,261][90258] Starting process rollout_proc7
24
+ [2023-03-03 15:23:27,164][90462] Worker 0 uses CPU cores [0, 1]
25
+ [2023-03-03 15:23:27,250][90447] Using GPUs [0] for process 0 (actually maps to GPUs [0])
26
+ [2023-03-03 15:23:27,250][90447] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
27
+ [2023-03-03 15:23:27,260][90461] Worker 1 uses CPU cores [2, 3]
28
+ [2023-03-03 15:23:27,262][90460] Using GPUs [0] for process 0 (actually maps to GPUs [0])
29
+ [2023-03-03 15:23:27,262][90460] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
30
+ [2023-03-03 15:23:27,264][90447] Num visible devices: 1
31
+ [2023-03-03 15:23:27,265][90463] Worker 3 uses CPU cores [6, 7]
32
+ [2023-03-03 15:23:27,265][90466] Worker 5 uses CPU cores [10, 11]
33
+ [2023-03-03 15:23:27,266][90460] Num visible devices: 1
34
+ [2023-03-03 15:23:27,301][90447] Starting seed is not provided
35
+ [2023-03-03 15:23:27,302][90447] Using GPUs [0] for process 0 (actually maps to GPUs [0])
36
+ [2023-03-03 15:23:27,302][90483] Worker 6 uses CPU cores [12, 13]
37
+ [2023-03-03 15:23:27,302][90447] Initializing actor-critic model on device cuda:0
38
+ [2023-03-03 15:23:27,302][90447] RunningMeanStd input shape: (3, 72, 128)
39
+ [2023-03-03 15:23:27,302][90447] RunningMeanStd input shape: (1,)
40
+ [2023-03-03 15:23:27,310][90447] ConvEncoder: input_channels=3
41
+ [2023-03-03 15:23:27,333][90464] Worker 2 uses CPU cores [4, 5]
42
+ [2023-03-03 15:23:27,400][90447] Conv encoder output size: 512
43
+ [2023-03-03 15:23:27,400][90447] Policy head output size: 512
44
+ [2023-03-03 15:23:27,403][90482] Worker 7 uses CPU cores [14, 15]
45
+ [2023-03-03 15:23:27,408][90447] Created Actor Critic model with architecture:
46
+ [2023-03-03 15:23:27,408][90447] ActorCriticSharedWeights(
47
+ (obs_normalizer): ObservationNormalizer(
48
+ (running_mean_std): RunningMeanStdDictInPlace(
49
+ (running_mean_std): ModuleDict(
50
+ (obs): RunningMeanStdInPlace()
51
+ )
52
+ )
53
+ )
54
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
55
+ (encoder): VizdoomEncoder(
56
+ (basic_encoder): ConvEncoder(
57
+ (enc): RecursiveScriptModule(
58
+ original_name=ConvEncoderImpl
59
+ (conv_head): RecursiveScriptModule(
60
+ original_name=Sequential
61
+ (0): RecursiveScriptModule(original_name=Conv2d)
62
+ (1): RecursiveScriptModule(original_name=ELU)
63
+ (2): RecursiveScriptModule(original_name=Conv2d)
64
+ (3): RecursiveScriptModule(original_name=ELU)
65
+ (4): RecursiveScriptModule(original_name=Conv2d)
66
+ (5): RecursiveScriptModule(original_name=ELU)
67
+ )
68
+ (mlp_layers): RecursiveScriptModule(
69
+ original_name=Sequential
70
+ (0): RecursiveScriptModule(original_name=Linear)
71
+ (1): RecursiveScriptModule(original_name=ELU)
72
+ )
73
+ )
74
+ )
75
+ )
76
+ (core): ModelCoreRNN(
77
+ (core): GRU(512, 512)
78
+ )
79
+ (decoder): MlpDecoder(
80
+ (mlp): Identity()
81
+ )
82
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
83
+ (action_parameterization): ActionParameterizationDefault(
84
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
85
+ )
86
+ )
87
+ [2023-03-03 15:23:27,416][90465] Worker 4 uses CPU cores [8, 9]
88
+ [2023-03-03 15:23:30,183][90447] Using optimizer <class 'torch.optim.adam.Adam'>
89
+ [2023-03-03 15:23:30,183][90447] No checkpoints found
90
+ [2023-03-03 15:23:30,184][90447] Did not load from checkpoint, starting from scratch!
91
+ [2023-03-03 15:23:30,184][90447] Initialized policy 0 weights for model version 0
92
+ [2023-03-03 15:23:30,186][90447] LearnerWorker_p0 finished initialization!
93
+ [2023-03-03 15:23:30,186][90447] Using GPUs [0] for process 0 (actually maps to GPUs [0])
94
+ [2023-03-03 15:23:30,219][90460] RunningMeanStd input shape: (3, 72, 128)
95
+ [2023-03-03 15:23:30,219][90460] RunningMeanStd input shape: (1,)
96
+ [2023-03-03 15:23:30,226][90460] ConvEncoder: input_channels=3
97
+ [2023-03-03 15:23:30,290][90460] Conv encoder output size: 512
98
+ [2023-03-03 15:23:30,290][90460] Policy head output size: 512
99
+ [2023-03-03 15:23:32,995][90258] Inference worker 0-0 is ready!
100
+ [2023-03-03 15:23:32,996][90258] All inference workers are ready! Signal rollout workers to start!
101
+ [2023-03-03 15:23:33,013][90464] Doom resolution: 160x120, resize resolution: (128, 72)
102
+ [2023-03-03 15:23:33,013][90462] Doom resolution: 160x120, resize resolution: (128, 72)
103
+ [2023-03-03 15:23:33,014][90483] Doom resolution: 160x120, resize resolution: (128, 72)
104
+ [2023-03-03 15:23:33,015][90465] Doom resolution: 160x120, resize resolution: (128, 72)
105
+ [2023-03-03 15:23:33,018][90482] Doom resolution: 160x120, resize resolution: (128, 72)
106
+ [2023-03-03 15:23:33,018][90463] Doom resolution: 160x120, resize resolution: (128, 72)
107
+ [2023-03-03 15:23:33,018][90466] Doom resolution: 160x120, resize resolution: (128, 72)
108
+ [2023-03-03 15:23:33,019][90461] Doom resolution: 160x120, resize resolution: (128, 72)
109
+ [2023-03-03 15:23:33,206][90463] Decorrelating experience for 0 frames...
110
+ [2023-03-03 15:23:33,239][90462] Decorrelating experience for 0 frames...
111
+ [2023-03-03 15:23:33,243][90465] Decorrelating experience for 0 frames...
112
+ [2023-03-03 15:23:33,275][90483] Decorrelating experience for 0 frames...
113
+ [2023-03-03 15:23:33,276][90464] Decorrelating experience for 0 frames...
114
+ [2023-03-03 15:23:33,395][90466] Decorrelating experience for 0 frames...
115
+ [2023-03-03 15:23:33,439][90462] Decorrelating experience for 32 frames...
116
+ [2023-03-03 15:23:33,442][90465] Decorrelating experience for 32 frames...
117
+ [2023-03-03 15:23:33,457][90483] Decorrelating experience for 32 frames...
118
+ [2023-03-03 15:23:33,464][90464] Decorrelating experience for 32 frames...
119
+ [2023-03-03 15:23:33,473][90463] Decorrelating experience for 32 frames...
120
+ [2023-03-03 15:23:33,514][90482] Decorrelating experience for 0 frames...
121
+ [2023-03-03 15:23:33,578][90466] Decorrelating experience for 32 frames...
122
+ [2023-03-03 15:23:33,634][90462] Decorrelating experience for 64 frames...
123
+ [2023-03-03 15:23:33,647][90483] Decorrelating experience for 64 frames...
124
+ [2023-03-03 15:23:33,678][90461] Decorrelating experience for 0 frames...
125
+ [2023-03-03 15:23:33,725][90482] Decorrelating experience for 32 frames...
126
+ [2023-03-03 15:23:33,763][90463] Decorrelating experience for 64 frames...
127
+ [2023-03-03 15:23:33,766][90466] Decorrelating experience for 64 frames...
128
+ [2023-03-03 15:23:33,805][90464] Decorrelating experience for 64 frames...
129
+ [2023-03-03 15:23:33,945][90463] Decorrelating experience for 96 frames...
130
+ [2023-03-03 15:23:33,956][90461] Decorrelating experience for 32 frames...
131
+ [2023-03-03 15:23:33,987][90483] Decorrelating experience for 96 frames...
132
+ [2023-03-03 15:23:34,000][90465] Decorrelating experience for 64 frames...
133
+ [2023-03-03 15:23:34,014][90466] Decorrelating experience for 96 frames...
134
+ [2023-03-03 15:23:34,042][90464] Decorrelating experience for 96 frames...
135
+ [2023-03-03 15:23:34,193][90482] Decorrelating experience for 64 frames...
136
+ [2023-03-03 15:23:34,215][90462] Decorrelating experience for 96 frames...
137
+ [2023-03-03 15:23:34,230][90465] Decorrelating experience for 96 frames...
138
+ [2023-03-03 15:23:34,267][90461] Decorrelating experience for 64 frames...
139
+ [2023-03-03 15:23:34,431][90447] Signal inference workers to stop experience collection...
140
+ [2023-03-03 15:23:34,433][90460] InferenceWorker_p0-w0: stopping experience collection
141
+ [2023-03-03 15:23:34,471][90482] Decorrelating experience for 96 frames...
142
+ [2023-03-03 15:23:34,478][90461] Decorrelating experience for 96 frames...
143
+ [2023-03-03 15:23:34,676][90447] Signal inference workers to resume experience collection...
144
+ [2023-03-03 15:23:34,676][90460] InferenceWorker_p0-w0: resuming experience collection
145
+ [2023-03-03 15:23:34,989][90258] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4096. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
146
+ [2023-03-03 15:23:34,990][90258] Avg episode reward: [(0, '3.176')]
147
+ [2023-03-03 15:23:36,121][90460] Updated weights for policy 0, policy_version 10 (0.0193)
148
+ [2023-03-03 15:23:37,294][90460] Updated weights for policy 0, policy_version 20 (0.0006)
149
+ [2023-03-03 15:23:38,492][90460] Updated weights for policy 0, policy_version 30 (0.0006)
150
+ [2023-03-03 15:23:39,622][90460] Updated weights for policy 0, policy_version 40 (0.0006)
151
+ [2023-03-03 15:23:39,989][90258] Fps is (10 sec: 34406.6, 60 sec: 34406.6, 300 sec: 34406.6). Total num frames: 176128. Throughput: 0: 7185.6. Samples: 35928. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
152
+ [2023-03-03 15:23:39,990][90258] Avg episode reward: [(0, '4.483')]
153
+ [2023-03-03 15:23:39,990][90447] Saving new best policy, reward=4.483!
154
+ [2023-03-03 15:23:40,811][90460] Updated weights for policy 0, policy_version 50 (0.0005)
155
+ [2023-03-03 15:23:42,034][90460] Updated weights for policy 0, policy_version 60 (0.0006)
156
+ [2023-03-03 15:23:43,222][90460] Updated weights for policy 0, policy_version 70 (0.0006)
157
+ [2023-03-03 15:23:44,405][90460] Updated weights for policy 0, policy_version 80 (0.0005)
158
+ [2023-03-03 15:23:44,989][90258] Fps is (10 sec: 34406.4, 60 sec: 34406.4, 300 sec: 34406.4). Total num frames: 348160. Throughput: 0: 6194.2. Samples: 61942. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
159
+ [2023-03-03 15:23:44,990][90258] Avg episode reward: [(0, '4.403')]
160
+ [2023-03-03 15:23:45,560][90460] Updated weights for policy 0, policy_version 90 (0.0006)
161
+ [2023-03-03 15:23:46,182][90258] Heartbeat connected on Batcher_0
162
+ [2023-03-03 15:23:46,184][90258] Heartbeat connected on LearnerWorker_p0
163
+ [2023-03-03 15:23:46,188][90258] Heartbeat connected on InferenceWorker_p0-w0
164
+ [2023-03-03 15:23:46,191][90258] Heartbeat connected on RolloutWorker_w0
165
+ [2023-03-03 15:23:46,192][90258] Heartbeat connected on RolloutWorker_w1
166
+ [2023-03-03 15:23:46,195][90258] Heartbeat connected on RolloutWorker_w2
167
+ [2023-03-03 15:23:46,196][90258] Heartbeat connected on RolloutWorker_w3
168
+ [2023-03-03 15:23:46,197][90258] Heartbeat connected on RolloutWorker_w4
169
+ [2023-03-03 15:23:46,199][90258] Heartbeat connected on RolloutWorker_w5
170
+ [2023-03-03 15:23:46,201][90258] Heartbeat connected on RolloutWorker_w6
171
+ [2023-03-03 15:23:46,203][90258] Heartbeat connected on RolloutWorker_w7
172
+ [2023-03-03 15:23:46,790][90460] Updated weights for policy 0, policy_version 100 (0.0006)
173
+ [2023-03-03 15:23:47,979][90460] Updated weights for policy 0, policy_version 110 (0.0006)
174
+ [2023-03-03 15:23:49,185][90460] Updated weights for policy 0, policy_version 120 (0.0006)
175
+ [2023-03-03 15:23:49,989][90258] Fps is (10 sec: 33996.5, 60 sec: 34133.2, 300 sec: 34133.2). Total num frames: 516096. Throughput: 0: 7577.6. Samples: 113664. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
176
+ [2023-03-03 15:23:49,990][90258] Avg episode reward: [(0, '4.594')]
177
+ [2023-03-03 15:23:49,990][90447] Saving new best policy, reward=4.594!
178
+ [2023-03-03 15:23:50,416][90460] Updated weights for policy 0, policy_version 130 (0.0006)
179
+ [2023-03-03 15:23:51,625][90460] Updated weights for policy 0, policy_version 140 (0.0006)
180
+ [2023-03-03 15:23:52,888][90460] Updated weights for policy 0, policy_version 150 (0.0006)
181
+ [2023-03-03 15:23:54,175][90460] Updated weights for policy 0, policy_version 160 (0.0006)
182
+ [2023-03-03 15:23:54,989][90258] Fps is (10 sec: 33177.3, 60 sec: 33791.8, 300 sec: 33791.8). Total num frames: 679936. Throughput: 0: 8178.2. Samples: 163564. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
183
+ [2023-03-03 15:23:54,990][90258] Avg episode reward: [(0, '4.407')]
184
+ [2023-03-03 15:23:55,439][90460] Updated weights for policy 0, policy_version 170 (0.0006)
185
+ [2023-03-03 15:23:56,723][90460] Updated weights for policy 0, policy_version 180 (0.0006)
186
+ [2023-03-03 15:23:57,871][90460] Updated weights for policy 0, policy_version 190 (0.0006)
187
+ [2023-03-03 15:23:59,063][90460] Updated weights for policy 0, policy_version 200 (0.0006)
188
+ [2023-03-03 15:23:59,989][90258] Fps is (10 sec: 33587.5, 60 sec: 33914.9, 300 sec: 33914.9). Total num frames: 851968. Throughput: 0: 7503.7. Samples: 187592. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
189
+ [2023-03-03 15:23:59,990][90258] Avg episode reward: [(0, '4.461')]
190
+ [2023-03-03 15:24:00,229][90460] Updated weights for policy 0, policy_version 210 (0.0006)
191
+ [2023-03-03 15:24:01,402][90460] Updated weights for policy 0, policy_version 220 (0.0006)
192
+ [2023-03-03 15:24:02,561][90460] Updated weights for policy 0, policy_version 230 (0.0005)
193
+ [2023-03-03 15:24:03,727][90460] Updated weights for policy 0, policy_version 240 (0.0005)
194
+ [2023-03-03 15:24:04,901][90460] Updated weights for policy 0, policy_version 250 (0.0005)
195
+ [2023-03-03 15:24:04,989][90258] Fps is (10 sec: 34406.4, 60 sec: 33996.7, 300 sec: 33996.7). Total num frames: 1024000. Throughput: 0: 8010.4. Samples: 240312. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
196
+ [2023-03-03 15:24:04,990][90258] Avg episode reward: [(0, '4.572')]
197
+ [2023-03-03 15:24:06,091][90460] Updated weights for policy 0, policy_version 260 (0.0005)
198
+ [2023-03-03 15:24:07,335][90460] Updated weights for policy 0, policy_version 270 (0.0006)
199
+ [2023-03-03 15:24:08,625][90460] Updated weights for policy 0, policy_version 280 (0.0006)
200
+ [2023-03-03 15:24:09,862][90460] Updated weights for policy 0, policy_version 290 (0.0006)
201
+ [2023-03-03 15:24:09,989][90258] Fps is (10 sec: 33587.1, 60 sec: 33821.3, 300 sec: 33821.3). Total num frames: 1187840. Throughput: 0: 8302.5. Samples: 290586. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
202
+ [2023-03-03 15:24:09,989][90258] Avg episode reward: [(0, '4.823')]
203
+ [2023-03-03 15:24:09,990][90447] Saving new best policy, reward=4.823!
204
+ [2023-03-03 15:24:11,103][90460] Updated weights for policy 0, policy_version 300 (0.0006)
205
+ [2023-03-03 15:24:12,385][90460] Updated weights for policy 0, policy_version 310 (0.0006)
206
+ [2023-03-03 15:24:13,708][90460] Updated weights for policy 0, policy_version 320 (0.0007)
207
+ [2023-03-03 15:24:14,989][90258] Fps is (10 sec: 32358.4, 60 sec: 33587.1, 300 sec: 33587.1). Total num frames: 1347584. Throughput: 0: 7887.0. Samples: 315482. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
208
+ [2023-03-03 15:24:14,990][90258] Avg episode reward: [(0, '5.670')]
209
+ [2023-03-03 15:24:15,017][90447] Saving new best policy, reward=5.670!
210
+ [2023-03-03 15:24:15,017][90460] Updated weights for policy 0, policy_version 330 (0.0006)
211
+ [2023-03-03 15:24:16,218][90460] Updated weights for policy 0, policy_version 340 (0.0006)
212
+ [2023-03-03 15:24:17,427][90460] Updated weights for policy 0, policy_version 350 (0.0006)
213
+ [2023-03-03 15:24:18,611][90460] Updated weights for policy 0, policy_version 360 (0.0006)
214
+ [2023-03-03 15:24:19,810][90460] Updated weights for policy 0, policy_version 370 (0.0006)
215
+ [2023-03-03 15:24:19,989][90258] Fps is (10 sec: 33177.5, 60 sec: 33678.2, 300 sec: 33678.2). Total num frames: 1519616. Throughput: 0: 8092.0. Samples: 364138. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
216
+ [2023-03-03 15:24:19,990][90258] Avg episode reward: [(0, '6.905')]
217
+ [2023-03-03 15:24:19,990][90447] Saving new best policy, reward=6.905!
218
+ [2023-03-03 15:24:21,119][90460] Updated weights for policy 0, policy_version 380 (0.0007)
219
+ [2023-03-03 15:24:22,347][90460] Updated weights for policy 0, policy_version 390 (0.0006)
220
+ [2023-03-03 15:24:23,571][90460] Updated weights for policy 0, policy_version 400 (0.0006)
221
+ [2023-03-03 15:24:24,782][90460] Updated weights for policy 0, policy_version 410 (0.0006)
222
+ [2023-03-03 15:24:24,989][90258] Fps is (10 sec: 33587.4, 60 sec: 33587.2, 300 sec: 33587.2). Total num frames: 1683456. Throughput: 0: 8403.9. Samples: 414104. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
223
+ [2023-03-03 15:24:24,990][90258] Avg episode reward: [(0, '7.918')]
224
+ [2023-03-03 15:24:24,993][90447] Saving new best policy, reward=7.918!
225
+ [2023-03-03 15:24:26,008][90460] Updated weights for policy 0, policy_version 420 (0.0006)
226
+ [2023-03-03 15:24:27,167][90460] Updated weights for policy 0, policy_version 430 (0.0006)
227
+ [2023-03-03 15:24:28,337][90460] Updated weights for policy 0, policy_version 440 (0.0006)
228
+ [2023-03-03 15:24:29,516][90460] Updated weights for policy 0, policy_version 450 (0.0006)
229
+ [2023-03-03 15:24:29,990][90258] Fps is (10 sec: 33992.3, 60 sec: 33735.3, 300 sec: 33735.3). Total num frames: 1859584. Throughput: 0: 8396.3. Samples: 439788. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
230
+ [2023-03-03 15:24:29,991][90258] Avg episode reward: [(0, '10.319')]
231
+ [2023-03-03 15:24:29,992][90447] Saving new best policy, reward=10.319!
232
+ [2023-03-03 15:24:30,685][90460] Updated weights for policy 0, policy_version 460 (0.0006)
233
+ [2023-03-03 15:24:31,866][90460] Updated weights for policy 0, policy_version 470 (0.0006)
234
+ [2023-03-03 15:24:33,058][90460] Updated weights for policy 0, policy_version 480 (0.0006)
235
+ [2023-03-03 15:24:34,278][90460] Updated weights for policy 0, policy_version 490 (0.0006)
236
+ [2023-03-03 15:24:34,989][90258] Fps is (10 sec: 34406.6, 60 sec: 33723.7, 300 sec: 33723.7). Total num frames: 2027520. Throughput: 0: 8403.7. Samples: 491828. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
237
+ [2023-03-03 15:24:34,989][90258] Avg episode reward: [(0, '11.640')]
238
+ [2023-03-03 15:24:35,007][90447] Saving new best policy, reward=11.640!
239
+ [2023-03-03 15:24:35,499][90460] Updated weights for policy 0, policy_version 500 (0.0006)
240
+ [2023-03-03 15:24:36,700][90460] Updated weights for policy 0, policy_version 510 (0.0006)
241
+ [2023-03-03 15:24:37,933][90460] Updated weights for policy 0, policy_version 520 (0.0006)
242
+ [2023-03-03 15:24:39,218][90460] Updated weights for policy 0, policy_version 530 (0.0006)
243
+ [2023-03-03 15:24:39,989][90258] Fps is (10 sec: 33591.6, 60 sec: 33655.4, 300 sec: 33713.2). Total num frames: 2195456. Throughput: 0: 8403.8. Samples: 541734. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
244
+ [2023-03-03 15:24:39,990][90258] Avg episode reward: [(0, '15.053')]
245
+ [2023-03-03 15:24:39,991][90447] Saving new best policy, reward=15.053!
246
+ [2023-03-03 15:24:40,436][90460] Updated weights for policy 0, policy_version 540 (0.0006)
247
+ [2023-03-03 15:24:41,673][90460] Updated weights for policy 0, policy_version 550 (0.0006)
248
+ [2023-03-03 15:24:42,888][90460] Updated weights for policy 0, policy_version 560 (0.0006)
249
+ [2023-03-03 15:24:44,131][90460] Updated weights for policy 0, policy_version 570 (0.0006)
250
+ [2023-03-03 15:24:44,989][90258] Fps is (10 sec: 33587.1, 60 sec: 33587.2, 300 sec: 33704.2). Total num frames: 2363392. Throughput: 0: 8422.9. Samples: 566624. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
251
+ [2023-03-03 15:24:44,990][90258] Avg episode reward: [(0, '17.175')]
252
+ [2023-03-03 15:24:44,993][90447] Saving new best policy, reward=17.175!
253
+ [2023-03-03 15:24:45,312][90460] Updated weights for policy 0, policy_version 580 (0.0006)
254
+ [2023-03-03 15:24:46,519][90460] Updated weights for policy 0, policy_version 590 (0.0006)
255
+ [2023-03-03 15:24:47,733][90460] Updated weights for policy 0, policy_version 600 (0.0006)
256
+ [2023-03-03 15:24:48,917][90460] Updated weights for policy 0, policy_version 610 (0.0006)
257
+ [2023-03-03 15:24:49,989][90258] Fps is (10 sec: 33587.0, 60 sec: 33587.2, 300 sec: 33696.4). Total num frames: 2531328. Throughput: 0: 8379.3. Samples: 617380. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
258
+ [2023-03-03 15:24:49,990][90258] Avg episode reward: [(0, '20.485')]
259
+ [2023-03-03 15:24:49,991][90447] Saving new best policy, reward=20.485!
260
+ [2023-03-03 15:24:50,119][90460] Updated weights for policy 0, policy_version 620 (0.0006)
261
+ [2023-03-03 15:24:51,284][90460] Updated weights for policy 0, policy_version 630 (0.0006)
262
+ [2023-03-03 15:24:52,492][90460] Updated weights for policy 0, policy_version 640 (0.0006)
263
+ [2023-03-03 15:24:53,765][90460] Updated weights for policy 0, policy_version 650 (0.0006)
264
+ [2023-03-03 15:24:54,968][90460] Updated weights for policy 0, policy_version 660 (0.0006)
265
+ [2023-03-03 15:24:54,989][90258] Fps is (10 sec: 33996.8, 60 sec: 33723.8, 300 sec: 33740.8). Total num frames: 2703360. Throughput: 0: 8393.2. Samples: 668282. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
266
+ [2023-03-03 15:24:54,990][90258] Avg episode reward: [(0, '20.297')]
267
+ [2023-03-03 15:24:56,181][90460] Updated weights for policy 0, policy_version 670 (0.0006)
268
+ [2023-03-03 15:24:57,477][90460] Updated weights for policy 0, policy_version 680 (0.0006)
269
+ [2023-03-03 15:24:58,661][90460] Updated weights for policy 0, policy_version 690 (0.0006)
270
+ [2023-03-03 15:24:59,900][90460] Updated weights for policy 0, policy_version 700 (0.0006)
271
+ [2023-03-03 15:24:59,989][90258] Fps is (10 sec: 33587.3, 60 sec: 33587.1, 300 sec: 33683.5). Total num frames: 2867200. Throughput: 0: 8394.3. Samples: 693224. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
272
+ [2023-03-03 15:24:59,990][90258] Avg episode reward: [(0, '19.129')]
273
+ [2023-03-03 15:25:01,130][90460] Updated weights for policy 0, policy_version 710 (0.0006)
274
+ [2023-03-03 15:25:02,368][90460] Updated weights for policy 0, policy_version 720 (0.0006)
275
+ [2023-03-03 15:25:03,551][90460] Updated weights for policy 0, policy_version 730 (0.0006)
276
+ [2023-03-03 15:25:04,822][90460] Updated weights for policy 0, policy_version 740 (0.0006)
277
+ [2023-03-03 15:25:04,989][90258] Fps is (10 sec: 33177.7, 60 sec: 33519.0, 300 sec: 33678.2). Total num frames: 3035136. Throughput: 0: 8427.6. Samples: 743382. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
278
+ [2023-03-03 15:25:04,990][90258] Avg episode reward: [(0, '22.414')]
279
+ [2023-03-03 15:25:04,993][90447] Saving new best policy, reward=22.414!
280
+ [2023-03-03 15:25:06,097][90460] Updated weights for policy 0, policy_version 750 (0.0006)
281
+ [2023-03-03 15:25:07,366][90460] Updated weights for policy 0, policy_version 760 (0.0006)
282
+ [2023-03-03 15:25:08,606][90460] Updated weights for policy 0, policy_version 770 (0.0007)
283
+ [2023-03-03 15:25:09,895][90460] Updated weights for policy 0, policy_version 780 (0.0006)
284
+ [2023-03-03 15:25:09,989][90258] Fps is (10 sec: 32768.2, 60 sec: 33450.7, 300 sec: 33587.2). Total num frames: 3194880. Throughput: 0: 8400.0. Samples: 792104. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
285
+ [2023-03-03 15:25:09,990][90258] Avg episode reward: [(0, '20.125')]
286
+ [2023-03-03 15:25:11,149][90460] Updated weights for policy 0, policy_version 790 (0.0006)
287
+ [2023-03-03 15:25:12,405][90460] Updated weights for policy 0, policy_version 800 (0.0006)
288
+ [2023-03-03 15:25:13,689][90460] Updated weights for policy 0, policy_version 810 (0.0006)
289
+ [2023-03-03 15:25:14,965][90460] Updated weights for policy 0, policy_version 820 (0.0006)
290
+ [2023-03-03 15:25:14,989][90258] Fps is (10 sec: 32358.4, 60 sec: 33519.0, 300 sec: 33546.2). Total num frames: 3358720. Throughput: 0: 8377.4. Samples: 816758. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
291
+ [2023-03-03 15:25:14,990][90258] Avg episode reward: [(0, '22.716')]
292
+ [2023-03-03 15:25:14,993][90447] Saving new best policy, reward=22.716!
293
+ [2023-03-03 15:25:16,264][90460] Updated weights for policy 0, policy_version 830 (0.0006)
294
+ [2023-03-03 15:25:17,460][90460] Updated weights for policy 0, policy_version 840 (0.0006)
295
+ [2023-03-03 15:25:18,669][90460] Updated weights for policy 0, policy_version 850 (0.0006)
296
+ [2023-03-03 15:25:19,904][90460] Updated weights for policy 0, policy_version 860 (0.0006)
297
+ [2023-03-03 15:25:19,989][90258] Fps is (10 sec: 32767.8, 60 sec: 33382.4, 300 sec: 33509.2). Total num frames: 3522560. Throughput: 0: 8305.5. Samples: 865578. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
298
+ [2023-03-03 15:25:19,990][90258] Avg episode reward: [(0, '24.941')]
299
+ [2023-03-03 15:25:19,991][90447] Saving new best policy, reward=24.941!
300
+ [2023-03-03 15:25:21,126][90460] Updated weights for policy 0, policy_version 870 (0.0006)
301
+ [2023-03-03 15:25:22,349][90460] Updated weights for policy 0, policy_version 880 (0.0006)
302
+ [2023-03-03 15:25:23,577][90460] Updated weights for policy 0, policy_version 890 (0.0006)
303
+ [2023-03-03 15:25:24,845][90460] Updated weights for policy 0, policy_version 900 (0.0006)
304
+ [2023-03-03 15:25:24,989][90258] Fps is (10 sec: 33177.5, 60 sec: 33450.7, 300 sec: 33512.7). Total num frames: 3690496. Throughput: 0: 8308.0. Samples: 915594. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
305
+ [2023-03-03 15:25:24,990][90258] Avg episode reward: [(0, '23.809')]
306
+ [2023-03-03 15:25:24,993][90447] Saving /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000901_3690496.pth...
307
+ [2023-03-03 15:25:26,107][90460] Updated weights for policy 0, policy_version 910 (0.0007)
308
+ [2023-03-03 15:25:27,363][90460] Updated weights for policy 0, policy_version 920 (0.0006)
309
+ [2023-03-03 15:25:28,556][90460] Updated weights for policy 0, policy_version 930 (0.0006)
310
+ [2023-03-03 15:25:29,810][90460] Updated weights for policy 0, policy_version 940 (0.0006)
311
+ [2023-03-03 15:25:29,989][90258] Fps is (10 sec: 33177.7, 60 sec: 33246.6, 300 sec: 33480.3). Total num frames: 3854336. Throughput: 0: 8300.2. Samples: 940134. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
312
+ [2023-03-03 15:25:29,990][90258] Avg episode reward: [(0, '24.056')]
313
+ [2023-03-03 15:25:31,074][90460] Updated weights for policy 0, policy_version 950 (0.0006)
314
+ [2023-03-03 15:25:32,337][90460] Updated weights for policy 0, policy_version 960 (0.0006)
315
+ [2023-03-03 15:25:33,597][90460] Updated weights for policy 0, policy_version 970 (0.0006)
316
+ [2023-03-03 15:25:34,647][90447] Stopping Batcher_0...
317
+ [2023-03-03 15:25:34,648][90447] Loop batcher_evt_loop terminating...
318
+ [2023-03-03 15:25:34,648][90447] Saving /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
319
+ [2023-03-03 15:25:34,647][90258] Component Batcher_0 stopped!
320
+ [2023-03-03 15:25:34,655][90465] Stopping RolloutWorker_w4...
321
+ [2023-03-03 15:25:34,656][90462] Stopping RolloutWorker_w0...
322
+ [2023-03-03 15:25:34,656][90465] Loop rollout_proc4_evt_loop terminating...
323
+ [2023-03-03 15:25:34,656][90462] Loop rollout_proc0_evt_loop terminating...
324
+ [2023-03-03 15:25:34,656][90464] Stopping RolloutWorker_w2...
325
+ [2023-03-03 15:25:34,655][90258] Component RolloutWorker_w4 stopped!
326
+ [2023-03-03 15:25:34,656][90463] Stopping RolloutWorker_w3...
327
+ [2023-03-03 15:25:34,656][90463] Loop rollout_proc3_evt_loop terminating...
328
+ [2023-03-03 15:25:34,656][90464] Loop rollout_proc2_evt_loop terminating...
329
+ [2023-03-03 15:25:34,656][90460] Weights refcount: 2 0
330
+ [2023-03-03 15:25:34,656][90258] Component RolloutWorker_w0 stopped!
331
+ [2023-03-03 15:25:34,657][90461] Stopping RolloutWorker_w1...
332
+ [2023-03-03 15:25:34,657][90461] Loop rollout_proc1_evt_loop terminating...
333
+ [2023-03-03 15:25:34,657][90258] Component RolloutWorker_w2 stopped!
334
+ [2023-03-03 15:25:34,658][90258] Component RolloutWorker_w3 stopped!
335
+ [2023-03-03 15:25:34,658][90482] Stopping RolloutWorker_w7...
336
+ [2023-03-03 15:25:34,658][90258] Component RolloutWorker_w1 stopped!
337
+ [2023-03-03 15:25:34,659][90482] Loop rollout_proc7_evt_loop terminating...
338
+ [2023-03-03 15:25:34,659][90466] Stopping RolloutWorker_w5...
339
+ [2023-03-03 15:25:34,659][90466] Loop rollout_proc5_evt_loop terminating...
340
+ [2023-03-03 15:25:34,659][90258] Component RolloutWorker_w7 stopped!
341
+ [2023-03-03 15:25:34,659][90258] Component RolloutWorker_w5 stopped!
342
+ [2023-03-03 15:25:34,660][90460] Stopping InferenceWorker_p0-w0...
343
+ [2023-03-03 15:25:34,660][90460] Loop inference_proc0-0_evt_loop terminating...
344
+ [2023-03-03 15:25:34,660][90258] Component InferenceWorker_p0-w0 stopped!
345
+ [2023-03-03 15:25:34,676][90483] Stopping RolloutWorker_w6...
346
+ [2023-03-03 15:25:34,677][90483] Loop rollout_proc6_evt_loop terminating...
347
+ [2023-03-03 15:25:34,677][90258] Component RolloutWorker_w6 stopped!
348
+ [2023-03-03 15:25:34,714][90447] Saving new best policy, reward=25.653!
349
+ [2023-03-03 15:25:34,792][90447] Saving /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
350
+ [2023-03-03 15:25:34,874][90447] Stopping LearnerWorker_p0...
351
+ [2023-03-03 15:25:34,874][90447] Loop learner_proc0_evt_loop terminating...
352
+ [2023-03-03 15:25:34,874][90258] Component LearnerWorker_p0 stopped!
353
+ [2023-03-03 15:25:34,875][90258] Waiting for process learner_proc0 to stop...
354
+ [2023-03-03 15:25:35,212][90258] Waiting for process inference_proc0-0 to join...
355
+ [2023-03-03 15:25:35,212][90258] Waiting for process rollout_proc0 to join...
356
+ [2023-03-03 15:25:35,213][90258] Waiting for process rollout_proc1 to join...
357
+ [2023-03-03 15:25:35,213][90258] Waiting for process rollout_proc2 to join...
358
+ [2023-03-03 15:25:35,214][90258] Waiting for process rollout_proc3 to join...
359
+ [2023-03-03 15:25:35,214][90258] Waiting for process rollout_proc4 to join...
360
+ [2023-03-03 15:25:35,215][90258] Waiting for process rollout_proc5 to join...
361
+ [2023-03-03 15:25:35,215][90258] Waiting for process rollout_proc6 to join...
362
+ [2023-03-03 15:25:35,216][90258] Waiting for process rollout_proc7 to join...
363
+ [2023-03-03 15:25:35,216][90258] Batcher 0 profile tree view:
364
+ batching: 8.3033, releasing_batches: 0.0178
365
+ [2023-03-03 15:25:35,217][90258] InferenceWorker_p0-w0 profile tree view:
366
+ wait_policy: 0.0000
367
+ wait_policy_total: 2.5839
368
+ update_model: 1.6943
369
+ weight_update: 0.0006
370
+ one_step: 0.0017
371
+ handle_policy_step: 110.3776
372
+ deserialize: 4.9021, stack: 0.5500, obs_to_device_normalize: 30.0009, forward: 42.0827, send_messages: 7.4467
373
+ prepare_outputs: 20.7199
374
+ to_cpu: 15.0518
375
+ [2023-03-03 15:25:35,217][90258] Learner 0 profile tree view:
376
+ misc: 0.0042, prepare_batch: 7.6612
377
+ train: 21.5018
378
+ epoch_init: 0.0041, minibatch_init: 0.0047, losses_postprocess: 0.2515, kl_divergence: 0.1742, after_optimizer: 8.2064
379
+ calculate_losses: 8.3142
380
+ losses_init: 0.0024, forward_head: 0.5648, bptt_initial: 6.0024, tail: 0.3384, advantages_returns: 0.1002, losses: 0.6245
381
+ bptt: 0.5717
382
+ bptt_forward_core: 0.5459
383
+ update: 4.2840
384
+ clip: 0.6168
385
+ [2023-03-03 15:25:35,217][90258] RolloutWorker_w0 profile tree view:
386
+ wait_for_trajectories: 0.0913, enqueue_policy_requests: 4.6418, env_step: 61.2028, overhead: 5.1253, complete_rollouts: 0.1457
387
+ save_policy_outputs: 5.3083
388
+ split_output_tensors: 2.6432
389
+ [2023-03-03 15:25:35,217][90258] RolloutWorker_w7 profile tree view:
390
+ wait_for_trajectories: 0.0903, enqueue_policy_requests: 4.6051, env_step: 63.7948, overhead: 5.1927, complete_rollouts: 0.1508
391
+ save_policy_outputs: 5.3090
392
+ split_output_tensors: 2.6476
393
+ [2023-03-03 15:25:35,218][90258] Loop Runner_EvtLoop terminating...
394
+ [2023-03-03 15:25:35,218][90258] Runner profile tree view:
395
+ main_loop: 129.0154
396
+ [2023-03-03 15:25:35,218][90258] Collected {0: 4005888}, FPS: 31049.7
397
+ [2023-03-03 15:25:41,834][90258] Loading existing experiment configuration from /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/config.json
398
+ [2023-03-03 15:25:41,834][90258] Overriding arg 'num_workers' with value 1 passed from command line
399
+ [2023-03-03 15:25:41,835][90258] Adding new argument 'no_render'=True that is not in the saved config file!
400
+ [2023-03-03 15:25:41,835][90258] Adding new argument 'save_video'=True that is not in the saved config file!
401
+ [2023-03-03 15:25:41,835][90258] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
402
+ [2023-03-03 15:25:41,836][90258] Adding new argument 'video_name'=None that is not in the saved config file!
403
+ [2023-03-03 15:25:41,836][90258] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
404
+ [2023-03-03 15:25:41,836][90258] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
405
+ [2023-03-03 15:25:41,837][90258] Adding new argument 'push_to_hub'=False that is not in the saved config file!
406
+ [2023-03-03 15:25:41,837][90258] Adding new argument 'hf_repository'=None that is not in the saved config file!
407
+ [2023-03-03 15:25:41,837][90258] Adding new argument 'policy_index'=0 that is not in the saved config file!
408
+ [2023-03-03 15:25:41,838][90258] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
409
+ [2023-03-03 15:25:41,838][90258] Adding new argument 'train_script'=None that is not in the saved config file!
410
+ [2023-03-03 15:25:41,838][90258] Adding new argument 'enjoy_script'=None that is not in the saved config file!
411
+ [2023-03-03 15:25:41,838][90258] Using frameskip 1 and render_action_repeat=4 for evaluation
412
+ [2023-03-03 15:25:41,848][90258] Doom resolution: 160x120, resize resolution: (128, 72)
413
+ [2023-03-03 15:25:41,849][90258] RunningMeanStd input shape: (3, 72, 128)
414
+ [2023-03-03 15:25:41,850][90258] RunningMeanStd input shape: (1,)
415
+ [2023-03-03 15:25:41,858][90258] ConvEncoder: input_channels=3
416
+ [2023-03-03 15:25:41,931][90258] Conv encoder output size: 512
417
+ [2023-03-03 15:25:41,932][90258] Policy head output size: 512
418
+ [2023-03-03 15:25:44,781][90258] Loading state from checkpoint /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
419
+ [2023-03-03 15:25:45,080][90258] Num frames 100...
420
+ [2023-03-03 15:25:45,129][90258] Num frames 200...
421
+ [2023-03-03 15:25:45,181][90258] Num frames 300...
422
+ [2023-03-03 15:25:45,247][90258] Num frames 400...
423
+ [2023-03-03 15:25:45,300][90258] Num frames 500...
424
+ [2023-03-03 15:25:45,351][90258] Num frames 600...
425
+ [2023-03-03 15:25:45,401][90258] Num frames 700...
426
+ [2023-03-03 15:25:45,452][90258] Num frames 800...
427
+ [2023-03-03 15:25:45,504][90258] Num frames 900...
428
+ [2023-03-03 15:25:45,554][90258] Num frames 1000...
429
+ [2023-03-03 15:25:45,603][90258] Num frames 1100...
430
+ [2023-03-03 15:25:45,654][90258] Num frames 1200...
431
+ [2023-03-03 15:25:45,704][90258] Num frames 1300...
432
+ [2023-03-03 15:25:45,756][90258] Num frames 1400...
433
+ [2023-03-03 15:25:45,830][90258] Avg episode rewards: #0: 36.420, true rewards: #0: 14.420
434
+ [2023-03-03 15:25:45,831][90258] Avg episode reward: 36.420, avg true_objective: 14.420
435
+ [2023-03-03 15:25:45,865][90258] Num frames 1500...
436
+ [2023-03-03 15:25:45,921][90258] Num frames 1600...
437
+ [2023-03-03 15:25:45,974][90258] Num frames 1700...
438
+ [2023-03-03 15:25:46,027][90258] Num frames 1800...
439
+ [2023-03-03 15:25:46,079][90258] Num frames 1900...
440
+ [2023-03-03 15:25:46,134][90258] Avg episode rewards: #0: 23.025, true rewards: #0: 9.525
441
+ [2023-03-03 15:25:46,135][90258] Avg episode reward: 23.025, avg true_objective: 9.525
442
+ [2023-03-03 15:25:46,184][90258] Num frames 2000...
443
+ [2023-03-03 15:25:46,235][90258] Num frames 2100...
444
+ [2023-03-03 15:25:46,285][90258] Num frames 2200...
445
+ [2023-03-03 15:25:46,334][90258] Num frames 2300...
446
+ [2023-03-03 15:25:46,414][90258] Avg episode rewards: #0: 18.177, true rewards: #0: 7.843
447
+ [2023-03-03 15:25:46,416][90258] Avg episode reward: 18.177, avg true_objective: 7.843
448
+ [2023-03-03 15:25:46,458][90258] Num frames 2400...
449
+ [2023-03-03 15:25:46,508][90258] Num frames 2500...
450
+ [2023-03-03 15:25:46,558][90258] Num frames 2600...
451
+ [2023-03-03 15:25:46,608][90258] Num frames 2700...
452
+ [2023-03-03 15:25:46,658][90258] Num frames 2800...
453
+ [2023-03-03 15:25:46,709][90258] Num frames 2900...
454
+ [2023-03-03 15:25:46,776][90258] Avg episode rewards: #0: 16.073, true rewards: #0: 7.322
455
+ [2023-03-03 15:25:46,778][90258] Avg episode reward: 16.073, avg true_objective: 7.322
456
+ [2023-03-03 15:25:46,835][90258] Num frames 3000...
457
+ [2023-03-03 15:25:46,898][90258] Num frames 3100...
458
+ [2023-03-03 15:25:46,961][90258] Num frames 3200...
459
+ [2023-03-03 15:25:47,033][90258] Num frames 3300...
460
+ [2023-03-03 15:25:47,088][90258] Num frames 3400...
461
+ [2023-03-03 15:25:47,140][90258] Num frames 3500...
462
+ [2023-03-03 15:25:47,195][90258] Num frames 3600...
463
+ [2023-03-03 15:25:47,247][90258] Num frames 3700...
464
+ [2023-03-03 15:25:47,298][90258] Num frames 3800...
465
+ [2023-03-03 15:25:47,350][90258] Num frames 3900...
466
+ [2023-03-03 15:25:47,401][90258] Num frames 4000...
467
+ [2023-03-03 15:25:47,450][90258] Num frames 4100...
468
+ [2023-03-03 15:25:47,500][90258] Num frames 4200...
469
+ [2023-03-03 15:25:47,589][90258] Avg episode rewards: #0: 20.546, true rewards: #0: 8.546
470
+ [2023-03-03 15:25:47,591][90258] Avg episode reward: 20.546, avg true_objective: 8.546
471
+ [2023-03-03 15:25:47,622][90258] Num frames 4300...
472
+ [2023-03-03 15:25:47,672][90258] Num frames 4400...
473
+ [2023-03-03 15:25:47,722][90258] Num frames 4500...
474
+ [2023-03-03 15:25:47,772][90258] Num frames 4600...
475
+ [2023-03-03 15:25:47,829][90258] Num frames 4700...
476
+ [2023-03-03 15:25:47,887][90258] Num frames 4800...
477
+ [2023-03-03 15:25:47,985][90258] Avg episode rewards: #0: 19.128, true rewards: #0: 8.128
478
+ [2023-03-03 15:25:47,986][90258] Avg episode reward: 19.128, avg true_objective: 8.128
479
+ [2023-03-03 15:25:48,001][90258] Num frames 4900...
480
+ [2023-03-03 15:25:48,054][90258] Num frames 5000...
481
+ [2023-03-03 15:25:48,103][90258] Num frames 5100...
482
+ [2023-03-03 15:25:48,154][90258] Num frames 5200...
483
+ [2023-03-03 15:25:48,207][90258] Num frames 5300...
484
+ [2023-03-03 15:25:48,261][90258] Num frames 5400...
485
+ [2023-03-03 15:25:48,338][90258] Avg episode rewards: #0: 17.926, true rewards: #0: 7.783
486
+ [2023-03-03 15:25:48,340][90258] Avg episode reward: 17.926, avg true_objective: 7.783
487
+ [2023-03-03 15:25:48,387][90258] Num frames 5500...
488
+ [2023-03-03 15:25:48,442][90258] Num frames 5600...
489
+ [2023-03-03 15:25:48,492][90258] Num frames 5700...
490
+ [2023-03-03 15:25:48,542][90258] Num frames 5800...
491
+ [2023-03-03 15:25:48,599][90258] Num frames 5900...
492
+ [2023-03-03 15:25:48,650][90258] Num frames 6000...
493
+ [2023-03-03 15:25:48,702][90258] Num frames 6100...
494
+ [2023-03-03 15:25:48,766][90258] Num frames 6200...
495
+ [2023-03-03 15:25:48,821][90258] Num frames 6300...
496
+ [2023-03-03 15:25:48,873][90258] Num frames 6400...
497
+ [2023-03-03 15:25:48,925][90258] Num frames 6500...
498
+ [2023-03-03 15:25:48,976][90258] Num frames 6600...
499
+ [2023-03-03 15:25:49,029][90258] Num frames 6700...
500
+ [2023-03-03 15:25:49,082][90258] Num frames 6800...
501
+ [2023-03-03 15:25:49,142][90258] Avg episode rewards: #0: 19.895, true rewards: #0: 8.520
502
+ [2023-03-03 15:25:49,144][90258] Avg episode reward: 19.895, avg true_objective: 8.520
503
+ [2023-03-03 15:25:49,206][90258] Num frames 6900...
504
+ [2023-03-03 15:25:49,260][90258] Num frames 7000...
505
+ [2023-03-03 15:25:49,314][90258] Num frames 7100...
506
+ [2023-03-03 15:25:49,368][90258] Num frames 7200...
507
+ [2023-03-03 15:25:49,421][90258] Num frames 7300...
508
+ [2023-03-03 15:25:49,476][90258] Num frames 7400...
509
+ [2023-03-03 15:25:49,533][90258] Num frames 7500...
510
+ [2023-03-03 15:25:49,586][90258] Num frames 7600...
511
+ [2023-03-03 15:25:49,638][90258] Num frames 7700...
512
+ [2023-03-03 15:25:49,690][90258] Num frames 7800...
513
+ [2023-03-03 15:25:49,743][90258] Num frames 7900...
514
+ [2023-03-03 15:25:49,797][90258] Num frames 8000...
515
+ [2023-03-03 15:25:49,850][90258] Num frames 8100...
516
+ [2023-03-03 15:25:49,950][90258] Avg episode rewards: #0: 21.102, true rewards: #0: 9.102
517
+ [2023-03-03 15:25:49,951][90258] Avg episode reward: 21.102, avg true_objective: 9.102
518
+ [2023-03-03 15:25:49,969][90258] Num frames 8200...
519
+ [2023-03-03 15:25:50,035][90258] Num frames 8300...
520
+ [2023-03-03 15:25:50,092][90258] Num frames 8400...
521
+ [2023-03-03 15:25:50,144][90258] Num frames 8500...
522
+ [2023-03-03 15:25:50,198][90258] Num frames 8600...
523
+ [2023-03-03 15:25:50,260][90258] Num frames 8700...
524
+ [2023-03-03 15:25:50,313][90258] Num frames 8800...
525
+ [2023-03-03 15:25:50,365][90258] Num frames 8900...
526
+ [2023-03-03 15:25:50,420][90258] Num frames 9000...
527
+ [2023-03-03 15:25:50,481][90258] Num frames 9100...
528
+ [2023-03-03 15:25:50,538][90258] Num frames 9200...
529
+ [2023-03-03 15:25:50,595][90258] Num frames 9300...
530
+ [2023-03-03 15:25:50,650][90258] Num frames 9400...
531
+ [2023-03-03 15:25:50,702][90258] Num frames 9500...
532
+ [2023-03-03 15:25:50,753][90258] Num frames 9600...
533
+ [2023-03-03 15:25:50,805][90258] Num frames 9700...
534
+ [2023-03-03 15:25:50,858][90258] Num frames 9800...
535
+ [2023-03-03 15:25:50,913][90258] Num frames 9900...
536
+ [2023-03-03 15:25:50,966][90258] Num frames 10000...
537
+ [2023-03-03 15:25:51,041][90258] Avg episode rewards: #0: 23.542, true rewards: #0: 10.042
538
+ [2023-03-03 15:25:51,042][90258] Avg episode reward: 23.542, avg true_objective: 10.042
539
+ [2023-03-03 15:26:04,458][90258] Replay video saved to /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/replay.mp4!
540
+ [2023-03-03 15:27:26,535][90258] Loading existing experiment configuration from /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/config.json
541
+ [2023-03-03 15:27:26,535][90258] Overriding arg 'num_workers' with value 1 passed from command line
542
+ [2023-03-03 15:27:26,536][90258] Adding new argument 'no_render'=True that is not in the saved config file!
543
+ [2023-03-03 15:27:26,536][90258] Adding new argument 'save_video'=True that is not in the saved config file!
544
+ [2023-03-03 15:27:26,537][90258] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
545
+ [2023-03-03 15:27:26,537][90258] Adding new argument 'video_name'=None that is not in the saved config file!
546
+ [2023-03-03 15:27:26,537][90258] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
547
+ [2023-03-03 15:27:26,538][90258] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
548
+ [2023-03-03 15:27:26,538][90258] Adding new argument 'push_to_hub'=True that is not in the saved config file!
549
+ [2023-03-03 15:27:26,538][90258] Adding new argument 'hf_repository'='CloXD/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
550
+ [2023-03-03 15:27:26,539][90258] Adding new argument 'policy_index'=0 that is not in the saved config file!
551
+ [2023-03-03 15:27:26,539][90258] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
552
+ [2023-03-03 15:27:26,540][90258] Adding new argument 'train_script'=None that is not in the saved config file!
553
+ [2023-03-03 15:27:26,540][90258] Adding new argument 'enjoy_script'=None that is not in the saved config file!
554
+ [2023-03-03 15:27:26,540][90258] Using frameskip 1 and render_action_repeat=4 for evaluation
555
+ [2023-03-03 15:27:26,549][90258] RunningMeanStd input shape: (3, 72, 128)
556
+ [2023-03-03 15:27:26,550][90258] RunningMeanStd input shape: (1,)
557
+ [2023-03-03 15:27:26,557][90258] ConvEncoder: input_channels=3
558
+ [2023-03-03 15:27:26,578][90258] Conv encoder output size: 512
559
+ [2023-03-03 15:27:26,579][90258] Policy head output size: 512
560
+ [2023-03-03 15:27:26,601][90258] Loading state from checkpoint /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-03 15:27:26,887][90258] Num frames 100...
+ [2023-03-03 15:27:26,940][90258] Num frames 200...
+ [2023-03-03 15:27:26,992][90258] Num frames 300...
+ [2023-03-03 15:27:27,044][90258] Num frames 400...
+ [2023-03-03 15:27:27,099][90258] Num frames 500...
+ [2023-03-03 15:27:27,151][90258] Num frames 600...
+ [2023-03-03 15:27:27,203][90258] Num frames 700...
+ [2023-03-03 15:27:27,305][90258] Avg episode rewards: #0: 17.940, true rewards: #0: 7.940
+ [2023-03-03 15:27:27,305][90258] Avg episode reward: 17.940, avg true_objective: 7.940
+ [2023-03-03 15:27:27,310][90258] Num frames 800...
+ [2023-03-03 15:27:27,362][90258] Num frames 900...
+ [2023-03-03 15:27:27,418][90258] Num frames 1000...
+ [2023-03-03 15:27:27,468][90258] Num frames 1100...
+ [2023-03-03 15:27:27,518][90258] Num frames 1200...
+ [2023-03-03 15:27:27,570][90258] Num frames 1300...
+ [2023-03-03 15:27:27,622][90258] Num frames 1400...
+ [2023-03-03 15:27:27,677][90258] Num frames 1500...
+ [2023-03-03 15:27:27,730][90258] Num frames 1600...
+ [2023-03-03 15:27:27,782][90258] Num frames 1700...
+ [2023-03-03 15:27:27,833][90258] Num frames 1800...
+ [2023-03-03 15:27:27,885][90258] Num frames 1900...
+ [2023-03-03 15:27:27,935][90258] Num frames 2000...
+ [2023-03-03 15:27:27,993][90258] Avg episode rewards: #0: 21.550, true rewards: #0: 10.050
+ [2023-03-03 15:27:27,994][90258] Avg episode reward: 21.550, avg true_objective: 10.050
+ [2023-03-03 15:27:28,060][90258] Num frames 2100...
+ [2023-03-03 15:27:28,109][90258] Num frames 2200...
+ [2023-03-03 15:27:28,160][90258] Num frames 2300...
+ [2023-03-03 15:27:28,210][90258] Num frames 2400...
+ [2023-03-03 15:27:28,265][90258] Num frames 2500...
+ [2023-03-03 15:27:28,314][90258] Num frames 2600...
+ [2023-03-03 15:27:28,367][90258] Num frames 2700...
+ [2023-03-03 15:27:28,417][90258] Num frames 2800...
+ [2023-03-03 15:27:28,508][90258] Avg episode rewards: #0: 20.580, true rewards: #0: 9.580
+ [2023-03-03 15:27:28,510][90258] Avg episode reward: 20.580, avg true_objective: 9.580
+ [2023-03-03 15:27:28,541][90258] Num frames 2900...
+ [2023-03-03 15:27:28,594][90258] Num frames 3000...
+ [2023-03-03 15:27:28,646][90258] Num frames 3100...
+ [2023-03-03 15:27:28,697][90258] Num frames 3200...
+ [2023-03-03 15:27:28,766][90258] Num frames 3300...
+ [2023-03-03 15:27:28,816][90258] Num frames 3400...
+ [2023-03-03 15:27:28,870][90258] Num frames 3500...
+ [2023-03-03 15:27:28,925][90258] Num frames 3600...
+ [2023-03-03 15:27:28,977][90258] Num frames 3700...
+ [2023-03-03 15:27:29,030][90258] Num frames 3800...
+ [2023-03-03 15:27:29,082][90258] Num frames 3900...
+ [2023-03-03 15:27:29,136][90258] Num frames 4000...
+ [2023-03-03 15:27:29,190][90258] Num frames 4100...
+ [2023-03-03 15:27:29,241][90258] Num frames 4200...
+ [2023-03-03 15:27:29,292][90258] Num frames 4300...
+ [2023-03-03 15:27:29,344][90258] Num frames 4400...
+ [2023-03-03 15:27:29,396][90258] Num frames 4500...
+ [2023-03-03 15:27:29,447][90258] Num frames 4600...
+ [2023-03-03 15:27:29,499][90258] Num frames 4700...
+ [2023-03-03 15:27:29,551][90258] Num frames 4800...
+ [2023-03-03 15:27:29,606][90258] Num frames 4900...
+ [2023-03-03 15:27:29,697][90258] Avg episode rewards: #0: 30.935, true rewards: #0: 12.435
+ [2023-03-03 15:27:29,698][90258] Avg episode reward: 30.935, avg true_objective: 12.435
+ [2023-03-03 15:27:29,714][90258] Num frames 5000...
+ [2023-03-03 15:27:29,766][90258] Num frames 5100...
+ [2023-03-03 15:27:29,816][90258] Num frames 5200...
+ [2023-03-03 15:27:29,870][90258] Num frames 5300...
+ [2023-03-03 15:27:29,919][90258] Num frames 5400...
+ [2023-03-03 15:27:29,971][90258] Num frames 5500...
+ [2023-03-03 15:27:30,023][90258] Num frames 5600...
+ [2023-03-03 15:27:30,072][90258] Num frames 5700...
+ [2023-03-03 15:27:30,163][90258] Avg episode rewards: #0: 28.548, true rewards: #0: 11.548
+ [2023-03-03 15:27:30,165][90258] Avg episode reward: 28.548, avg true_objective: 11.548
+ [2023-03-03 15:27:30,199][90258] Num frames 5800...
+ [2023-03-03 15:27:30,260][90258] Num frames 5900...
+ [2023-03-03 15:27:30,317][90258] Num frames 6000...
+ [2023-03-03 15:27:30,371][90258] Num frames 6100...
+ [2023-03-03 15:27:30,422][90258] Num frames 6200...
+ [2023-03-03 15:27:30,473][90258] Num frames 6300...
+ [2023-03-03 15:27:30,525][90258] Num frames 6400...
+ [2023-03-03 15:27:30,578][90258] Num frames 6500...
+ [2023-03-03 15:27:30,633][90258] Avg episode rewards: #0: 26.172, true rewards: #0: 10.838
+ [2023-03-03 15:27:30,634][90258] Avg episode reward: 26.172, avg true_objective: 10.838
+ [2023-03-03 15:27:30,691][90258] Num frames 6600...
+ [2023-03-03 15:27:30,741][90258] Num frames 6700...
+ [2023-03-03 15:27:30,792][90258] Num frames 6800...
+ [2023-03-03 15:27:30,844][90258] Num frames 6900...
+ [2023-03-03 15:27:30,895][90258] Num frames 7000...
+ [2023-03-03 15:27:30,947][90258] Num frames 7100...
+ [2023-03-03 15:27:30,998][90258] Num frames 7200...
+ [2023-03-03 15:27:31,071][90258] Avg episode rewards: #0: 24.770, true rewards: #0: 10.341
+ [2023-03-03 15:27:31,072][90258] Avg episode reward: 24.770, avg true_objective: 10.341
+ [2023-03-03 15:27:31,106][90258] Num frames 7300...
+ [2023-03-03 15:27:31,157][90258] Num frames 7400...
+ [2023-03-03 15:27:31,208][90258] Num frames 7500...
+ [2023-03-03 15:27:31,262][90258] Num frames 7600...
+ [2023-03-03 15:27:31,316][90258] Num frames 7700...
+ [2023-03-03 15:27:31,369][90258] Num frames 7800...
+ [2023-03-03 15:27:31,416][90258] Num frames 7900...
+ [2023-03-03 15:27:31,464][90258] Num frames 8000...
+ [2023-03-03 15:27:31,513][90258] Num frames 8100...
+ [2023-03-03 15:27:31,614][90258] Avg episode rewards: #0: 24.124, true rewards: #0: 10.249
+ [2023-03-03 15:27:31,616][90258] Avg episode reward: 24.124, avg true_objective: 10.249
+ [2023-03-03 15:27:31,619][90258] Num frames 8200...
+ [2023-03-03 15:27:31,690][90258] Num frames 8300...
+ [2023-03-03 15:27:31,738][90258] Num frames 8400...
+ [2023-03-03 15:27:31,787][90258] Num frames 8500...
+ [2023-03-03 15:27:31,836][90258] Num frames 8600...
+ [2023-03-03 15:27:31,887][90258] Num frames 8700...
+ [2023-03-03 15:27:31,935][90258] Num frames 8800...
+ [2023-03-03 15:27:31,983][90258] Num frames 8900...
+ [2023-03-03 15:27:32,035][90258] Num frames 9000...
+ [2023-03-03 15:27:32,085][90258] Num frames 9100...
+ [2023-03-03 15:27:32,136][90258] Num frames 9200...
+ [2023-03-03 15:27:32,185][90258] Num frames 9300...
+ [2023-03-03 15:27:32,252][90258] Num frames 9400...
+ [2023-03-03 15:27:32,303][90258] Num frames 9500...
+ [2023-03-03 15:27:32,355][90258] Num frames 9600...
+ [2023-03-03 15:27:32,405][90258] Num frames 9700...
+ [2023-03-03 15:27:32,456][90258] Num frames 9800...
+ [2023-03-03 15:27:32,508][90258] Num frames 9900...
+ [2023-03-03 15:27:32,559][90258] Num frames 10000...
+ [2023-03-03 15:27:32,610][90258] Num frames 10100...
+ [2023-03-03 15:27:32,660][90258] Num frames 10200...
+ [2023-03-03 15:27:32,764][90258] Avg episode rewards: #0: 27.665, true rewards: #0: 11.443
+ [2023-03-03 15:27:32,765][90258] Avg episode reward: 27.665, avg true_objective: 11.443
+ [2023-03-03 15:27:32,767][90258] Num frames 10300...
+ [2023-03-03 15:27:32,821][90258] Num frames 10400...
+ [2023-03-03 15:27:32,875][90258] Num frames 10500...
+ [2023-03-03 15:27:32,932][90258] Num frames 10600...
+ [2023-03-03 15:27:32,988][90258] Num frames 10700...
+ [2023-03-03 15:27:33,042][90258] Num frames 10800...
+ [2023-03-03 15:27:33,096][90258] Num frames 10900...
+ [2023-03-03 15:27:33,150][90258] Num frames 11000...
+ [2023-03-03 15:27:33,238][90258] Avg episode rewards: #0: 26.567, true rewards: #0: 11.067
+ [2023-03-03 15:27:33,239][90258] Avg episode reward: 26.567, avg true_objective: 11.067
+ [2023-03-03 15:27:47,330][90258] Replay video saved to /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/replay.mp4!
+ [2023-03-03 15:29:31,288][90258] Loading existing experiment configuration from /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/config.json
+ [2023-03-03 15:29:31,289][90258] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-03-03 15:29:31,289][90258] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-03-03 15:29:31,290][90258] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-03-03 15:29:31,290][90258] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-03-03 15:29:31,290][90258] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-03-03 15:29:31,291][90258] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2023-03-03 15:29:31,291][90258] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-03-03 15:29:31,291][90258] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2023-03-03 15:29:31,292][90258] Adding new argument 'hf_repository'='CloXD/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2023-03-03 15:29:31,292][90258] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-03-03 15:29:31,292][90258] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-03-03 15:29:31,292][90258] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-03-03 15:29:31,292][90258] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-03-03 15:29:31,293][90258] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-03-03 15:29:31,302][90258] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-03 15:29:31,303][90258] RunningMeanStd input shape: (1,)
+ [2023-03-03 15:29:31,309][90258] ConvEncoder: input_channels=3
+ [2023-03-03 15:29:31,332][90258] Conv encoder output size: 512
+ [2023-03-03 15:29:31,332][90258] Policy head output size: 512
+ [2023-03-03 15:29:31,367][90258] Loading state from checkpoint /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-03 15:29:31,655][90258] Num frames 100...
+ [2023-03-03 15:29:31,706][90258] Num frames 200...
+ [2023-03-03 15:29:31,757][90258] Num frames 300...
+ [2023-03-03 15:29:31,806][90258] Num frames 400...
+ [2023-03-03 15:29:31,859][90258] Num frames 500...
+ [2023-03-03 15:29:31,911][90258] Num frames 600...
+ [2023-03-03 15:29:31,962][90258] Num frames 700...
+ [2023-03-03 15:29:32,019][90258] Num frames 800...
+ [2023-03-03 15:29:32,078][90258] Num frames 900...
+ [2023-03-03 15:29:32,131][90258] Num frames 1000...
+ [2023-03-03 15:29:32,185][90258] Num frames 1100...
+ [2023-03-03 15:29:32,240][90258] Num frames 1200...
+ [2023-03-03 15:29:32,293][90258] Num frames 1300...
+ [2023-03-03 15:29:32,345][90258] Num frames 1400...
+ [2023-03-03 15:29:32,396][90258] Num frames 1500...
+ [2023-03-03 15:29:32,450][90258] Num frames 1600...
+ [2023-03-03 15:29:32,500][90258] Num frames 1700...
+ [2023-03-03 15:29:32,552][90258] Num frames 1800...
+ [2023-03-03 15:29:32,616][90258] Avg episode rewards: #0: 45.240, true rewards: #0: 18.240
+ [2023-03-03 15:29:32,617][90258] Avg episode reward: 45.240, avg true_objective: 18.240
+ [2023-03-03 15:29:32,664][90258] Num frames 1900...
+ [2023-03-03 15:29:32,717][90258] Num frames 2000...
+ [2023-03-03 15:29:32,770][90258] Num frames 2100...
+ [2023-03-03 15:29:32,829][90258] Num frames 2200...
+ [2023-03-03 15:29:32,893][90258] Num frames 2300...
+ [2023-03-03 15:29:32,948][90258] Num frames 2400...
+ [2023-03-03 15:29:33,000][90258] Num frames 2500...
+ [2023-03-03 15:29:33,052][90258] Num frames 2600...
+ [2023-03-03 15:29:33,105][90258] Num frames 2700...
+ [2023-03-03 15:29:33,157][90258] Num frames 2800...
+ [2023-03-03 15:29:33,209][90258] Num frames 2900...
+ [2023-03-03 15:29:33,261][90258] Num frames 3000...
+ [2023-03-03 15:29:33,313][90258] Num frames 3100...
+ [2023-03-03 15:29:33,364][90258] Num frames 3200...
+ [2023-03-03 15:29:33,416][90258] Num frames 3300...
+ [2023-03-03 15:29:33,488][90258] Avg episode rewards: #0: 42.695, true rewards: #0: 16.695
+ [2023-03-03 15:29:33,489][90258] Avg episode reward: 42.695, avg true_objective: 16.695
+ [2023-03-03 15:29:33,524][90258] Num frames 3400...
+ [2023-03-03 15:29:33,574][90258] Num frames 3500...
+ [2023-03-03 15:29:33,625][90258] Num frames 3600...
+ [2023-03-03 15:29:33,677][90258] Num frames 3700...
+ [2023-03-03 15:29:33,729][90258] Num frames 3800...
+ [2023-03-03 15:29:33,778][90258] Num frames 3900...
+ [2023-03-03 15:29:33,829][90258] Num frames 4000...
+ [2023-03-03 15:29:33,879][90258] Num frames 4100...
+ [2023-03-03 15:29:33,930][90258] Num frames 4200...
+ [2023-03-03 15:29:33,981][90258] Num frames 4300...
+ [2023-03-03 15:29:34,065][90258] Avg episode rewards: #0: 36.543, true rewards: #0: 14.543
+ [2023-03-03 15:29:34,067][90258] Avg episode reward: 36.543, avg true_objective: 14.543
+ [2023-03-03 15:29:34,110][90258] Num frames 4400...
+ [2023-03-03 15:29:34,160][90258] Num frames 4500...
+ [2023-03-03 15:29:34,211][90258] Num frames 4600...
+ [2023-03-03 15:29:34,261][90258] Num frames 4700...
+ [2023-03-03 15:29:34,311][90258] Num frames 4800...
+ [2023-03-03 15:29:34,361][90258] Num frames 4900...
+ [2023-03-03 15:29:34,412][90258] Num frames 5000...
+ [2023-03-03 15:29:34,461][90258] Num frames 5100...
+ [2023-03-03 15:29:34,511][90258] Num frames 5200...
+ [2023-03-03 15:29:34,563][90258] Num frames 5300...
+ [2023-03-03 15:29:34,612][90258] Num frames 5400...
+ [2023-03-03 15:29:34,662][90258] Num frames 5500...
+ [2023-03-03 15:29:34,712][90258] Num frames 5600...
+ [2023-03-03 15:29:34,762][90258] Num frames 5700...
+ [2023-03-03 15:29:34,810][90258] Num frames 5800...
+ [2023-03-03 15:29:34,860][90258] Num frames 5900...
+ [2023-03-03 15:29:34,910][90258] Num frames 6000...
+ [2023-03-03 15:29:34,959][90258] Num frames 6100...
+ [2023-03-03 15:29:35,010][90258] Num frames 6200...
+ [2023-03-03 15:29:35,059][90258] Num frames 6300...
+ [2023-03-03 15:29:35,109][90258] Num frames 6400...
+ [2023-03-03 15:29:35,194][90258] Avg episode rewards: #0: 41.157, true rewards: #0: 16.158
+ [2023-03-03 15:29:35,196][90258] Avg episode reward: 41.157, avg true_objective: 16.158
+ [2023-03-03 15:29:35,233][90258] Num frames 6500...
+ [2023-03-03 15:29:35,286][90258] Num frames 6600...
+ [2023-03-03 15:29:35,335][90258] Num frames 6700...
+ [2023-03-03 15:29:35,384][90258] Num frames 6800...
+ [2023-03-03 15:29:35,434][90258] Num frames 6900...
+ [2023-03-03 15:29:35,482][90258] Num frames 7000...
+ [2023-03-03 15:29:35,531][90258] Num frames 7100...
+ [2023-03-03 15:29:35,580][90258] Num frames 7200...
+ [2023-03-03 15:29:35,664][90258] Avg episode rewards: #0: 36.126, true rewards: #0: 14.526
+ [2023-03-03 15:29:35,665][90258] Avg episode reward: 36.126, avg true_objective: 14.526
+ [2023-03-03 15:29:35,694][90258] Num frames 7300...
+ [2023-03-03 15:29:35,747][90258] Num frames 7400...
+ [2023-03-03 15:29:35,796][90258] Num frames 7500...
+ [2023-03-03 15:29:35,846][90258] Num frames 7600...
+ [2023-03-03 15:29:35,895][90258] Num frames 7700...
+ [2023-03-03 15:29:35,946][90258] Num frames 7800...
+ [2023-03-03 15:29:35,997][90258] Num frames 7900...
+ [2023-03-03 15:29:36,048][90258] Num frames 8000...
+ [2023-03-03 15:29:36,116][90258] Avg episode rewards: #0: 32.551, true rewards: #0: 13.385
+ [2023-03-03 15:29:36,117][90258] Avg episode reward: 32.551, avg true_objective: 13.385
+ [2023-03-03 15:29:36,156][90258] Num frames 8100...
+ [2023-03-03 15:29:36,209][90258] Num frames 8200...
+ [2023-03-03 15:29:36,263][90258] Num frames 8300...
+ [2023-03-03 15:29:36,337][90258] Num frames 8400...
+ [2023-03-03 15:29:36,415][90258] Num frames 8500...
+ [2023-03-03 15:29:36,478][90258] Num frames 8600...
+ [2023-03-03 15:29:36,531][90258] Num frames 8700...
+ [2023-03-03 15:29:36,585][90258] Num frames 8800...
+ [2023-03-03 15:29:36,641][90258] Num frames 8900...
+ [2023-03-03 15:29:36,695][90258] Num frames 9000...
+ [2023-03-03 15:29:36,747][90258] Num frames 9100...
+ [2023-03-03 15:29:36,801][90258] Num frames 9200...
+ [2023-03-03 15:29:36,851][90258] Num frames 9300...
+ [2023-03-03 15:29:36,901][90258] Num frames 9400...
+ [2023-03-03 15:29:36,951][90258] Num frames 9500...
+ [2023-03-03 15:29:37,002][90258] Num frames 9600...
+ [2023-03-03 15:29:37,093][90258] Avg episode rewards: #0: 33.958, true rewards: #0: 13.816
+ [2023-03-03 15:29:37,094][90258] Avg episode reward: 33.958, avg true_objective: 13.816
+ [2023-03-03 15:29:37,114][90258] Num frames 9700...
+ [2023-03-03 15:29:37,166][90258] Num frames 9800...
+ [2023-03-03 15:29:37,220][90258] Num frames 9900...
+ [2023-03-03 15:29:37,273][90258] Num frames 10000...
+ [2023-03-03 15:29:37,327][90258] Num frames 10100...
+ [2023-03-03 15:29:37,379][90258] Num frames 10200...
+ [2023-03-03 15:29:37,433][90258] Num frames 10300...
+ [2023-03-03 15:29:37,487][90258] Num frames 10400...
+ [2023-03-03 15:29:37,536][90258] Num frames 10500...
+ [2023-03-03 15:29:37,590][90258] Avg episode rewards: #0: 32.004, true rewards: #0: 13.129
+ [2023-03-03 15:29:37,592][90258] Avg episode reward: 32.004, avg true_objective: 13.129
+ [2023-03-03 15:29:37,660][90258] Num frames 10600...
+ [2023-03-03 15:29:37,710][90258] Num frames 10700...
+ [2023-03-03 15:29:37,762][90258] Num frames 10800...
+ [2023-03-03 15:29:37,811][90258] Num frames 10900...
+ [2023-03-03 15:29:37,862][90258] Num frames 11000...
+ [2023-03-03 15:29:37,914][90258] Num frames 11100...
+ [2023-03-03 15:29:37,967][90258] Num frames 11200...
+ [2023-03-03 15:29:38,022][90258] Num frames 11300...
+ [2023-03-03 15:29:38,097][90258] Avg episode rewards: #0: 30.150, true rewards: #0: 12.594
+ [2023-03-03 15:29:38,099][90258] Avg episode reward: 30.150, avg true_objective: 12.594
+ [2023-03-03 15:29:38,154][90258] Num frames 11400...
+ [2023-03-03 15:29:38,212][90258] Num frames 11500...
+ [2023-03-03 15:29:38,263][90258] Num frames 11600...
+ [2023-03-03 15:29:38,313][90258] Num frames 11700...
+ [2023-03-03 15:29:38,363][90258] Num frames 11800...
+ [2023-03-03 15:29:38,411][90258] Num frames 11900...
+ [2023-03-03 15:29:38,474][90258] Num frames 12000...
+ [2023-03-03 15:29:38,546][90258] Num frames 12100...
+ [2023-03-03 15:29:38,625][90258] Avg episode rewards: #0: 28.939, true rewards: #0: 12.139
+ [2023-03-03 15:29:38,626][90258] Avg episode reward: 28.939, avg true_objective: 12.139
+ [2023-03-03 15:29:53,891][90258] Replay video saved to /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/replay.mp4!
+ [2023-03-03 15:32:20,498][90258] Loading existing experiment configuration from /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/config.json
+ [2023-03-03 15:32:20,498][90258] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-03-03 15:32:20,499][90258] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-03-03 15:32:20,499][90258] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-03-03 15:32:20,500][90258] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-03-03 15:32:20,500][90258] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-03-03 15:32:20,500][90258] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2023-03-03 15:32:20,501][90258] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-03-03 15:32:20,501][90258] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2023-03-03 15:32:20,501][90258] Adding new argument 'hf_repository'='CloXD/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2023-03-03 15:32:20,502][90258] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-03-03 15:32:20,502][90258] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-03-03 15:32:20,502][90258] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-03-03 15:32:20,503][90258] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-03-03 15:32:20,503][90258] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-03-03 15:32:20,513][90258] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-03 15:32:20,513][90258] RunningMeanStd input shape: (1,)
+ [2023-03-03 15:32:20,521][90258] ConvEncoder: input_channels=3
+ [2023-03-03 15:32:20,558][90258] Conv encoder output size: 512
+ [2023-03-03 15:32:20,559][90258] Policy head output size: 512
+ [2023-03-03 15:32:20,579][90258] Loading state from checkpoint /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-03 15:32:20,876][90258] Num frames 100...
+ [2023-03-03 15:32:20,935][90258] Num frames 200...
+ [2023-03-03 15:32:20,994][90258] Num frames 300...
+ [2023-03-03 15:32:21,052][90258] Num frames 400...
+ [2023-03-03 15:32:21,114][90258] Num frames 500...
+ [2023-03-03 15:32:21,167][90258] Num frames 600...
+ [2023-03-03 15:32:21,223][90258] Num frames 700...
+ [2023-03-03 15:32:21,278][90258] Num frames 800...
+ [2023-03-03 15:32:21,329][90258] Num frames 900...
+ [2023-03-03 15:32:21,379][90258] Num frames 1000...
+ [2023-03-03 15:32:21,427][90258] Num frames 1100...
+ [2023-03-03 15:32:21,477][90258] Num frames 1200...
+ [2023-03-03 15:32:21,529][90258] Num frames 1300...
+ [2023-03-03 15:32:21,578][90258] Num frames 1400...
+ [2023-03-03 15:32:21,634][90258] Avg episode rewards: #0: 34.080, true rewards: #0: 14.080
+ [2023-03-03 15:32:21,635][90258] Avg episode reward: 34.080, avg true_objective: 14.080
+ [2023-03-03 15:32:21,686][90258] Num frames 1500...
+ [2023-03-03 15:32:21,736][90258] Num frames 1600...
+ [2023-03-03 15:32:21,786][90258] Num frames 1700...
+ [2023-03-03 15:32:21,838][90258] Num frames 1800...
+ [2023-03-03 15:32:21,891][90258] Num frames 1900...
+ [2023-03-03 15:32:21,972][90258] Avg episode rewards: #0: 23.280, true rewards: #0: 9.780
+ [2023-03-03 15:32:21,974][90258] Avg episode reward: 23.280, avg true_objective: 9.780
+ [2023-03-03 15:32:22,032][90258] Num frames 2000...
+ [2023-03-03 15:32:22,082][90258] Num frames 2100...
+ [2023-03-03 15:32:22,134][90258] Num frames 2200...
+ [2023-03-03 15:32:22,183][90258] Num frames 2300...
+ [2023-03-03 15:32:22,237][90258] Num frames 2400...
+ [2023-03-03 15:32:22,288][90258] Num frames 2500...
+ [2023-03-03 15:32:22,341][90258] Num frames 2600...
+ [2023-03-03 15:32:22,391][90258] Num frames 2700...
+ [2023-03-03 15:32:22,446][90258] Num frames 2800...
+ [2023-03-03 15:32:22,509][90258] Num frames 2900...
+ [2023-03-03 15:32:22,581][90258] Num frames 3000...
+ [2023-03-03 15:32:22,646][90258] Num frames 3100...
+ [2023-03-03 15:32:22,708][90258] Num frames 3200...
+ [2023-03-03 15:32:22,763][90258] Avg episode rewards: #0: 26.680, true rewards: #0: 10.680
+ [2023-03-03 15:32:22,765][90258] Avg episode reward: 26.680, avg true_objective: 10.680
+ [2023-03-03 15:32:22,836][90258] Num frames 3300...
+ [2023-03-03 15:32:22,887][90258] Num frames 3400...
+ [2023-03-03 15:32:22,944][90258] Num frames 3500...
+ [2023-03-03 15:32:22,996][90258] Num frames 3600...
+ [2023-03-03 15:32:23,048][90258] Num frames 3700...
+ [2023-03-03 15:32:23,098][90258] Num frames 3800...
+ [2023-03-03 15:32:23,186][90258] Avg episode rewards: #0: 23.190, true rewards: #0: 9.690
+ [2023-03-03 15:32:23,187][90258] Avg episode reward: 23.190, avg true_objective: 9.690
+ [2023-03-03 15:32:23,203][90258] Num frames 3900...
+ [2023-03-03 15:32:23,254][90258] Num frames 4000...
+ [2023-03-03 15:32:23,305][90258] Num frames 4100...
+ [2023-03-03 15:32:23,355][90258] Num frames 4200...
+ [2023-03-03 15:32:23,406][90258] Num frames 4300...
+ [2023-03-03 15:32:23,455][90258] Num frames 4400...
+ [2023-03-03 15:32:23,506][90258] Num frames 4500...
+ [2023-03-03 15:32:23,555][90258] Num frames 4600...
+ [2023-03-03 15:32:23,610][90258] Num frames 4700...
+ [2023-03-03 15:32:23,661][90258] Num frames 4800...
+ [2023-03-03 15:32:23,710][90258] Num frames 4900...
+ [2023-03-03 15:32:23,767][90258] Num frames 5000...
+ [2023-03-03 15:32:23,860][90258] Avg episode rewards: #0: 23.936, true rewards: #0: 10.136
+ [2023-03-03 15:32:23,862][90258] Avg episode reward: 23.936, avg true_objective: 10.136
+ [2023-03-03 15:32:23,890][90258] Num frames 5100...
+ [2023-03-03 15:32:23,948][90258] Num frames 5200...
+ [2023-03-03 15:32:24,003][90258] Num frames 5300...
+ [2023-03-03 15:32:24,059][90258] Num frames 5400...
+ [2023-03-03 15:32:24,111][90258] Num frames 5500...
+ [2023-03-03 15:32:24,167][90258] Num frames 5600...
+ [2023-03-03 15:32:24,234][90258] Num frames 5700...
+ [2023-03-03 15:32:24,290][90258] Num frames 5800...
+ [2023-03-03 15:32:24,349][90258] Num frames 5900...
+ [2023-03-03 15:32:24,411][90258] Num frames 6000...
+ [2023-03-03 15:32:24,469][90258] Num frames 6100...
+ [2023-03-03 15:32:24,564][90258] Avg episode rewards: #0: 25.297, true rewards: #0: 10.297
+ [2023-03-03 15:32:24,565][90258] Avg episode reward: 25.297, avg true_objective: 10.297
+ [2023-03-03 15:32:24,578][90258] Num frames 6200...
+ [2023-03-03 15:32:24,631][90258] Num frames 6300...
+ [2023-03-03 15:32:24,683][90258] Num frames 6400...
+ [2023-03-03 15:32:24,733][90258] Num frames 6500...
+ [2023-03-03 15:32:24,786][90258] Num frames 6600...
+ [2023-03-03 15:32:24,836][90258] Num frames 6700...
+ [2023-03-03 15:32:24,886][90258] Num frames 6800...
+ [2023-03-03 15:32:24,937][90258] Num frames 6900...
+ [2023-03-03 15:32:24,987][90258] Num frames 7000...
+ [2023-03-03 15:32:25,050][90258] Avg episode rewards: #0: 24.312, true rewards: #0: 10.026
+ [2023-03-03 15:32:25,052][90258] Avg episode reward: 24.312, avg true_objective: 10.026
+ [2023-03-03 15:32:25,110][90258] Num frames 7100...
+ [2023-03-03 15:32:25,164][90258] Num frames 7200...
+ [2023-03-03 15:32:25,217][90258] Num frames 7300...
+ [2023-03-03 15:32:25,268][90258] Num frames 7400...
+ [2023-03-03 15:32:25,317][90258] Num frames 7500...
+ [2023-03-03 15:32:25,366][90258] Num frames 7600...
+ [2023-03-03 15:32:25,442][90258] Avg episode rewards: #0: 22.683, true rewards: #0: 9.557
+ [2023-03-03 15:32:25,443][90258] Avg episode reward: 22.683, avg true_objective: 9.557
+ [2023-03-03 15:32:25,474][90258] Num frames 7700...
+ [2023-03-03 15:32:25,529][90258] Num frames 7800...
+ [2023-03-03 15:32:25,583][90258] Num frames 7900...
+ [2023-03-03 15:32:25,636][90258] Num frames 8000...
+ [2023-03-03 15:32:25,687][90258] Num frames 8100...
+ [2023-03-03 15:32:25,736][90258] Num frames 8200...
+ [2023-03-03 15:32:25,786][90258] Num frames 8300...
+ [2023-03-03 15:32:25,834][90258] Num frames 8400...
+ [2023-03-03 15:32:25,886][90258] Num frames 8500...
+ [2023-03-03 15:32:25,936][90258] Num frames 8600...
+ [2023-03-03 15:32:25,985][90258] Num frames 8700...
+ [2023-03-03 15:32:26,037][90258] Num frames 8800...
+ [2023-03-03 15:32:26,088][90258] Num frames 8900...
+ [2023-03-03 15:32:26,138][90258] Num frames 9000...
+ [2023-03-03 15:32:26,189][90258] Num frames 9100...
+ [2023-03-03 15:32:26,241][90258] Num frames 9200...
+ [2023-03-03 15:32:26,297][90258] Num frames 9300...
+ [2023-03-03 15:32:26,351][90258] Num frames 9400...
+ [2023-03-03 15:32:26,403][90258] Num frames 9500...
+ [2023-03-03 15:32:26,455][90258] Num frames 9600...
+ [2023-03-03 15:32:26,505][90258] Num frames 9700...
+ [2023-03-03 15:32:26,584][90258] Avg episode rewards: #0: 25.718, true rewards: #0: 10.829
+ [2023-03-03 15:32:26,585][90258] Avg episode reward: 25.718, avg true_objective: 10.829
+ [2023-03-03 15:32:26,633][90258] Num frames 9800...
+ [2023-03-03 15:32:26,686][90258] Num frames 9900...
+ [2023-03-03 15:32:26,735][90258] Num frames 10000...
+ [2023-03-03 15:32:26,783][90258] Num frames 10100...
+ [2023-03-03 15:32:26,831][90258] Num frames 10200...
+ [2023-03-03 15:32:26,879][90258] Num frames 10300...
+ [2023-03-03 15:32:26,928][90258] Num frames 10400...
+ [2023-03-03 15:32:26,990][90258] Avg episode rewards: #0: 24.318, true rewards: #0: 10.418
+ [2023-03-03 15:32:26,990][90258] Avg episode reward: 24.318, avg true_objective: 10.418
+ [2023-03-03 15:32:40,153][90258] Replay video saved to /home/lorencl/git/ReinforcementLearning/Lesson8/train_dir/default_experiment/replay.mp4!