michal512 committed on
Commit e60ce52
1 Parent(s): ee2ec2f

Upload . with huggingface_hub

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
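
The added line routes `replay.mp4` through Git LFS, so the repository stores a small pointer file instead of the ~16 MB video blob. A sketch of the equivalent manual step (assuming `git-lfs` is installed; `huggingface_hub` wrote this attribute automatically here):

```
# Track the video with Git LFS; this appends the same filter line
# shown above to .gitattributes.
git lfs track "replay.mp4"
```
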
.summary/0/events.out.tfevents.1678279038.michal-H81M-S2H ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9b82efe7d17648cf58921c9df807a59cae51c633925601ae4e95e8c994dc4c3
+ size 8894
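
This three-line file is a Git LFS pointer, not the TensorBoard event file itself: `oid` is the SHA-256 digest of the real blob and `size` is its byte count. A minimal sketch for checking that a downloaded file matches its pointer (path and digest copied from the pointer above):

```python
import hashlib

# Recompute the SHA-256 of the downloaded event file; it should equal
# the pointer's oid.
path = ".summary/0/events.out.tfevents.1678279038.michal-H81M-S2H"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "a9b82efe7d17648cf58921c9df807a59cae51c633925601ae4e95e8c994dc4c3"
```
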
.summary/0/events.out.tfevents.1678282286.michal-H81M-S2H ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19490d73b4edd3abf8e82aa2026d116475f9de0a069106f63fddab0fa6cfad35
+ size 104215
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 8.85 +/- 2.91
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r michal512/rl_course_vizdoom_health_gathering_supreme
+ ```
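
If you only want the raw files (say, `config.json` or `replay.mp4`), the repository can also be fetched directly with the `huggingface_hub` Python API. A minimal sketch, assuming `huggingface_hub` is installed; the `local_dir` path is just an example:

```python
from huggingface_hub import snapshot_download

# Pull every file in this repo (checkpoints, config.json, replay.mp4, logs)
# into a local directory that Sample-Factory can use as an experiment dir.
snapshot_download(
    repo_id="michal512/rl_course_vizdoom_health_gathering_supreme",
    local_dir="./train_dir/rl_course_vizdoom_health_gathering_supreme",
)
```
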
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```
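
The module placeholder depends on your install; for the ViZDoom example environments bundled with Sample-Factory, the enjoy entry point is `sf_examples.vizdoom.enjoy_vizdoom` (an assumption inferred from the `sf_examples.vizdoom` package paths visible in `sf_log.txt` below), so the concrete command would look like:

```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```
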
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
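
As with the enjoy script, the concrete module for the bundled ViZDoom environments would be `sf_examples.vizdoom.train_vizdoom` (again an assumption based on the `sf_examples` package layout seen in the log):

```
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```
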
+
+ Note: you may need to set `--train_for_env_steps` to a suitably high number, because the experiment resumes from the env-step count at which it previously stopped. This run, for example, stopped at 4,005,888 env steps (see `checkpoint_000000978_4005888.pth`), so resuming with the original `--train_for_env_steps=4000000` would exit immediately; `10000000000` keeps it training.
+
checkpoint_p0/best_000000892_3653632_reward_22.082.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc8edb8ef64f85e4cd0dc6aa8962225fb5aca5eb431de88471f9fc7933b0c076
+ size 34924044
checkpoint_p0/checkpoint_000000081_331776.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0e165aaa4a3b71056af9b8f96b9eda620f79ec570fc7b0a0167746b000e7228
+ size 34923980
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b750d17d7dad1828db427c1c5e4a67c4030d382b18db4338839a8912f4a1533d
+ size 34924044
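
These `.pth` checkpoints are PyTorch saves produced by the learner (see the `Saving ... checkpoint_000000081_331776.pth` lines in `sf_log.txt`); the filename encodes the policy version and env-step count, and the best-reward checkpoint embeds its reward (22.082). A sketch for inspecting one outside Sample-Factory; the dict keys are assumptions, not a documented API:

```python
import torch

# Inspect a downloaded checkpoint on CPU. Key names such as "train_step"
# and "env_steps" are assumptions inferred from sf_log.txt ("Loaded
# experiment state at self.train_step=81, self.env_steps=331776"); the
# authoritative layout is whatever Sample-Factory saved with torch.save.
ckpt = torch.load("checkpoint_p0/checkpoint_000000081_331776.pth", map_location="cpu")
print(sorted(ckpt.keys()))
print(ckpt.get("train_step"), ckpt.get("env_steps"))
```
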
config.json ADDED
@@ -0,0 +1,142 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_health_gathering_supreme",
+     "experiment": "default_experiment",
+     "train_dir": "/home/michal/programming/deep-rl-course/train_dir",
+     "restart_behavior": "resume",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 1,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "lr_adaptive_min": 1e-06,
+     "lr_adaptive_max": 0.01,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 4000000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+     "cli_args": {
+         "env": "doom_health_gathering_supreme",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 4000000
+     },
+     "git_hash": "unknown",
+     "git_repo_name": "not a git repository"
+ }
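
This file is the exact run configuration Sample-Factory serialized at startup (the first line of `sf_log.txt` shows it being written), so it doubles as a record of how to reproduce the run. A minimal sketch for reading it back; only the standard library is needed:

```python
import json

# Read the saved run configuration and recover the original invocation.
with open("config.json") as f:
    cfg = json.load(f)

print(cfg["env"])           # doom_health_gathering_supreme
print(cfg["command_line"])  # the exact flags the run was launched with
```
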
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f176ceed95a756d25b321ddac54033a185ff0ae1330cc0d09616c31708f2a5e6
+ size 16487549
sf_log.txt ADDED
@@ -0,0 +1,1052 @@
+ [2023-03-08 13:37:19,359][669675] Saving configuration to /home/michal/programming/deep-rl-course/train_dir/default_experiment/config.json...
+ [2023-03-08 13:37:19,359][669675] Rollout worker 0 uses device cpu
+ [2023-03-08 13:37:19,360][669675] Rollout worker 1 uses device cpu
+ [2023-03-08 13:37:19,360][669675] Rollout worker 2 uses device cpu
+ [2023-03-08 13:37:19,361][669675] Rollout worker 3 uses device cpu
+ [2023-03-08 13:37:19,361][669675] Rollout worker 4 uses device cpu
+ [2023-03-08 13:37:19,361][669675] Rollout worker 5 uses device cpu
+ [2023-03-08 13:37:19,362][669675] Rollout worker 6 uses device cpu
+ [2023-03-08 13:37:19,362][669675] Rollout worker 7 uses device cpu
+ [2023-03-08 13:37:19,411][669675] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 13:37:19,412][669675] InferenceWorker_p0-w0: min num requests: 2
+ [2023-03-08 13:37:19,430][669675] Starting all processes...
+ [2023-03-08 13:37:19,430][669675] Starting process learner_proc0
+ [2023-03-08 13:37:19,480][669675] Starting all processes...
+ [2023-03-08 13:37:19,484][669675] Starting process inference_proc0-0
+ [2023-03-08 13:37:19,485][669675] Starting process rollout_proc0
+ [2023-03-08 13:37:19,485][669675] Starting process rollout_proc1
+ [2023-03-08 13:37:19,486][669675] Starting process rollout_proc2
+ [2023-03-08 13:37:19,486][669675] Starting process rollout_proc3
+ [2023-03-08 13:37:19,486][669675] Starting process rollout_proc4
+ [2023-03-08 13:37:19,486][669675] Starting process rollout_proc5
+ [2023-03-08 13:37:19,486][669675] Starting process rollout_proc6
+ [2023-03-08 13:37:19,491][669675] Starting process rollout_proc7
+ [2023-03-08 13:37:20,414][670949] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 13:37:20,414][670949] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+ [2023-03-08 13:37:20,419][670949] Num visible devices: 1
+ [2023-03-08 13:37:20,424][670962] Worker 0 uses CPU cores [0, 1]
+ [2023-03-08 13:37:20,444][670949] Starting seed is not provided
+ [2023-03-08 13:37:20,444][670949] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 13:37:20,445][670949] Initializing actor-critic model on device cuda:0
+ [2023-03-08 13:37:20,445][670949] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 13:37:20,445][670949] RunningMeanStd input shape: (1,)
+ [2023-03-08 13:37:20,462][670949] ConvEncoder: input_channels=3
+ [2023-03-08 13:37:20,464][670965] Worker 2 uses CPU cores [4, 5]
+ [2023-03-08 13:37:20,471][670963] Worker 1 uses CPU cores [2, 3]
+ [2023-03-08 13:37:20,556][670949] Conv encoder output size: 512
+ [2023-03-08 13:37:20,556][670949] Policy head output size: 512
+ [2023-03-08 13:37:20,565][670949] Created Actor Critic model with architecture:
+ [2023-03-08 13:37:20,566][670949] ActorCriticSharedWeights(
+   (obs_normalizer): ObservationNormalizer(
+     (running_mean_std): RunningMeanStdDictInPlace(
+       (running_mean_std): ModuleDict(
+         (obs): RunningMeanStdInPlace()
+       )
+     )
+   )
+   (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+   (encoder): VizdoomEncoder(
+     (basic_encoder): ConvEncoder(
+       (enc): RecursiveScriptModule(
+         original_name=ConvEncoderImpl
+         (conv_head): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Conv2d)
+           (1): RecursiveScriptModule(original_name=ELU)
+           (2): RecursiveScriptModule(original_name=Conv2d)
+           (3): RecursiveScriptModule(original_name=ELU)
+           (4): RecursiveScriptModule(original_name=Conv2d)
+           (5): RecursiveScriptModule(original_name=ELU)
+         )
+         (mlp_layers): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Linear)
+           (1): RecursiveScriptModule(original_name=ELU)
+         )
+       )
+     )
+   )
+   (core): ModelCoreRNN(
+     (core): GRU(512, 512)
+   )
+   (decoder): MlpDecoder(
+     (mlp): Identity()
+   )
+   (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+   (action_parameterization): ActionParameterizationDefault(
+     (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+   )
+ )
+ [2023-03-08 13:37:20,578][670985] Worker 7 uses CPU cores [14, 15]
+ [2023-03-08 13:37:20,579][670967] Worker 4 uses CPU cores [8, 9]
+ [2023-03-08 13:37:20,583][670969] Worker 6 uses CPU cores [12, 13]
+ [2023-03-08 13:37:20,601][670964] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 13:37:20,601][670964] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+ [2023-03-08 13:37:20,608][670964] Num visible devices: 1
+ [2023-03-08 13:37:20,625][670968] Worker 5 uses CPU cores [10, 11]
+ [2023-03-08 13:37:20,652][670966] Worker 3 uses CPU cores [6, 7]
+ [2023-03-08 13:37:21,689][670949] Using optimizer <class 'torch.optim.adam.Adam'>
+ [2023-03-08 13:37:21,690][670949] No checkpoints found
+ [2023-03-08 13:37:21,690][670949] Did not load from checkpoint, starting from scratch!
+ [2023-03-08 13:37:21,690][670949] Initialized policy 0 weights for model version 0
+ [2023-03-08 13:37:21,692][670949] LearnerWorker_p0 finished initialization!
+ [2023-03-08 13:37:21,692][670949] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 13:37:21,762][670964] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 13:37:21,762][670964] RunningMeanStd input shape: (1,)
+ [2023-03-08 13:37:21,769][670964] ConvEncoder: input_channels=3
+ [2023-03-08 13:37:21,834][670964] Conv encoder output size: 512
+ [2023-03-08 13:37:21,834][670964] Policy head output size: 512
+ [2023-03-08 13:37:22,875][669675] Inference worker 0-0 is ready!
+ [2023-03-08 13:37:22,876][669675] All inference workers are ready! Signal rollout workers to start!
+ [2023-03-08 13:37:22,911][670963] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,914][670965] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,918][670969] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,919][670968] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,928][670985] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,928][670966] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,929][670962] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:22,929][670967] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 13:37:23,122][670962] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,122][670963] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,122][670985] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,123][670965] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,123][670969] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,257][670969] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,258][670965] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,266][670963] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,272][669675] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2023-03-08 13:37:23,306][670985] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,306][670967] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,346][670966] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:23,457][670965] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,458][670985] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,466][670967] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,466][670969] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,479][670966] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,544][670962] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:23,621][670985] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:23,627][670965] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:23,635][670967] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,666][670969] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:23,804][670963] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,829][670966] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:23,854][670967] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:23,882][670962] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:24,039][670968] Decorrelating experience for 0 frames...
+ [2023-03-08 13:37:24,042][670963] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:24,056][670966] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:24,099][670962] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:24,216][670949] Signal inference workers to stop experience collection...
+ [2023-03-08 13:37:24,218][670964] InferenceWorker_p0-w0: stopping experience collection
+ [2023-03-08 13:37:24,232][670968] Decorrelating experience for 32 frames...
+ [2023-03-08 13:37:24,388][670968] Decorrelating experience for 64 frames...
+ [2023-03-08 13:37:24,475][670949] Signal inference workers to resume experience collection...
+ [2023-03-08 13:37:24,475][670964] InferenceWorker_p0-w0: resuming experience collection
+ [2023-03-08 13:37:24,533][670968] Decorrelating experience for 96 frames...
+ [2023-03-08 13:37:25,922][670964] Updated weights for policy 0, policy_version 10 (0.0168)
+ [2023-03-08 13:37:27,103][670964] Updated weights for policy 0, policy_version 20 (0.0006)
+ [2023-03-08 13:37:28,272][669675] Fps is (10 sec: 23756.6, 60 sec: 23756.6, 300 sec: 23756.6). Total num frames: 118784. Throughput: 0: 2183.2. Samples: 10916. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 13:37:28,273][669675] Avg episode reward: [(0, '4.545')]
+ [2023-03-08 13:37:28,277][670949] Saving new best policy, reward=4.545!
+ [2023-03-08 13:37:28,372][670964] Updated weights for policy 0, policy_version 30 (0.0007)
+ [2023-03-08 13:37:29,596][670964] Updated weights for policy 0, policy_version 40 (0.0006)
+ [2023-03-08 13:37:30,793][670964] Updated weights for policy 0, policy_version 50 (0.0006)
+ [2023-03-08 13:37:32,060][670964] Updated weights for policy 0, policy_version 60 (0.0006)
+ [2023-03-08 13:37:33,231][670964] Updated weights for policy 0, policy_version 70 (0.0006)
+ [2023-03-08 13:37:33,272][669675] Fps is (10 sec: 28672.1, 60 sec: 28672.1, 300 sec: 28672.1). Total num frames: 286720. Throughput: 0: 6086.0. Samples: 60860. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
+ [2023-03-08 13:37:33,273][669675] Avg episode reward: [(0, '4.602')]
+ [2023-03-08 13:37:33,274][670949] Saving new best policy, reward=4.602!
+ [2023-03-08 13:37:34,475][670964] Updated weights for policy 0, policy_version 80 (0.0006)
+ [2023-03-08 13:37:34,573][670969] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance6'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,575][670969] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc6_evt_loop
+ [2023-03-08 13:37:34,577][670965] EvtLoop [rollout_proc2_evt_loop, process=rollout_proc2] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance2'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,577][670963] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance1'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,579][670965] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc2_evt_loop
+ [2023-03-08 13:37:34,579][670963] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc1_evt_loop
+ [2023-03-08 13:37:34,579][670968] EvtLoop [rollout_proc5_evt_loop, process=rollout_proc5] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance5'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,580][670985] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance7'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,581][670968] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc5_evt_loop
+ [2023-03-08 13:37:34,581][670985] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc7_evt_loop
+ [2023-03-08 13:37:34,580][669675] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 669675], exiting...
+ [2023-03-08 13:37:34,580][670966] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance3'), args=(0, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,582][670966] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc3_evt_loop
+ [2023-03-08 13:37:34,582][670967] EvtLoop [rollout_proc4_evt_loop, process=rollout_proc4] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance4'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,585][670949] Stopping Batcher_0...
+ [2023-03-08 13:37:34,585][670949] Loop batcher_evt_loop terminating...
+ [2023-03-08 13:37:34,584][669675] Runner profile tree view:
+ main_loop: 15.1547
+ [2023-03-08 13:37:34,587][670967] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc4_evt_loop
+ [2023-03-08 13:37:34,586][669675] Collected {0: 327680}, FPS: 21622.4
+ [2023-03-08 13:37:34,597][670949] Saving /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000081_331776.pth...
+ [2023-03-08 13:37:34,600][670962] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance0'), args=(1, 0)
+ Traceback (most recent call last):
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+     slot_callable(*args)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+     complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+     new_obs, rewards, terminated, truncated, infos = e.step(actions)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+     obs, rew, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 384, in step
+     observation, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sample_factory/envs/env_wrappers.py", line 88, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/gym/core.py", line 319, in step
+     return self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+     obs, reward, terminated, truncated, info = self.env.step(action)
+   File "/home/michal/anaconda3/envs/deep-rl/lib/python3.9/site-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+     reward = self.game.make_action(actions_flattened, self.skip_frames)
+ vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+ [2023-03-08 13:37:34,602][670962] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc0_evt_loop
+ [2023-03-08 13:37:34,654][670964] Weights refcount: 2 0
+ [2023-03-08 13:37:34,661][670964] Stopping InferenceWorker_p0-w0...
+ [2023-03-08 13:37:34,662][670964] Loop inference_proc0-0_evt_loop terminating...
+ [2023-03-08 13:37:34,697][670949] Stopping LearnerWorker_p0...
+ [2023-03-08 13:37:34,697][670949] Loop learner_proc0_evt_loop terminating...
+ [2023-03-08 14:31:27,767][671990] Saving configuration to /home/michal/programming/deep-rl-course/train_dir/default_experiment/config.json...
+ [2023-03-08 14:31:27,768][671990] Rollout worker 0 uses device cpu
+ [2023-03-08 14:31:27,768][671990] Rollout worker 1 uses device cpu
+ [2023-03-08 14:31:27,768][671990] Rollout worker 2 uses device cpu
+ [2023-03-08 14:31:27,768][671990] Rollout worker 3 uses device cpu
+ [2023-03-08 14:31:27,769][671990] Rollout worker 4 uses device cpu
+ [2023-03-08 14:31:27,769][671990] Rollout worker 5 uses device cpu
+ [2023-03-08 14:31:27,769][671990] Rollout worker 6 uses device cpu
+ [2023-03-08 14:31:27,770][671990] Rollout worker 7 uses device cpu
+ [2023-03-08 14:31:27,820][671990] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 14:31:27,821][671990] InferenceWorker_p0-w0: min num requests: 2
+ [2023-03-08 14:31:27,841][671990] Starting all processes...
+ [2023-03-08 14:31:27,842][671990] Starting process learner_proc0
+ [2023-03-08 14:31:27,891][671990] Starting all processes...
+ [2023-03-08 14:31:27,895][671990] Starting process inference_proc0-0
+ [2023-03-08 14:31:27,896][671990] Starting process rollout_proc0
+ [2023-03-08 14:31:27,896][671990] Starting process rollout_proc1
+ [2023-03-08 14:31:27,896][671990] Starting process rollout_proc2
+ [2023-03-08 14:31:27,897][671990] Starting process rollout_proc3
+ [2023-03-08 14:31:27,899][671990] Starting process rollout_proc4
+ [2023-03-08 14:31:27,899][671990] Starting process rollout_proc5
+ [2023-03-08 14:31:27,899][671990] Starting process rollout_proc6
+ [2023-03-08 14:31:27,904][671990] Starting process rollout_proc7
+ [2023-03-08 14:31:28,692][682716] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 14:31:28,692][682716] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+ [2023-03-08 14:31:28,705][682716] Num visible devices: 1
+ [2023-03-08 14:31:28,732][682716] Starting seed is not provided
+ [2023-03-08 14:31:28,732][682716] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 14:31:28,732][682716] Initializing actor-critic model on device cuda:0
+ [2023-03-08 14:31:28,733][682716] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 14:31:28,733][682716] RunningMeanStd input shape: (1,)
+ [2023-03-08 14:31:28,745][682716] ConvEncoder: input_channels=3
+ [2023-03-08 14:31:28,799][682729] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 14:31:28,799][682729] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+ [2023-03-08 14:31:28,803][682729] Num visible devices: 1
+ [2023-03-08 14:31:28,820][682747] Worker 3 uses CPU cores [6, 7]
+ [2023-03-08 14:31:28,826][682746] Worker 1 uses CPU cores [2, 3]
+ [2023-03-08 14:31:28,828][682749] Worker 4 uses CPU cores [8, 9]
+ [2023-03-08 14:31:28,829][682730] Worker 0 uses CPU cores [0, 1]
+ [2023-03-08 14:31:28,829][682748] Worker 2 uses CPU cores [4, 5]
+ [2023-03-08 14:31:28,845][682716] Conv encoder output size: 512
+ [2023-03-08 14:31:28,846][682716] Policy head output size: 512
+ [2023-03-08 14:31:28,854][682716] Created Actor Critic model with architecture:
+ [2023-03-08 14:31:28,855][682716] ActorCriticSharedWeights(
+   (obs_normalizer): ObservationNormalizer(
+     (running_mean_std): RunningMeanStdDictInPlace(
+       (running_mean_std): ModuleDict(
+         (obs): RunningMeanStdInPlace()
+       )
+     )
+   )
+   (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+   (encoder): VizdoomEncoder(
+     (basic_encoder): ConvEncoder(
+       (enc): RecursiveScriptModule(
+         original_name=ConvEncoderImpl
+         (conv_head): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Conv2d)
+           (1): RecursiveScriptModule(original_name=ELU)
+           (2): RecursiveScriptModule(original_name=Conv2d)
+           (3): RecursiveScriptModule(original_name=ELU)
+           (4): RecursiveScriptModule(original_name=Conv2d)
+           (5): RecursiveScriptModule(original_name=ELU)
+         )
+         (mlp_layers): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Linear)
+           (1): RecursiveScriptModule(original_name=ELU)
+         )
+       )
+     )
+   )
+   (core): ModelCoreRNN(
+     (core): GRU(512, 512)
+   )
+   (decoder): MlpDecoder(
+     (mlp): Identity()
+   )
+   (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+   (action_parameterization): ActionParameterizationDefault(
+     (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+   )
+ )
+ [2023-03-08 14:31:28,857][682752] Worker 7 uses CPU cores [14, 15]
+ [2023-03-08 14:31:28,878][682751] Worker 6 uses CPU cores [12, 13]
+ [2023-03-08 14:31:28,925][682750] Worker 5 uses CPU cores [10, 11]
+ [2023-03-08 14:31:29,909][682716] Using optimizer <class 'torch.optim.adam.Adam'>
+ [2023-03-08 14:31:29,910][682716] Loading state from checkpoint /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000081_331776.pth...
+ [2023-03-08 14:31:29,925][682716] Loading model from checkpoint
+ [2023-03-08 14:31:29,927][682716] Loaded experiment state at self.train_step=81, self.env_steps=331776
+ [2023-03-08 14:31:29,928][682716] Initialized policy 0 weights for model version 81
+ [2023-03-08 14:31:29,929][682716] LearnerWorker_p0 finished initialization!
+ [2023-03-08 14:31:29,929][682716] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2023-03-08 14:31:29,991][682729] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 14:31:29,992][682729] RunningMeanStd input shape: (1,)
+ [2023-03-08 14:31:29,998][682729] ConvEncoder: input_channels=3
+ [2023-03-08 14:31:30,058][682729] Conv encoder output size: 512
+ [2023-03-08 14:31:30,058][682729] Policy head output size: 512
+ [2023-03-08 14:31:30,993][671990] Inference worker 0-0 is ready!
+ [2023-03-08 14:31:30,994][671990] All inference workers are ready! Signal rollout workers to start!
+ [2023-03-08 14:31:31,028][682747] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:31:31,028][682746] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:31:31,029][682750] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:31:31,029][682752] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:31:31,040][682730] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:31:31,040][682749] Doom resolution: 160x120, resize resolution: (128, 72)
503
+ [2023-03-08 14:31:31,040][682751] Doom resolution: 160x120, resize resolution: (128, 72)
504
+ [2023-03-08 14:31:31,041][682748] Doom resolution: 160x120, resize resolution: (128, 72)
505
+ [2023-03-08 14:31:31,217][682752] Decorrelating experience for 0 frames...
506
+ [2023-03-08 14:31:31,218][682748] Decorrelating experience for 0 frames...
507
+ [2023-03-08 14:31:31,220][682750] Decorrelating experience for 0 frames...
508
+ [2023-03-08 14:31:31,220][682746] Decorrelating experience for 0 frames...
509
+ [2023-03-08 14:31:31,345][682746] Decorrelating experience for 32 frames...
510
+ [2023-03-08 14:31:31,345][682752] Decorrelating experience for 32 frames...
511
+ [2023-03-08 14:31:31,345][682750] Decorrelating experience for 32 frames...
512
+ [2023-03-08 14:31:31,351][682751] Decorrelating experience for 0 frames...
513
+ [2023-03-08 14:31:31,409][682748] Decorrelating experience for 32 frames...
514
+ [2023-03-08 14:31:31,507][682751] Decorrelating experience for 32 frames...
515
+ [2023-03-08 14:31:31,508][682750] Decorrelating experience for 64 frames...
516
+ [2023-03-08 14:31:31,553][682747] Decorrelating experience for 0 frames...
517
+ [2023-03-08 14:31:31,563][682752] Decorrelating experience for 64 frames...
518
+ [2023-03-08 14:31:31,568][682746] Decorrelating experience for 64 frames...
519
+ [2023-03-08 14:31:31,661][682751] Decorrelating experience for 64 frames...
520
+ [2023-03-08 14:31:31,710][682752] Decorrelating experience for 96 frames...
521
+ [2023-03-08 14:31:31,710][682730] Decorrelating experience for 0 frames...
522
+ [2023-03-08 14:31:31,711][682750] Decorrelating experience for 96 frames...
523
+ [2023-03-08 14:31:31,725][682746] Decorrelating experience for 96 frames...
524
+ [2023-03-08 14:31:31,800][671990] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 331776. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
525
+ [2023-03-08 14:31:31,830][682751] Decorrelating experience for 96 frames...
526
+ [2023-03-08 14:31:31,864][682749] Decorrelating experience for 0 frames...
527
+ [2023-03-08 14:31:31,866][682747] Decorrelating experience for 32 frames...
528
+ [2023-03-08 14:31:32,054][682747] Decorrelating experience for 64 frames...
529
+ [2023-03-08 14:31:32,072][682730] Decorrelating experience for 32 frames...
530
+ [2023-03-08 14:31:32,077][682749] Decorrelating experience for 32 frames...
531
+ [2023-03-08 14:31:32,211][682716] Signal inference workers to stop experience collection...
532
+ [2023-03-08 14:31:32,214][682729] InferenceWorker_p0-w0: stopping experience collection
533
+ [2023-03-08 14:31:32,246][682747] Decorrelating experience for 96 frames...
534
+ [2023-03-08 14:31:32,253][682730] Decorrelating experience for 64 frames...
535
+ [2023-03-08 14:31:32,256][682749] Decorrelating experience for 64 frames...
536
+ [2023-03-08 14:31:32,286][682748] Decorrelating experience for 64 frames...
537
+ [2023-03-08 14:31:32,372][682716] Signal inference workers to resume experience collection...
538
+ [2023-03-08 14:31:32,372][682729] InferenceWorker_p0-w0: resuming experience collection
+ [2023-03-08 14:31:32,434][682748] Decorrelating experience for 96 frames...
+ [2023-03-08 14:31:32,453][682749] Decorrelating experience for 96 frames...
+ [2023-03-08 14:31:32,454][682730] Decorrelating experience for 96 frames...
+ [2023-03-08 14:31:33,655][682729] Updated weights for policy 0, policy_version 91 (0.0140)
+ [2023-03-08 14:31:34,792][682729] Updated weights for policy 0, policy_version 101 (0.0005)
+ [2023-03-08 14:31:35,898][682729] Updated weights for policy 0, policy_version 111 (0.0005)
+ [2023-03-08 14:31:36,800][671990] Fps is (10 sec: 31129.4, 60 sec: 31129.4, 300 sec: 31129.4). Total num frames: 487424. Throughput: 0: 2831.6. Samples: 14158. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:31:36,801][671990] Avg episode reward: [(0, '4.411')]
+ [2023-03-08 14:31:36,994][682729] Updated weights for policy 0, policy_version 121 (0.0006)
+ [2023-03-08 14:31:38,163][682729] Updated weights for policy 0, policy_version 131 (0.0006)
+ [2023-03-08 14:31:39,276][682729] Updated weights for policy 0, policy_version 141 (0.0006)
+ [2023-03-08 14:31:40,399][682729] Updated weights for policy 0, policy_version 151 (0.0005)
+ [2023-03-08 14:31:41,488][682729] Updated weights for policy 0, policy_version 161 (0.0006)
+ [2023-03-08 14:31:41,800][671990] Fps is (10 sec: 33587.2, 60 sec: 33587.2, 300 sec: 33587.2). Total num frames: 667648. Throughput: 0: 6863.0. Samples: 68630. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-03-08 14:31:41,801][671990] Avg episode reward: [(0, '4.328')]
+ [2023-03-08 14:31:42,632][682729] Updated weights for policy 0, policy_version 171 (0.0006)
+ [2023-03-08 14:31:43,767][682729] Updated weights for policy 0, policy_version 181 (0.0006)
+ [2023-03-08 14:31:44,894][682729] Updated weights for policy 0, policy_version 191 (0.0005)
+ [2023-03-08 14:31:46,000][682729] Updated weights for policy 0, policy_version 201 (0.0006)
+ [2023-03-08 14:31:46,800][671990] Fps is (10 sec: 36454.3, 60 sec: 34679.3, 300 sec: 34679.3). Total num frames: 851968. Throughput: 0: 8238.5. Samples: 123578. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:31:46,801][671990] Avg episode reward: [(0, '4.684')]
+ [2023-03-08 14:31:46,804][682716] Saving new best policy, reward=4.684!
+ [2023-03-08 14:31:47,159][682729] Updated weights for policy 0, policy_version 211 (0.0006)
+ [2023-03-08 14:31:47,814][671990] Heartbeat connected on Batcher_0
+ [2023-03-08 14:31:47,824][671990] Heartbeat connected on InferenceWorker_p0-w0
+ [2023-03-08 14:31:47,825][671990] Heartbeat connected on RolloutWorker_w0
+ [2023-03-08 14:31:47,828][671990] Heartbeat connected on RolloutWorker_w1
+ [2023-03-08 14:31:47,830][671990] Heartbeat connected on RolloutWorker_w2
+ [2023-03-08 14:31:47,832][671990] Heartbeat connected on RolloutWorker_w3
+ [2023-03-08 14:31:47,835][671990] Heartbeat connected on RolloutWorker_w4
+ [2023-03-08 14:31:47,837][671990] Heartbeat connected on RolloutWorker_w5
+ [2023-03-08 14:31:47,839][671990] Heartbeat connected on RolloutWorker_w6
+ [2023-03-08 14:31:47,840][671990] Heartbeat connected on LearnerWorker_p0
+ [2023-03-08 14:31:47,845][671990] Heartbeat connected on RolloutWorker_w7
+ [2023-03-08 14:31:48,287][682729] Updated weights for policy 0, policy_version 221 (0.0006)
+ [2023-03-08 14:31:49,409][682729] Updated weights for policy 0, policy_version 231 (0.0006)
+ [2023-03-08 14:31:50,502][682729] Updated weights for policy 0, policy_version 241 (0.0005)
+ [2023-03-08 14:31:51,642][682729] Updated weights for policy 0, policy_version 251 (0.0005)
+ [2023-03-08 14:31:51,800][671990] Fps is (10 sec: 36453.9, 60 sec: 35020.6, 300 sec: 35020.6). Total num frames: 1032192. Throughput: 0: 7529.3. Samples: 150588. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:31:51,801][671990] Avg episode reward: [(0, '4.603')]
+ [2023-03-08 14:31:52,750][682729] Updated weights for policy 0, policy_version 261 (0.0006)
+ [2023-03-08 14:31:53,928][682729] Updated weights for policy 0, policy_version 271 (0.0005)
+ [2023-03-08 14:31:55,044][682729] Updated weights for policy 0, policy_version 281 (0.0006)
+ [2023-03-08 14:31:56,141][682729] Updated weights for policy 0, policy_version 291 (0.0006)
+ [2023-03-08 14:31:56,800][671990] Fps is (10 sec: 36044.9, 60 sec: 35225.5, 300 sec: 35225.5). Total num frames: 1212416. Throughput: 0: 8215.5. Samples: 205388. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:31:56,801][671990] Avg episode reward: [(0, '4.862')]
+ [2023-03-08 14:31:56,808][682716] Saving new best policy, reward=4.862!
+ [2023-03-08 14:31:57,261][682729] Updated weights for policy 0, policy_version 301 (0.0006)
+ [2023-03-08 14:31:58,382][682729] Updated weights for policy 0, policy_version 311 (0.0005)
+ [2023-03-08 14:31:59,477][682729] Updated weights for policy 0, policy_version 321 (0.0005)
+ [2023-03-08 14:32:00,577][682729] Updated weights for policy 0, policy_version 331 (0.0006)
+ [2023-03-08 14:32:01,700][682729] Updated weights for policy 0, policy_version 341 (0.0005)
+ [2023-03-08 14:32:01,800][671990] Fps is (10 sec: 36454.7, 60 sec: 35498.6, 300 sec: 35498.6). Total num frames: 1396736. Throughput: 0: 8691.4. Samples: 260742. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:32:01,802][671990] Avg episode reward: [(0, '5.818')]
+ [2023-03-08 14:32:01,815][682716] Saving new best policy, reward=5.818!
+ [2023-03-08 14:32:02,784][682729] Updated weights for policy 0, policy_version 351 (0.0006)
+ [2023-03-08 14:32:03,870][682729] Updated weights for policy 0, policy_version 361 (0.0006)
+ [2023-03-08 14:32:04,975][682729] Updated weights for policy 0, policy_version 371 (0.0006)
+ [2023-03-08 14:32:06,069][682729] Updated weights for policy 0, policy_version 381 (0.0005)
+ [2023-03-08 14:32:06,800][671990] Fps is (10 sec: 37273.5, 60 sec: 35810.7, 300 sec: 35810.7). Total num frames: 1585152. Throughput: 0: 8248.6. Samples: 288702. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:06,801][671990] Avg episode reward: [(0, '6.887')]
+ [2023-03-08 14:32:06,819][682716] Saving new best policy, reward=6.887!
+ [2023-03-08 14:32:07,149][682729] Updated weights for policy 0, policy_version 391 (0.0006)
+ [2023-03-08 14:32:08,242][682729] Updated weights for policy 0, policy_version 401 (0.0006)
+ [2023-03-08 14:32:09,314][682729] Updated weights for policy 0, policy_version 411 (0.0005)
+ [2023-03-08 14:32:10,390][682729] Updated weights for policy 0, policy_version 421 (0.0005)
+ [2023-03-08 14:32:11,471][682729] Updated weights for policy 0, policy_version 431 (0.0005)
+ [2023-03-08 14:32:11,800][671990] Fps is (10 sec: 37683.4, 60 sec: 36044.8, 300 sec: 36044.8). Total num frames: 1773568. Throughput: 0: 8632.1. Samples: 345284. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:32:11,801][671990] Avg episode reward: [(0, '8.134')]
+ [2023-03-08 14:32:11,808][682716] Saving new best policy, reward=8.134!
+ [2023-03-08 14:32:12,592][682729] Updated weights for policy 0, policy_version 441 (0.0005)
+ [2023-03-08 14:32:13,668][682729] Updated weights for policy 0, policy_version 451 (0.0006)
+ [2023-03-08 14:32:14,758][682729] Updated weights for policy 0, policy_version 461 (0.0006)
+ [2023-03-08 14:32:15,855][682729] Updated weights for policy 0, policy_version 471 (0.0006)
+ [2023-03-08 14:32:16,800][671990] Fps is (10 sec: 37683.3, 60 sec: 36226.8, 300 sec: 36226.8). Total num frames: 1961984. Throughput: 0: 8923.7. Samples: 401566. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:16,801][671990] Avg episode reward: [(0, '10.635')]
+ [2023-03-08 14:32:16,829][682716] Saving new best policy, reward=10.635!
+ [2023-03-08 14:32:16,956][682729] Updated weights for policy 0, policy_version 481 (0.0006)
+ [2023-03-08 14:32:18,149][682729] Updated weights for policy 0, policy_version 491 (0.0006)
+ [2023-03-08 14:32:19,411][682729] Updated weights for policy 0, policy_version 501 (0.0006)
+ [2023-03-08 14:32:20,531][682729] Updated weights for policy 0, policy_version 511 (0.0005)
+ [2023-03-08 14:32:21,649][682729] Updated weights for policy 0, policy_version 521 (0.0005)
+ [2023-03-08 14:32:21,800][671990] Fps is (10 sec: 36454.4, 60 sec: 36126.7, 300 sec: 36126.7). Total num frames: 2138112. Throughput: 0: 9176.1. Samples: 427084. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:21,801][671990] Avg episode reward: [(0, '14.048')]
+ [2023-03-08 14:32:21,802][682716] Saving new best policy, reward=14.048!
+ [2023-03-08 14:32:22,938][682729] Updated weights for policy 0, policy_version 531 (0.0006)
+ [2023-03-08 14:32:24,157][682729] Updated weights for policy 0, policy_version 541 (0.0006)
+ [2023-03-08 14:32:25,360][682729] Updated weights for policy 0, policy_version 551 (0.0006)
+ [2023-03-08 14:32:26,488][682729] Updated weights for policy 0, policy_version 561 (0.0006)
+ [2023-03-08 14:32:26,800][671990] Fps is (10 sec: 34406.1, 60 sec: 35895.8, 300 sec: 35895.8). Total num frames: 2306048. Throughput: 0: 9121.2. Samples: 479086. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:26,801][671990] Avg episode reward: [(0, '14.162')]
+ [2023-03-08 14:32:26,805][682716] Saving new best policy, reward=14.162!
+ [2023-03-08 14:32:27,751][682729] Updated weights for policy 0, policy_version 571 (0.0006)
+ [2023-03-08 14:32:29,007][682729] Updated weights for policy 0, policy_version 581 (0.0005)
+ [2023-03-08 14:32:30,571][682729] Updated weights for policy 0, policy_version 591 (0.0007)
+ [2023-03-08 14:32:31,800][671990] Fps is (10 sec: 31538.7, 60 sec: 35362.1, 300 sec: 35362.1). Total num frames: 2453504. Throughput: 0: 8947.1. Samples: 526198. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:31,801][671990] Avg episode reward: [(0, '19.425')]
+ [2023-03-08 14:32:31,803][682716] Saving new best policy, reward=19.425!
+ [2023-03-08 14:32:32,014][682729] Updated weights for policy 0, policy_version 601 (0.0006)
+ [2023-03-08 14:32:33,354][682729] Updated weights for policy 0, policy_version 611 (0.0006)
+ [2023-03-08 14:32:34,646][682729] Updated weights for policy 0, policy_version 621 (0.0006)
+ [2023-03-08 14:32:35,933][682729] Updated weights for policy 0, policy_version 631 (0.0006)
+ [2023-03-08 14:32:36,800][671990] Fps is (10 sec: 30720.3, 60 sec: 35430.4, 300 sec: 35099.6). Total num frames: 2613248. Throughput: 0: 8840.9. Samples: 548426. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:36,802][671990] Avg episode reward: [(0, '16.826')]
+ [2023-03-08 14:32:37,081][682729] Updated weights for policy 0, policy_version 641 (0.0006)
+ [2023-03-08 14:32:38,247][682729] Updated weights for policy 0, policy_version 651 (0.0006)
+ [2023-03-08 14:32:39,376][682729] Updated weights for policy 0, policy_version 661 (0.0006)
+ [2023-03-08 14:32:40,533][682729] Updated weights for policy 0, policy_version 671 (0.0006)
+ [2023-03-08 14:32:41,800][671990] Fps is (10 sec: 33177.9, 60 sec: 35293.8, 300 sec: 35050.0). Total num frames: 2785280. Throughput: 0: 8779.0. Samples: 600442. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:32:41,802][671990] Avg episode reward: [(0, '17.019')]
+ [2023-03-08 14:32:41,871][682729] Updated weights for policy 0, policy_version 681 (0.0006)
+ [2023-03-08 14:32:43,289][682729] Updated weights for policy 0, policy_version 691 (0.0006)
+ [2023-03-08 14:32:44,689][682729] Updated weights for policy 0, policy_version 701 (0.0006)
+ [2023-03-08 14:32:45,913][682729] Updated weights for policy 0, policy_version 711 (0.0006)
+ [2023-03-08 14:32:46,800][671990] Fps is (10 sec: 32358.4, 60 sec: 34747.8, 300 sec: 34734.1). Total num frames: 2936832. Throughput: 0: 8576.5. Samples: 646684. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:46,801][671990] Avg episode reward: [(0, '22.064')]
+ [2023-03-08 14:32:46,804][682716] Saving new best policy, reward=22.064!
+ [2023-03-08 14:32:47,146][682729] Updated weights for policy 0, policy_version 721 (0.0006)
+ [2023-03-08 14:32:48,272][682729] Updated weights for policy 0, policy_version 731 (0.0006)
+ [2023-03-08 14:32:49,718][682729] Updated weights for policy 0, policy_version 741 (0.0006)
+ [2023-03-08 14:32:50,891][682729] Updated weights for policy 0, policy_version 751 (0.0006)
+ [2023-03-08 14:32:51,800][671990] Fps is (10 sec: 31948.6, 60 sec: 34543.0, 300 sec: 34662.4). Total num frames: 3104768. Throughput: 0: 8507.7. Samples: 671550. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:32:51,801][671990] Avg episode reward: [(0, '18.601')]
+ [2023-03-08 14:32:52,025][682729] Updated weights for policy 0, policy_version 761 (0.0006)
+ [2023-03-08 14:32:53,121][682729] Updated weights for policy 0, policy_version 771 (0.0005)
+ [2023-03-08 14:32:54,273][682729] Updated weights for policy 0, policy_version 781 (0.0006)
+ [2023-03-08 14:32:55,412][682729] Updated weights for policy 0, policy_version 791 (0.0006)
+ [2023-03-08 14:32:56,591][682729] Updated weights for policy 0, policy_version 801 (0.0006)
+ [2023-03-08 14:32:56,800][671990] Fps is (10 sec: 34816.0, 60 sec: 34542.9, 300 sec: 34743.7). Total num frames: 3284992. Throughput: 0: 8419.5. Samples: 724162. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:32:56,801][671990] Avg episode reward: [(0, '19.367')]
+ [2023-03-08 14:32:57,697][682729] Updated weights for policy 0, policy_version 811 (0.0006)
+ [2023-03-08 14:32:58,813][682729] Updated weights for policy 0, policy_version 821 (0.0006)
+ [2023-03-08 14:32:59,899][682729] Updated weights for policy 0, policy_version 831 (0.0006)
+ [2023-03-08 14:33:00,985][682729] Updated weights for policy 0, policy_version 841 (0.0005)
+ [2023-03-08 14:33:01,800][671990] Fps is (10 sec: 36864.3, 60 sec: 34611.2, 300 sec: 34907.0). Total num frames: 3473408. Throughput: 0: 8390.0. Samples: 779116. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-03-08 14:33:01,801][671990] Avg episode reward: [(0, '20.222')]
+ [2023-03-08 14:33:02,102][682729] Updated weights for policy 0, policy_version 851 (0.0006)
+ [2023-03-08 14:33:03,243][682729] Updated weights for policy 0, policy_version 861 (0.0006)
+ [2023-03-08 14:33:04,400][682729] Updated weights for policy 0, policy_version 871 (0.0006)
+ [2023-03-08 14:33:05,506][682729] Updated weights for policy 0, policy_version 881 (0.0005)
+ [2023-03-08 14:33:06,672][682729] Updated weights for policy 0, policy_version 891 (0.0006)
+ [2023-03-08 14:33:06,800][671990] Fps is (10 sec: 36863.6, 60 sec: 34474.6, 300 sec: 34966.9). Total num frames: 3653632. Throughput: 0: 8430.3. Samples: 806450. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-03-08 14:33:06,801][671990] Avg episode reward: [(0, '22.082')]
+ [2023-03-08 14:33:06,805][682716] Saving new best policy, reward=22.082!
+ [2023-03-08 14:33:07,783][682729] Updated weights for policy 0, policy_version 901 (0.0005)
+ [2023-03-08 14:33:08,924][682729] Updated weights for policy 0, policy_version 911 (0.0006)
+ [2023-03-08 14:33:10,055][682729] Updated weights for policy 0, policy_version 921 (0.0005)
+ [2023-03-08 14:33:11,179][682729] Updated weights for policy 0, policy_version 931 (0.0006)
+ [2023-03-08 14:33:11,800][671990] Fps is (10 sec: 36045.0, 60 sec: 34338.2, 300 sec: 35020.8). Total num frames: 3833856. Throughput: 0: 8478.0. Samples: 860594. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
+ [2023-03-08 14:33:11,801][671990] Avg episode reward: [(0, '20.605')]
+ [2023-03-08 14:33:12,236][682729] Updated weights for policy 0, policy_version 941 (0.0006)
+ [2023-03-08 14:33:13,344][682729] Updated weights for policy 0, policy_version 951 (0.0005)
+ [2023-03-08 14:33:14,453][682729] Updated weights for policy 0, policy_version 961 (0.0005)
+ [2023-03-08 14:33:15,616][682729] Updated weights for policy 0, policy_version 971 (0.0006)
+ [2023-03-08 14:33:16,402][682716] Stopping Batcher_0...
+ [2023-03-08 14:33:16,402][682716] Loop batcher_evt_loop terminating...
+ [2023-03-08 14:33:16,402][682716] Saving /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-08 14:33:16,402][671990] Component Batcher_0 stopped!
+ [2023-03-08 14:33:16,411][682751] Stopping RolloutWorker_w6...
+ [2023-03-08 14:33:16,411][682747] Stopping RolloutWorker_w3...
+ [2023-03-08 14:33:16,412][682751] Loop rollout_proc6_evt_loop terminating...
+ [2023-03-08 14:33:16,412][682747] Loop rollout_proc3_evt_loop terminating...
+ [2023-03-08 14:33:16,412][682746] Stopping RolloutWorker_w1...
+ [2023-03-08 14:33:16,411][671990] Component RolloutWorker_w6 stopped!
+ [2023-03-08 14:33:16,412][682748] Stopping RolloutWorker_w2...
+ [2023-03-08 14:33:16,413][682746] Loop rollout_proc1_evt_loop terminating...
+ [2023-03-08 14:33:16,413][682748] Loop rollout_proc2_evt_loop terminating...
+ [2023-03-08 14:33:16,413][671990] Component RolloutWorker_w3 stopped!
+ [2023-03-08 14:33:16,414][682750] Stopping RolloutWorker_w5...
+ [2023-03-08 14:33:16,415][682750] Loop rollout_proc5_evt_loop terminating...
+ [2023-03-08 14:33:16,415][682752] Stopping RolloutWorker_w7...
+ [2023-03-08 14:33:16,415][671990] Component RolloutWorker_w1 stopped!
+ [2023-03-08 14:33:16,415][682752] Loop rollout_proc7_evt_loop terminating...
+ [2023-03-08 14:33:16,415][671990] Component RolloutWorker_w2 stopped!
+ [2023-03-08 14:33:16,416][671990] Component RolloutWorker_w5 stopped!
+ [2023-03-08 14:33:16,416][671990] Component RolloutWorker_w7 stopped!
+ [2023-03-08 14:33:16,418][682729] Weights refcount: 2 0
+ [2023-03-08 14:33:16,421][682729] Stopping InferenceWorker_p0-w0...
+ [2023-03-08 14:33:16,421][671990] Component InferenceWorker_p0-w0 stopped!
+ [2023-03-08 14:33:16,422][682729] Loop inference_proc0-0_evt_loop terminating...
+ [2023-03-08 14:33:16,440][682749] Stopping RolloutWorker_w4...
+ [2023-03-08 14:33:16,441][682749] Loop rollout_proc4_evt_loop terminating...
+ [2023-03-08 14:33:16,441][671990] Component RolloutWorker_w4 stopped!
+ [2023-03-08 14:33:16,477][682730] Stopping RolloutWorker_w0...
+ [2023-03-08 14:33:16,478][682730] Loop rollout_proc0_evt_loop terminating...
+ [2023-03-08 14:33:16,477][671990] Component RolloutWorker_w0 stopped!
+ [2023-03-08 14:33:16,493][682716] Saving /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-08 14:33:16,584][682716] Stopping LearnerWorker_p0...
+ [2023-03-08 14:33:16,584][682716] Loop learner_proc0_evt_loop terminating...
+ [2023-03-08 14:33:16,584][671990] Component LearnerWorker_p0 stopped!
+ [2023-03-08 14:33:16,586][671990] Waiting for process learner_proc0 to stop...
+ [2023-03-08 14:33:16,976][671990] Waiting for process inference_proc0-0 to join...
+ [2023-03-08 14:33:16,977][671990] Waiting for process rollout_proc0 to join...
+ [2023-03-08 14:33:16,978][671990] Waiting for process rollout_proc1 to join...
+ [2023-03-08 14:33:16,978][671990] Waiting for process rollout_proc2 to join...
+ [2023-03-08 14:33:16,979][671990] Waiting for process rollout_proc3 to join...
+ [2023-03-08 14:33:16,979][671990] Waiting for process rollout_proc4 to join...
+ [2023-03-08 14:33:16,980][671990] Waiting for process rollout_proc5 to join...
+ [2023-03-08 14:33:16,980][671990] Waiting for process rollout_proc6 to join...
+ [2023-03-08 14:33:16,981][671990] Waiting for process rollout_proc7 to join...
+ [2023-03-08 14:33:16,981][671990] Batcher 0 profile tree view:
+ batching: 10.8707, releasing_batches: 0.0115
+ [2023-03-08 14:33:16,982][671990] InferenceWorker_p0-w0 profile tree view:
+ wait_policy: 0.0000
+ wait_policy_total: 2.2257
+ update_model: 1.3996
+ weight_update: 0.0006
+ one_step: 0.0009
+ handle_policy_step: 96.0541
+ deserialize: 5.3775, stack: 0.4795, obs_to_device_normalize: 29.8978, forward: 30.9313, send_messages: 5.9810
+ prepare_outputs: 19.0649
+ to_cpu: 14.4507
+ [2023-03-08 14:33:16,982][671990] Learner 0 profile tree view:
+ misc: 0.0051, prepare_batch: 6.8415
+ train: 21.3246
+ epoch_init: 0.0031, minibatch_init: 0.0037, losses_postprocess: 0.3033, kl_divergence: 0.1304, after_optimizer: 10.3912
+ calculate_losses: 6.6995
+ losses_init: 0.0019, forward_head: 0.3111, bptt_initial: 4.7548, tail: 0.2907, advantages_returns: 0.0916, losses: 0.6107
+ bptt: 0.5510
+ bptt_forward_core: 0.5280
+ update: 3.5779
+ clip: 0.5862
+ [2023-03-08 14:33:16,983][671990] RolloutWorker_w0 profile tree view:
+ wait_for_trajectories: 0.0811, enqueue_policy_requests: 3.7469, env_step: 59.1615, overhead: 4.0872, complete_rollouts: 0.1260
+ save_policy_outputs: 4.2104
+ split_output_tensors: 2.0489
+ [2023-03-08 14:33:16,983][671990] RolloutWorker_w7 profile tree view:
+ wait_for_trajectories: 0.0810, enqueue_policy_requests: 3.7304, env_step: 59.5087, overhead: 4.1451, complete_rollouts: 0.1230
+ save_policy_outputs: 4.2532
+ split_output_tensors: 2.0561
+ [2023-03-08 14:33:16,984][671990] Loop Runner_EvtLoop terminating...
+ [2023-03-08 14:33:16,984][671990] Runner profile tree view:
+ main_loop: 109.1432
+ [2023-03-08 14:33:16,985][671990] Collected {0: 4005888}, FPS: 33663.2
+ [2023-03-08 14:40:07,216][671990] Loading existing experiment configuration from /home/michal/programming/deep-rl-course/train_dir/default_experiment/config.json
+ [2023-03-08 14:40:07,217][671990] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-03-08 14:40:07,217][671990] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-03-08 14:40:07,217][671990] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-03-08 14:40:07,218][671990] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-03-08 14:40:07,218][671990] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-03-08 14:40:07,218][671990] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+ [2023-03-08 14:40:07,219][671990] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-03-08 14:40:07,219][671990] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+ [2023-03-08 14:40:07,219][671990] Adding new argument 'hf_repository'=None that is not in the saved config file!
+ [2023-03-08 14:40:07,219][671990] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-03-08 14:40:07,220][671990] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-03-08 14:40:07,220][671990] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-03-08 14:40:07,220][671990] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-03-08 14:40:07,221][671990] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-03-08 14:40:07,228][671990] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-03-08 14:40:07,229][671990] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 14:40:07,229][671990] RunningMeanStd input shape: (1,)
+ [2023-03-08 14:40:07,237][671990] ConvEncoder: input_channels=3
+ [2023-03-08 14:40:07,303][671990] Conv encoder output size: 512
+ [2023-03-08 14:40:07,304][671990] Policy head output size: 512
+ [2023-03-08 14:40:08,412][671990] Loading state from checkpoint /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-08 14:40:08,706][671990] Num frames 100...
+ [2023-03-08 14:40:08,766][671990] Num frames 200...
+ [2023-03-08 14:40:08,828][671990] Num frames 300...
+ [2023-03-08 14:40:08,891][671990] Num frames 400...
+ [2023-03-08 14:40:08,951][671990] Num frames 500...
+ [2023-03-08 14:40:09,012][671990] Num frames 600...
+ [2023-03-08 14:40:09,071][671990] Num frames 700...
+ [2023-03-08 14:40:09,134][671990] Num frames 800...
+ [2023-03-08 14:40:09,206][671990] Avg episode rewards: #0: 19.320, true rewards: #0: 8.320
+ [2023-03-08 14:40:09,207][671990] Avg episode reward: 19.320, avg true_objective: 8.320
+ [2023-03-08 14:40:09,252][671990] Num frames 900...
+ [2023-03-08 14:40:09,312][671990] Num frames 1000...
+ [2023-03-08 14:40:09,370][671990] Num frames 1100...
+ [2023-03-08 14:40:09,428][671990] Num frames 1200...
+ [2023-03-08 14:40:09,487][671990] Num frames 1300...
+ [2023-03-08 14:40:09,548][671990] Num frames 1400...
+ [2023-03-08 14:40:09,609][671990] Num frames 1500...
+ [2023-03-08 14:40:09,674][671990] Num frames 1600...
+ [2023-03-08 14:40:09,784][671990] Avg episode rewards: #0: 17.480, true rewards: #0: 8.480
+ [2023-03-08 14:40:09,785][671990] Avg episode reward: 17.480, avg true_objective: 8.480
+ [2023-03-08 14:40:09,791][671990] Num frames 1700...
+ [2023-03-08 14:40:09,851][671990] Num frames 1800...
+ [2023-03-08 14:40:09,910][671990] Num frames 1900...
+ [2023-03-08 14:40:09,969][671990] Num frames 2000...
+ [2023-03-08 14:40:10,027][671990] Num frames 2100...
+ [2023-03-08 14:40:10,086][671990] Num frames 2200...
+ [2023-03-08 14:40:10,145][671990] Num frames 2300...
+ [2023-03-08 14:40:10,238][671990] Avg episode rewards: #0: 16.897, true rewards: #0: 7.897
+ [2023-03-08 14:40:10,239][671990] Avg episode reward: 16.897, avg true_objective: 7.897
+ [2023-03-08 14:40:10,263][671990] Num frames 2400...
+ [2023-03-08 14:40:10,322][671990] Num frames 2500...
+ [2023-03-08 14:40:10,381][671990] Num frames 2600...
+ [2023-03-08 14:40:10,444][671990] Num frames 2700...
+ [2023-03-08 14:40:10,505][671990] Num frames 2800...
+ [2023-03-08 14:40:10,565][671990] Num frames 2900...
+ [2023-03-08 14:40:10,627][671990] Num frames 3000...
+ [2023-03-08 14:40:10,722][671990] Avg episode rewards: #0: 15.433, true rewards: #0: 7.682
+ [2023-03-08 14:40:10,723][671990] Avg episode reward: 15.433, avg true_objective: 7.682
+ [2023-03-08 14:40:10,743][671990] Num frames 3100...
+ [2023-03-08 14:40:10,802][671990] Num frames 3200...
+ [2023-03-08 14:40:10,862][671990] Num frames 3300...
+ [2023-03-08 14:40:10,920][671990] Num frames 3400...
+ [2023-03-08 14:40:10,979][671990] Num frames 3500...
+ [2023-03-08 14:40:11,041][671990] Num frames 3600...
+ [2023-03-08 14:40:11,103][671990] Num frames 3700...
+ [2023-03-08 14:40:11,161][671990] Num frames 3800...
+ [2023-03-08 14:40:11,227][671990] Num frames 3900...
+ [2023-03-08 14:40:11,287][671990] Num frames 4000...
+ [2023-03-08 14:40:11,350][671990] Num frames 4100...
+ [2023-03-08 14:40:11,411][671990] Num frames 4200...
+ [2023-03-08 14:40:11,469][671990] Num frames 4300...
+ [2023-03-08 14:40:11,531][671990] Num frames 4400...
+ [2023-03-08 14:40:11,596][671990] Num frames 4500...
+ [2023-03-08 14:40:11,655][671990] Num frames 4600...
+ [2023-03-08 14:40:11,715][671990] Num frames 4700...
+ [2023-03-08 14:40:11,775][671990] Num frames 4800...
+ [2023-03-08 14:40:11,835][671990] Num frames 4900...
+ [2023-03-08 14:40:11,896][671990] Num frames 5000...
+ [2023-03-08 14:40:11,957][671990] Num frames 5100...
+ [2023-03-08 14:40:12,056][671990] Avg episode rewards: #0: 23.346, true rewards: #0: 10.346
+ [2023-03-08 14:40:12,056][671990] Avg episode reward: 23.346, avg true_objective: 10.346
+ [2023-03-08 14:40:12,079][671990] Num frames 5200...
+ [2023-03-08 14:40:12,143][671990] Num frames 5300...
+ [2023-03-08 14:40:12,204][671990] Num frames 5400...
+ [2023-03-08 14:40:12,265][671990] Num frames 5500...
+ [2023-03-08 14:40:12,324][671990] Num frames 5600...
+ [2023-03-08 14:40:12,383][671990] Num frames 5700...
+ [2023-03-08 14:40:12,442][671990] Num frames 5800...
+ [2023-03-08 14:40:12,502][671990] Num frames 5900...
+ [2023-03-08 14:40:12,563][671990] Num frames 6000...
+ [2023-03-08 14:40:12,624][671990] Num frames 6100...
+ [2023-03-08 14:40:12,685][671990] Num frames 6200...
+ [2023-03-08 14:40:12,747][671990] Num frames 6300...
+ [2023-03-08 14:40:12,845][671990] Avg episode rewards: #0: 24.122, true rewards: #0: 10.622
+ [2023-03-08 14:40:12,846][671990] Avg episode reward: 24.122, avg true_objective: 10.622
+ [2023-03-08 14:40:12,867][671990] Num frames 6400...
+ [2023-03-08 14:40:12,933][671990] Num frames 6500...
+ [2023-03-08 14:40:13,000][671990] Num frames 6600...
+ [2023-03-08 14:40:13,059][671990] Num frames 6700...
+ [2023-03-08 14:40:13,122][671990] Num frames 6800...
+ [2023-03-08 14:40:13,182][671990] Num frames 6900...
+ [2023-03-08 14:40:13,241][671990] Num frames 7000...
+ [2023-03-08 14:40:13,302][671990] Num frames 7100...
+ [2023-03-08 14:40:13,361][671990] Num frames 7200...
+ [2023-03-08 14:40:13,422][671990] Num frames 7300...
+ [2023-03-08 14:40:13,484][671990] Num frames 7400...
+ [2023-03-08 14:40:13,556][671990] Avg episode rewards: #0: 23.613, true rewards: #0: 10.613
+ [2023-03-08 14:40:13,557][671990] Avg episode reward: 23.613, avg true_objective: 10.613
+ [2023-03-08 14:40:13,604][671990] Num frames 7500...
+ [2023-03-08 14:40:13,664][671990] Num frames 7600...
+ [2023-03-08 14:40:13,723][671990] Num frames 7700...
+ [2023-03-08 14:40:13,782][671990] Num frames 7800...
+ [2023-03-08 14:40:13,841][671990] Num frames 7900...
+ [2023-03-08 14:40:13,901][671990] Num frames 8000...
+ [2023-03-08 14:40:13,960][671990] Num frames 8100...
+ [2023-03-08 14:40:14,019][671990] Num frames 8200...
+ [2023-03-08 14:40:14,079][671990] Num frames 8300...
+ [2023-03-08 14:40:14,139][671990] Num frames 8400...
+ [2023-03-08 14:40:14,201][671990] Num frames 8500...
+ [2023-03-08 14:40:14,261][671990] Num frames 8600...
+ [2023-03-08 14:40:14,322][671990] Num frames 8700...
+ [2023-03-08 14:40:14,410][671990] Avg episode rewards: #0: 24.446, true rewards: #0: 10.946
+ [2023-03-08 14:40:14,411][671990] Avg episode reward: 24.446, avg true_objective: 10.946
+ [2023-03-08 14:40:14,439][671990] Num frames 8800...
+ [2023-03-08 14:40:14,499][671990] Num frames 8900...
+ [2023-03-08 14:40:14,559][671990] Num frames 9000...
+ [2023-03-08 14:40:14,618][671990] Num frames 9100...
+ [2023-03-08 14:40:14,678][671990] Num frames 9200...
+ [2023-03-08 14:40:14,737][671990] Num frames 9300...
+ [2023-03-08 14:40:14,796][671990] Num frames 9400...
+ [2023-03-08 14:40:14,856][671990] Num frames 9500...
+ [2023-03-08 14:40:14,916][671990] Num frames 9600...
+ [2023-03-08 14:40:14,976][671990] Num frames 9700...
+ [2023-03-08 14:40:15,039][671990] Num frames 9800...
+ [2023-03-08 14:40:15,099][671990] Num frames 9900...
+ [2023-03-08 14:40:15,161][671990] Num frames 10000...
+ [2023-03-08 14:40:15,237][671990] Avg episode rewards: #0: 24.819, true rewards: #0: 11.152
+ [2023-03-08 14:40:15,238][671990] Avg episode reward: 24.819, avg true_objective: 11.152
+ [2023-03-08 14:40:15,277][671990] Num frames 10100...
+ [2023-03-08 14:40:15,336][671990] Num frames 10200...
+ [2023-03-08 14:40:15,397][671990] Num frames 10300...
+ [2023-03-08 14:40:15,460][671990] Num frames 10400...
+ [2023-03-08 14:40:15,520][671990] Num frames 10500...
+ [2023-03-08 14:40:15,581][671990] Num frames 10600...
+ [2023-03-08 14:40:15,641][671990] Num frames 10700...
+ [2023-03-08 14:40:15,738][671990] Avg episode rewards: #0: 23.873, true rewards: #0: 10.773
+ [2023-03-08 14:40:15,739][671990] Avg episode reward: 23.873, avg true_objective: 10.773
+ [2023-03-08 14:40:26,813][671990] Replay video saved to /home/michal/programming/deep-rl-course/train_dir/default_experiment/replay.mp4!
+ [2023-03-08 14:41:24,935][671990] Loading existing experiment configuration from /home/michal/programming/deep-rl-course/train_dir/default_experiment/config.json
+ [2023-03-08 14:41:24,936][671990] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-03-08 14:41:24,936][671990] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-03-08 14:41:24,936][671990] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-03-08 14:41:24,937][671990] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-03-08 14:41:24,937][671990] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-03-08 14:41:24,937][671990] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2023-03-08 14:41:24,938][671990] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-03-08 14:41:24,938][671990] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2023-03-08 14:41:24,938][671990] Adding new argument 'hf_repository'='michal512/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2023-03-08 14:41:24,939][671990] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-03-08 14:41:24,939][671990] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-03-08 14:41:24,940][671990] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-03-08 14:41:24,941][671990] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-03-08 14:41:24,941][671990] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-03-08 14:41:24,948][671990] RunningMeanStd input shape: (3, 72, 128)
+ [2023-03-08 14:41:24,949][671990] RunningMeanStd input shape: (1,)
+ [2023-03-08 14:41:24,955][671990] ConvEncoder: input_channels=3
+ [2023-03-08 14:41:24,972][671990] Conv encoder output size: 512
+ [2023-03-08 14:41:24,972][671990] Policy head output size: 512
+ [2023-03-08 14:41:24,997][671990] Loading state from checkpoint /home/michal/programming/deep-rl-course/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-03-08 14:41:25,284][671990] Num frames 100...
+ [2023-03-08 14:41:25,343][671990] Num frames 200...
+ [2023-03-08 14:41:25,400][671990] Num frames 300...
+ [2023-03-08 14:41:25,463][671990] Num frames 400...
+ [2023-03-08 14:41:25,522][671990] Num frames 500...
+ [2023-03-08 14:41:25,580][671990] Num frames 600...
+ [2023-03-08 14:41:25,639][671990] Num frames 700...
+ [2023-03-08 14:41:25,698][671990] Num frames 800...
+ [2023-03-08 14:41:25,757][671990] Num frames 900...
+ [2023-03-08 14:41:25,827][671990] Avg episode rewards: #0: 18.280, true rewards: #0: 9.280
+ [2023-03-08 14:41:25,828][671990] Avg episode reward: 18.280, avg true_objective: 9.280
+ [2023-03-08 14:41:25,875][671990] Num frames 1000...
+ [2023-03-08 14:41:25,934][671990] Num frames 1100...
+ [2023-03-08 14:41:26,007][671990] Num frames 1200...
+ [2023-03-08 14:41:26,076][671990] Num frames 1300...
+ [2023-03-08 14:41:26,139][671990] Num frames 1400...
+ [2023-03-08 14:41:26,205][671990] Num frames 1500...
+ [2023-03-08 14:41:26,271][671990] Num frames 1600...
+ [2023-03-08 14:41:26,330][671990] Num frames 1700...
+ [2023-03-08 14:41:26,421][671990] Avg episode rewards: #0: 18.305, true rewards: #0: 8.805
+ [2023-03-08 14:41:26,422][671990] Avg episode reward: 18.305, avg true_objective: 8.805
+ [2023-03-08 14:41:26,449][671990] Num frames 1800...
+ [2023-03-08 14:41:26,507][671990] Num frames 1900...
+ [2023-03-08 14:41:26,569][671990] Num frames 2000...
+ [2023-03-08 14:41:26,628][671990] Num frames 2100...
+ [2023-03-08 14:41:26,687][671990] Num frames 2200...
+ [2023-03-08 14:41:26,745][671990] Num frames 2300...
+ [2023-03-08 14:41:26,808][671990] Num frames 2400...
+ [2023-03-08 14:41:26,869][671990] Num frames 2500...
+ [2023-03-08 14:41:26,928][671990] Num frames 2600...
+ [2023-03-08 14:41:26,987][671990] Num frames 2700...
+ [2023-03-08 14:41:27,046][671990] Num frames 2800...
+ [2023-03-08 14:41:27,105][671990] Num frames 2900...
+ [2023-03-08 14:41:27,163][671990] Num frames 3000...
+ [2023-03-08 14:41:27,259][671990] Avg episode rewards: #0: 20.577, true rewards: #0: 10.243
+ [2023-03-08 14:41:27,260][671990] Avg episode reward: 20.577, avg true_objective: 10.243
+ [2023-03-08 14:41:27,279][671990] Num frames 3100...
+ [2023-03-08 14:41:27,338][671990] Num frames 3200...
+ [2023-03-08 14:41:27,398][671990] Num frames 3300...
+ [2023-03-08 14:41:27,458][671990] Num frames 3400...
+ [2023-03-08 14:41:27,518][671990] Num frames 3500...
+ [2023-03-08 14:41:27,578][671990] Num frames 3600...
+ [2023-03-08 14:41:27,641][671990] Num frames 3700...
+ [2023-03-08 14:41:27,700][671990] Num frames 3800...
+ [2023-03-08 14:41:27,759][671990] Num frames 3900...
+ [2023-03-08 14:41:27,817][671990] Num frames 4000...
+ [2023-03-08 14:41:27,890][671990] Avg episode rewards: #0: 20.333, true rewards: #0: 10.082
+ [2023-03-08 14:41:27,891][671990] Avg episode reward: 20.333, avg true_objective: 10.082
+ [2023-03-08 14:41:27,935][671990] Num frames 4100...
+ [2023-03-08 14:41:27,996][671990] Num frames 4200...
+ [2023-03-08 14:41:28,056][671990] Num frames 4300...
+ [2023-03-08 14:41:28,116][671990] Num frames 4400...
+ [2023-03-08 14:41:28,218][671990] Avg episode rewards: #0: 17.362, true rewards: #0: 8.962
+ [2023-03-08 14:41:28,219][671990] Avg episode reward: 17.362, avg true_objective: 8.962
+ [2023-03-08 14:41:28,234][671990] Num frames 4500...
+ [2023-03-08 14:41:28,297][671990] Num frames 4600...
+ [2023-03-08 14:41:28,359][671990] Num frames 4700...
+ [2023-03-08 14:41:28,420][671990] Num frames 4800...
+ [2023-03-08 14:41:28,480][671990] Num frames 4900...
+ [2023-03-08 14:41:28,540][671990] Num frames 5000...
+ [2023-03-08 14:41:28,601][671990] Num frames 5100...
+ [2023-03-08 14:41:28,663][671990] Num frames 5200...
+ [2023-03-08 14:41:28,723][671990] Num frames 5300...
+ [2023-03-08 14:41:28,784][671990] Num frames 5400...
+ [2023-03-08 14:41:28,846][671990] Num frames 5500...
+ [2023-03-08 14:41:28,922][671990] Avg episode rewards: #0: 17.895, true rewards: #0: 9.228
+ [2023-03-08 14:41:28,923][671990] Avg episode reward: 17.895, avg true_objective: 9.228
+ [2023-03-08 14:41:28,965][671990] Num frames 5600...
+ [2023-03-08 14:41:29,033][671990] Num frames 5700...
+ [2023-03-08 14:41:29,092][671990] Num frames 5800...
+ [2023-03-08 14:41:29,151][671990] Num frames 5900...
+ [2023-03-08 14:41:29,211][671990] Num frames 6000...
+ [2023-03-08 14:41:29,271][671990] Num frames 6100...
+ [2023-03-08 14:41:29,329][671990] Num frames 6200...
+ [2023-03-08 14:41:29,387][671990] Num frames 6300...
+ [2023-03-08 14:41:29,445][671990] Num frames 6400...
+ [2023-03-08 14:41:29,502][671990] Num frames 6500...
+ [2023-03-08 14:41:29,561][671990] Num frames 6600...
+ [2023-03-08 14:41:29,624][671990] Num frames 6700...
+ [2023-03-08 14:41:29,684][671990] Num frames 6800...
+ [2023-03-08 14:41:29,740][671990] Avg episode rewards: #0: 19.579, true rewards: #0: 9.721
+ [2023-03-08 14:41:29,741][671990] Avg episode reward: 19.579, avg true_objective: 9.721
+ [2023-03-08 14:41:29,801][671990] Num frames 6900...
+ [2023-03-08 14:41:29,864][671990] Num frames 7000...
+ [2023-03-08 14:41:29,922][671990] Num frames 7100...
+ [2023-03-08 14:41:29,980][671990] Num frames 7200...
+ [2023-03-08 14:41:30,039][671990] Num frames 7300...
+ [2023-03-08 14:41:30,097][671990] Num frames 7400...
+ [2023-03-08 14:41:30,156][671990] Num frames 7500...
+ [2023-03-08 14:41:30,214][671990] Num frames 7600...
+ [2023-03-08 14:41:30,273][671990] Num frames 7700...
+ [2023-03-08 14:41:30,332][671990] Num frames 7800...
+ [2023-03-08 14:41:30,389][671990] Avg episode rewards: #0: 19.508, true rewards: #0: 9.757
+ [2023-03-08 14:41:30,390][671990] Avg episode reward: 19.508, avg true_objective: 9.757
+ [2023-03-08 14:41:30,448][671990] Num frames 7900...
+ [2023-03-08 14:41:30,506][671990] Num frames 8000...
+ [2023-03-08 14:41:30,567][671990] Num frames 8100...
+ [2023-03-08 14:41:30,627][671990] Num frames 8200...
+ [2023-03-08 14:41:30,718][671990] Avg episode rewards: #0: 17.949, true rewards: #0: 9.171
+ [2023-03-08 14:41:30,718][671990] Avg episode reward: 17.949, avg true_objective: 9.171
+ [2023-03-08 14:41:30,749][671990] Num frames 8300...
+ [2023-03-08 14:41:30,808][671990] Num frames 8400...
+ [2023-03-08 14:41:30,869][671990] Num frames 8500...
+ [2023-03-08 14:41:30,931][671990] Num frames 8600...
+ [2023-03-08 14:41:30,993][671990] Num frames 8700...
+ [2023-03-08 14:41:31,053][671990] Num frames 8800...
+ [2023-03-08 14:41:31,136][671990] Avg episode rewards: #0: 17.552, true rewards: #0: 8.852
+ [2023-03-08 14:41:31,137][671990] Avg episode reward: 17.552, avg true_objective: 8.852
+ [2023-03-08 14:41:40,764][671990] Replay video saved to /home/michal/programming/deep-rl-course/train_dir/default_experiment/replay.mp4!