dimitarrskv committed
Commit b29c42e • Parent(s): 7b28de2
Upload folder using huggingface_hub
Browse files
- .gitattributes +1 -0
- .summary/0/events.out.tfevents.1693653185.cf44f8e7e020 +3 -0
- README.md +56 -0
- checkpoint_p0/best_000000115_471040_reward_4.691.pth +3 -0
- checkpoint_p0/checkpoint_000000053_217088.pth +3 -0
- checkpoint_p0/checkpoint_000000124_507904.pth +3 -0
- config.json +142 -0
- replay.mp4 +3 -0
- sf_log.txt +486 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+replay.mp4 filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1693653185.cf44f8e7e020
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7ede1cc4e9ba13215dafdf8204a53a805e13226dd9786da35f87e904c6e0a95
+size 136442
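The three lines above are a Git LFS pointer, not the TensorBoard event file itself: the repository tracks only a SHA-256 object id and a byte size, and the payload is fetched from LFS storage on checkout. A minimal sketch for verifying a downloaded copy against this pointer, assuming the file has been pulled to the same relative path:

```python
import hashlib
from pathlib import Path

# Assumed local path after cloning the repo with LFS enabled.
path = Path(".summary/0/events.out.tfevents.1693653185.cf44f8e7e020")

# Hash in 1 MiB chunks so large LFS objects never need to fit in memory.
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Both fields come straight from the pointer file above.
assert digest.hexdigest() == "c7ede1cc4e9ba13215dafdf8204a53a805e13226dd9786da35f87e904c6e0a95"
assert path.stat().st_size == 136442
print("local file matches the LFS pointer")
```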
README.md
ADDED
@@ -0,0 +1,56 @@
+---
+library_name: sample-factory
+tags:
+- deep-reinforcement-learning
+- reinforcement-learning
+- sample-factory
+model-index:
+- name: APPO
+  results:
+  - task:
+      type: reinforcement-learning
+      name: reinforcement-learning
+    dataset:
+      name: doom_health_gathering_supreme
+      type: doom_health_gathering_supreme
+    metrics:
+    - type: mean_reward
+      value: 4.19 +/- 0.68
+      name: mean_reward
+      verified: false
+---
+
+An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
+## Downloading the model
+
+After installing Sample-Factory, download the model with:
+```
+python -m sample_factory.huggingface.load_from_hub -r dimitarrskv/rl_course_vizdoom_health_gathering_supreme
+```
+
+
+## Using the model
+
+To run the model after download, use the `enjoy` script corresponding to this environment:
+```
+python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+```
+
+
+You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+## Training with this model
+
+To continue training with this model, use the `train` script corresponding to this environment:
+```
+python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+```
+
+Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
+
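The `load_from_hub` command in the card above is the route Sample-Factory documents. As an alternative sketch, the same snapshot can be fetched directly with `huggingface_hub` (which the loader uses under the hood); the target directory here mirrors the `--train_dir`/`--experiment` layout the enjoy/train commands expect and is an assumption, not something the card pins down. Likewise, for VizDoom runs the `<path.to.enjoy.module>` placeholder is typically `sf_examples.vizdoom.enjoy_vizdoom` in recent Sample-Factory releases, but the placeholder is left as-is above since the card does not name it.

```python
from huggingface_hub import snapshot_download

# Assumed destination: <train_dir>/<experiment>, matching the --train_dir and
# --experiment flags used by the enjoy/train commands in the card above.
local_dir = snapshot_download(
    repo_id="dimitarrskv/rl_course_vizdoom_health_gathering_supreme",
    local_dir="./train_dir/rl_course_vizdoom_health_gathering_supreme",
)
print(f"snapshot downloaded to {local_dir}")
```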
checkpoint_p0/best_000000115_471040_reward_4.691.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da343425da33363f96968390fba0ba90d7df77c2be96d416a580a52b2cdf4422
+size 34928614
checkpoint_p0/checkpoint_000000053_217088.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b585910ac84e10d82b04fec15cd18d1ab234686da8db7931ea6b041150b4d99
+size 34929028
checkpoint_p0/checkpoint_000000124_507904.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d19f095175386c51be865a7b8974338b9efef264d459083278d34a3b491394b5
+size 34929028
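The checkpoint filenames encode the policy version and the env-step count at save time, and the numbers are internally consistent: with `batch_size` 1024 and `env_frameskip` 4 from the config below, each policy version corresponds to 1024 × 4 = 4096 env frames, and indeed 53 × 4096 = 217088, 115 × 4096 = 471040, 124 × 4096 = 507904. A minimal sketch for peeking inside one of these files; it assumes PyTorch is installed and deliberately avoids assuming any particular key layout:

```python
import torch

# Assumed local path after downloading the repository.
ckpt_path = "checkpoint_p0/checkpoint_000000124_507904.pth"

# Sample-Factory checkpoints are ordinary torch pickles; load on CPU so no
# GPU is required just to inspect them.
checkpoint = torch.load(ckpt_path, map_location="cpu")

# Print the top-level entries without assuming specific key names.
for key, value in checkpoint.items():
    print(f"{key}: {type(value).__name__}")
```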
config.json
ADDED
@@ -0,0 +1,142 @@
+{
+    "help": false,
+    "algo": "APPO",
+    "env": "doom_health_gathering_supreme",
+    "experiment": "default_experiment",
+    "train_dir": "/content/train_dir",
+    "restart_behavior": "resume",
+    "device": "gpu",
+    "seed": null,
+    "num_policies": 1,
+    "async_rl": true,
+    "serial_mode": false,
+    "batched_sampling": false,
+    "num_batches_to_accumulate": 2,
+    "worker_num_splits": 2,
+    "policy_workers_per_policy": 1,
+    "max_policy_lag": 1000,
+    "num_workers": 8,
+    "num_envs_per_worker": 4,
+    "batch_size": 1024,
+    "num_batches_per_epoch": 1,
+    "num_epochs": 1,
+    "rollout": 32,
+    "recurrence": 32,
+    "shuffle_minibatches": false,
+    "gamma": 0.99,
+    "reward_scale": 1.0,
+    "reward_clip": 1000.0,
+    "value_bootstrap": false,
+    "normalize_returns": true,
+    "exploration_loss_coeff": 0.001,
+    "value_loss_coeff": 0.5,
+    "kl_loss_coeff": 0.0,
+    "exploration_loss": "symmetric_kl",
+    "gae_lambda": 0.95,
+    "ppo_clip_ratio": 0.1,
+    "ppo_clip_value": 0.2,
+    "with_vtrace": false,
+    "vtrace_rho": 1.0,
+    "vtrace_c": 1.0,
+    "optimizer": "adam",
+    "adam_eps": 1e-06,
+    "adam_beta1": 0.9,
+    "adam_beta2": 0.999,
+    "max_grad_norm": 4.0,
+    "learning_rate": 0.0001,
+    "lr_schedule": "constant",
+    "lr_schedule_kl_threshold": 0.008,
+    "lr_adaptive_min": 1e-06,
+    "lr_adaptive_max": 0.01,
+    "obs_subtract_mean": 0.0,
+    "obs_scale": 255.0,
+    "normalize_input": true,
+    "normalize_input_keys": null,
+    "decorrelate_experience_max_seconds": 0,
+    "decorrelate_envs_on_one_worker": true,
+    "actor_worker_gpus": [],
+    "set_workers_cpu_affinity": true,
+    "force_envs_single_thread": false,
+    "default_niceness": 0,
+    "log_to_file": true,
+    "experiment_summaries_interval": 10,
+    "flush_summaries_interval": 30,
+    "stats_avg": 100,
+    "summaries_use_frameskip": true,
+    "heartbeat_interval": 20,
+    "heartbeat_reporting_interval": 600,
+    "train_for_env_steps": 500000,
+    "train_for_seconds": 10000000000,
+    "save_every_sec": 120,
+    "keep_checkpoints": 2,
+    "load_checkpoint_kind": "latest",
+    "save_milestones_sec": -1,
+    "save_best_every_sec": 5,
+    "save_best_metric": "reward",
+    "save_best_after": 100000,
+    "benchmark": false,
+    "encoder_mlp_layers": [
+        512,
+        512
+    ],
+    "encoder_conv_architecture": "convnet_simple",
+    "encoder_conv_mlp_layers": [
+        512
+    ],
+    "use_rnn": true,
+    "rnn_size": 512,
+    "rnn_type": "gru",
+    "rnn_num_layers": 1,
+    "decoder_mlp_layers": [],
+    "nonlinearity": "elu",
+    "policy_initialization": "orthogonal",
+    "policy_init_gain": 1.0,
+    "actor_critic_share_weights": true,
+    "adaptive_stddev": true,
+    "continuous_tanh_scale": 0.0,
+    "initial_stddev": 1.0,
+    "use_env_info_cache": false,
+    "env_gpu_actions": false,
+    "env_gpu_observations": true,
+    "env_frameskip": 4,
+    "env_framestack": 1,
+    "pixel_format": "CHW",
+    "use_record_episode_statistics": false,
+    "with_wandb": false,
+    "wandb_user": null,
+    "wandb_project": "sample_factory",
+    "wandb_group": null,
+    "wandb_job_type": "SF",
+    "wandb_tags": [],
+    "with_pbt": false,
+    "pbt_mix_policies_in_one_env": true,
+    "pbt_period_env_steps": 5000000,
+    "pbt_start_mutation": 20000000,
+    "pbt_replace_fraction": 0.3,
+    "pbt_mutation_rate": 0.15,
+    "pbt_replace_reward_gap": 0.1,
+    "pbt_replace_reward_gap_absolute": 1e-06,
+    "pbt_optimize_gamma": false,
+    "pbt_target_objective": "true_objective",
+    "pbt_perturb_min": 1.1,
+    "pbt_perturb_max": 1.5,
+    "num_agents": -1,
+    "num_humans": 0,
+    "num_bots": -1,
+    "start_bot_difficulty": null,
+    "timelimit": null,
+    "res_w": 128,
+    "res_h": 72,
+    "wide_aspect_ratio": false,
+    "eval_env_frameskip": 1,
+    "fps": 35,
+    "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=500000",
+    "cli_args": {
+        "env": "doom_health_gathering_supreme",
+        "num_workers": 8,
+        "num_envs_per_worker": 4,
+        "train_for_env_steps": 500000
+    },
+    "git_hash": "unknown",
+    "git_repo_name": "not a git repository"
+}
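A short sketch that reads this config and prints the settings that determine the batch geometry; all field names are taken verbatim from the file above. Note that `num_workers` × `num_envs_per_worker` × `rollout` = 8 × 4 × 32 = 1024 samples, exactly one `batch_size`, or 4096 env frames once the frameskip of 4 is counted:

```python
import json

# Assumed local path after downloading the repository.
with open("config.json") as f:
    cfg = json.load(f)

keys = ("num_workers", "num_envs_per_worker", "rollout",
        "batch_size", "env_frameskip", "train_for_env_steps")
for key in keys:
    print(f"{key} = {cfg[key]}")

# Samples gathered per full sampling round across all workers.
samples = cfg["num_workers"] * cfg["num_envs_per_worker"] * cfg["rollout"]
print(f"samples per round = {samples}")                             # 1024 == batch_size
print(f"env frames per round = {samples * cfg['env_frameskip']}")   # 4096
```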
replay.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bf4761d31c1f53f5dc3989216f23977021081c87192987bcbf583ed685cea99
+size 6244445
sf_log.txt
ADDED
@@ -0,0 +1,486 @@
+[2023-09-02 11:13:14,450][02307] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2023-09-02 11:13:14,453][02307] Rollout worker 0 uses device cpu
+[2023-09-02 11:13:14,455][02307] Rollout worker 1 uses device cpu
+[2023-09-02 11:13:14,456][02307] Rollout worker 2 uses device cpu
+[2023-09-02 11:13:14,457][02307] Rollout worker 3 uses device cpu
+[2023-09-02 11:13:14,458][02307] Rollout worker 4 uses device cpu
+[2023-09-02 11:13:14,459][02307] Rollout worker 5 uses device cpu
+[2023-09-02 11:13:14,460][02307] Rollout worker 6 uses device cpu
+[2023-09-02 11:13:14,461][02307] Rollout worker 7 uses device cpu
+[2023-09-02 11:13:14,669][02307] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-02 11:13:14,673][02307] InferenceWorker_p0-w0: min num requests: 2
+[2023-09-02 11:13:14,750][02307] Starting all processes...
+[2023-09-02 11:13:14,766][02307] Starting process learner_proc0
+[2023-09-02 11:13:14,857][02307] Starting all processes...
+[2023-09-02 11:13:14,883][02307] Starting process inference_proc0-0
+[2023-09-02 11:13:14,897][02307] Starting process rollout_proc0
+[2023-09-02 11:13:14,898][02307] Starting process rollout_proc1
+[2023-09-02 11:13:14,898][02307] Starting process rollout_proc2
+[2023-09-02 11:13:14,898][02307] Starting process rollout_proc3
+[2023-09-02 11:13:14,899][02307] Starting process rollout_proc4
+[2023-09-02 11:13:14,899][02307] Starting process rollout_proc5
+[2023-09-02 11:13:14,899][02307] Starting process rollout_proc6
+[2023-09-02 11:13:14,899][02307] Starting process rollout_proc7
+[2023-09-02 11:13:31,075][10559] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-02 11:13:31,082][10559] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-09-02 11:13:31,134][10559] Num visible devices: 1
+[2023-09-02 11:13:31,186][10559] Starting seed is not provided
+[2023-09-02 11:13:31,187][10559] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-02 11:13:31,188][10559] Initializing actor-critic model on device cuda:0
+[2023-09-02 11:13:31,189][10559] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-02 11:13:31,192][10559] RunningMeanStd input shape: (1,)
+[2023-09-02 11:13:31,287][10578] Worker 4 uses CPU cores [0]
+[2023-09-02 11:13:31,309][10559] ConvEncoder: input_channels=3
+[2023-09-02 11:13:31,451][10580] Worker 6 uses CPU cores [0]
+[2023-09-02 11:13:31,470][10573] Worker 0 uses CPU cores [0]
+[2023-09-02 11:13:31,518][10576] Worker 3 uses CPU cores [1]
+[2023-09-02 11:13:31,541][10577] Worker 5 uses CPU cores [1]
+[2023-09-02 11:13:31,595][10579] Worker 7 uses CPU cores [1]
+[2023-09-02 11:13:31,598][10574] Worker 1 uses CPU cores [1]
+[2023-09-02 11:13:31,598][10575] Worker 2 uses CPU cores [0]
+[2023-09-02 11:13:31,622][10572] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-02 11:13:31,622][10572] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-09-02 11:13:31,637][10572] Num visible devices: 1
+[2023-09-02 11:13:31,738][10559] Conv encoder output size: 512
+[2023-09-02 11:13:31,738][10559] Policy head output size: 512
+[2023-09-02 11:13:31,786][10559] Created Actor Critic model with architecture:
+[2023-09-02 11:13:31,786][10559] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-09-02 11:13:34,660][02307] Heartbeat connected on Batcher_0
+[2023-09-02 11:13:34,670][02307] Heartbeat connected on InferenceWorker_p0-w0
+[2023-09-02 11:13:34,683][02307] Heartbeat connected on RolloutWorker_w0
+[2023-09-02 11:13:34,688][02307] Heartbeat connected on RolloutWorker_w1
+[2023-09-02 11:13:34,692][02307] Heartbeat connected on RolloutWorker_w2
+[2023-09-02 11:13:34,696][02307] Heartbeat connected on RolloutWorker_w3
+[2023-09-02 11:13:34,700][02307] Heartbeat connected on RolloutWorker_w4
+[2023-09-02 11:13:34,711][02307] Heartbeat connected on RolloutWorker_w5
+[2023-09-02 11:13:34,733][02307] Heartbeat connected on RolloutWorker_w6
+[2023-09-02 11:13:34,752][02307] Heartbeat connected on RolloutWorker_w7
+[2023-09-02 11:13:39,825][10559] Using optimizer <class 'torch.optim.adam.Adam'>
+[2023-09-02 11:13:39,826][10559] No checkpoints found
+[2023-09-02 11:13:39,826][10559] Did not load from checkpoint, starting from scratch!
+[2023-09-02 11:13:39,827][10559] Initialized policy 0 weights for model version 0
+[2023-09-02 11:13:39,832][10559] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-02 11:13:39,841][10559] LearnerWorker_p0 finished initialization!
+[2023-09-02 11:13:39,842][02307] Heartbeat connected on LearnerWorker_p0
+[2023-09-02 11:13:39,966][10572] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-02 11:13:39,968][10572] RunningMeanStd input shape: (1,)
+[2023-09-02 11:13:39,986][10572] ConvEncoder: input_channels=3
+[2023-09-02 11:13:40,142][10572] Conv encoder output size: 512
+[2023-09-02 11:13:40,143][10572] Policy head output size: 512
+[2023-09-02 11:13:40,278][02307] Inference worker 0-0 is ready!
+[2023-09-02 11:13:40,279][02307] All inference workers are ready! Signal rollout workers to start!
+[2023-09-02 11:13:40,486][10574] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,484][10576] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,488][10579] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,494][10577] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,682][10575] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,693][10578] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,702][10580] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,704][10573] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:13:40,750][02307] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-02 11:13:41,880][10580] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:41,883][10575] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:42,084][10576] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:42,091][10574] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:42,093][10577] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:42,475][10577] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:42,983][10577] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:43,306][10580] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:43,312][10575] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:43,316][10573] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:43,576][10578] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:43,684][10577] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:44,460][10574] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:44,473][10576] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:45,434][10578] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:45,545][10580] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:45,749][02307] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-02 11:13:45,789][10573] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:46,542][10575] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:47,044][10574] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:47,081][10578] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:47,215][10573] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:47,425][10579] Decorrelating experience for 0 frames...
+[2023-09-02 11:13:47,966][10573] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:48,309][10576] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:49,220][10579] Decorrelating experience for 32 frames...
+[2023-09-02 11:13:49,397][10580] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:50,426][10576] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:50,705][10575] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:50,749][02307] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 121.4. Samples: 1214. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-02 11:13:50,754][02307] Avg episode reward: [(0, '3.186')]
+[2023-09-02 11:13:52,449][10559] Signal inference workers to stop experience collection...
+[2023-09-02 11:13:52,464][10572] InferenceWorker_p0-w0: stopping experience collection
+[2023-09-02 11:13:52,668][10574] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:52,820][10578] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:54,082][10579] Decorrelating experience for 64 frames...
+[2023-09-02 11:13:55,122][10579] Decorrelating experience for 96 frames...
+[2023-09-02 11:13:55,748][02307] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 152.0. Samples: 2280. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-02 11:13:55,751][02307] Avg episode reward: [(0, '3.058')]
+[2023-09-02 11:13:56,652][10559] Signal inference workers to resume experience collection...
+[2023-09-02 11:13:56,653][10572] InferenceWorker_p0-w0: resuming experience collection
+[2023-09-02 11:14:00,748][02307] Fps is (10 sec: 819.2, 60 sec: 409.6, 300 sec: 409.6). Total num frames: 8192. Throughput: 0: 143.8. Samples: 2876. Policy #0 lag: (min: 1.0, avg: 1.0, max: 1.0)
+[2023-09-02 11:14:00,752][02307] Avg episode reward: [(0, '2.819')]
+[2023-09-02 11:14:05,749][02307] Fps is (10 sec: 2867.2, 60 sec: 1146.9, 300 sec: 1146.9). Total num frames: 28672. Throughput: 0: 284.5. Samples: 7112. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:14:05,760][02307] Avg episode reward: [(0, '3.575')]
+[2023-09-02 11:14:08,166][10572] Updated weights for policy 0, policy_version 10 (0.0013)
+[2023-09-02 11:14:10,753][02307] Fps is (10 sec: 4094.3, 60 sec: 1638.3, 300 sec: 1638.3). Total num frames: 49152. Throughput: 0: 423.9. Samples: 12718. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-02 11:14:10,758][02307] Avg episode reward: [(0, '4.120')]
+[2023-09-02 11:14:15,754][02307] Fps is (10 sec: 3274.9, 60 sec: 1755.2, 300 sec: 1755.2). Total num frames: 61440. Throughput: 0: 421.6. Samples: 14758. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-02 11:14:15,757][02307] Avg episode reward: [(0, '4.419')]
+[2023-09-02 11:14:20,749][02307] Fps is (10 sec: 2458.6, 60 sec: 1843.3, 300 sec: 1843.3). Total num frames: 73728. Throughput: 0: 458.0. Samples: 18318. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-02 11:14:20,756][02307] Avg episode reward: [(0, '4.492')]
+[2023-09-02 11:14:22,814][10572] Updated weights for policy 0, policy_version 20 (0.0047)
+[2023-09-02 11:14:25,748][02307] Fps is (10 sec: 2868.9, 60 sec: 2002.6, 300 sec: 2002.6). Total num frames: 90112. Throughput: 0: 518.5. Samples: 23330. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-02 11:14:25,756][02307] Avg episode reward: [(0, '4.464')]
+[2023-09-02 11:14:30,748][02307] Fps is (10 sec: 3686.4, 60 sec: 2211.9, 300 sec: 2211.9). Total num frames: 110592. Throughput: 0: 585.3. Samples: 26340. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-02 11:14:30,751][02307] Avg episode reward: [(0, '4.339')]
+[2023-09-02 11:14:30,754][10559] Saving new best policy, reward=4.339!
+[2023-09-02 11:14:33,815][10572] Updated weights for policy 0, policy_version 30 (0.0031)
+[2023-09-02 11:14:35,749][02307] Fps is (10 sec: 3686.4, 60 sec: 2308.7, 300 sec: 2308.7). Total num frames: 126976. Throughput: 0: 671.9. Samples: 31450. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-02 11:14:35,755][02307] Avg episode reward: [(0, '4.350')]
+[2023-09-02 11:14:35,763][10559] Saving new best policy, reward=4.350!
+[2023-09-02 11:14:40,750][02307] Fps is (10 sec: 2866.7, 60 sec: 2321.1, 300 sec: 2321.1). Total num frames: 139264. Throughput: 0: 731.4. Samples: 35196. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-02 11:14:40,753][02307] Avg episode reward: [(0, '4.580')]
+[2023-09-02 11:14:40,756][10559] Saving new best policy, reward=4.580!
+[2023-09-02 11:14:45,749][02307] Fps is (10 sec: 2867.2, 60 sec: 2594.1, 300 sec: 2394.6). Total num frames: 155648. Throughput: 0: 763.3. Samples: 37226. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-02 11:14:45,751][02307] Avg episode reward: [(0, '4.562')]
+[2023-09-02 11:14:47,692][10572] Updated weights for policy 0, policy_version 40 (0.0019)
+[2023-09-02 11:14:50,748][02307] Fps is (10 sec: 3277.4, 60 sec: 2867.2, 300 sec: 2457.7). Total num frames: 172032. Throughput: 0: 801.2. Samples: 43164. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:14:50,755][02307] Avg episode reward: [(0, '4.499')]
+[2023-09-02 11:14:55,749][02307] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 2512.3). Total num frames: 188416. Throughput: 0: 778.6. Samples: 47750. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-02 11:14:55,754][02307] Avg episode reward: [(0, '4.529')]
+[2023-09-02 11:15:00,751][02307] Fps is (10 sec: 2866.4, 60 sec: 3208.4, 300 sec: 2508.8). Total num frames: 200704. Throughput: 0: 775.6. Samples: 49658. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-02 11:15:00,755][02307] Avg episode reward: [(0, '4.474')]
+[2023-09-02 11:15:01,704][10572] Updated weights for policy 0, policy_version 50 (0.0039)
+[2023-09-02 11:15:05,749][02307] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 2554.0). Total num frames: 217088. Throughput: 0: 781.3. Samples: 53478. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:05,754][02307] Avg episode reward: [(0, '4.404')]
+[2023-09-02 11:15:05,762][10559] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000053_217088.pth...
+[2023-09-02 11:15:10,749][02307] Fps is (10 sec: 3277.6, 60 sec: 3072.2, 300 sec: 2594.2). Total num frames: 233472. Throughput: 0: 801.5. Samples: 59398. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:10,756][02307] Avg episode reward: [(0, '4.407')]
+[2023-09-02 11:15:12,920][10572] Updated weights for policy 0, policy_version 60 (0.0024)
+[2023-09-02 11:15:15,748][02307] Fps is (10 sec: 3276.8, 60 sec: 3140.6, 300 sec: 2630.1). Total num frames: 249856. Throughput: 0: 799.3. Samples: 62308. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-02 11:15:15,751][02307] Avg episode reward: [(0, '4.386')]
+[2023-09-02 11:15:20,748][02307] Fps is (10 sec: 3276.9, 60 sec: 3208.5, 300 sec: 2662.4). Total num frames: 266240. Throughput: 0: 773.2. Samples: 66246. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:20,751][02307] Avg episode reward: [(0, '4.529')]
+[2023-09-02 11:15:25,749][02307] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 2652.7). Total num frames: 278528. Throughput: 0: 774.6. Samples: 70050. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-02 11:15:25,756][02307] Avg episode reward: [(0, '4.409')]
+[2023-09-02 11:15:27,634][10572] Updated weights for policy 0, policy_version 70 (0.0043)
+[2023-09-02 11:15:30,748][02307] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 2718.3). Total num frames: 299008. Throughput: 0: 794.4. Samples: 72976. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:30,756][02307] Avg episode reward: [(0, '4.445')]
+[2023-09-02 11:15:35,749][02307] Fps is (10 sec: 3686.1, 60 sec: 3140.2, 300 sec: 2742.6). Total num frames: 315392. Throughput: 0: 795.5. Samples: 78960. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-02 11:15:35,752][02307] Avg episode reward: [(0, '4.537')]
+[2023-09-02 11:15:39,050][10572] Updated weights for policy 0, policy_version 80 (0.0026)
+[2023-09-02 11:15:40,748][02307] Fps is (10 sec: 3276.8, 60 sec: 3208.6, 300 sec: 2764.8). Total num frames: 331776. Throughput: 0: 788.0. Samples: 83210. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-02 11:15:40,754][02307] Avg episode reward: [(0, '4.472')]
+[2023-09-02 11:15:45,750][02307] Fps is (10 sec: 2866.9, 60 sec: 3140.2, 300 sec: 2752.5). Total num frames: 344064. Throughput: 0: 787.5. Samples: 85096. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-02 11:15:45,753][02307] Avg episode reward: [(0, '4.476')]
+[2023-09-02 11:15:50,748][02307] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 2772.7). Total num frames: 360448. Throughput: 0: 804.7. Samples: 89690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:50,751][02307] Avg episode reward: [(0, '4.482')]
+[2023-09-02 11:15:52,229][10572] Updated weights for policy 0, policy_version 90 (0.0035)
+[2023-09-02 11:15:55,748][02307] Fps is (10 sec: 3687.0, 60 sec: 3208.5, 300 sec: 2821.7). Total num frames: 380928. Throughput: 0: 809.7. Samples: 95834. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:15:55,751][02307] Avg episode reward: [(0, '4.533')]
+[2023-09-02 11:16:00,749][02307] Fps is (10 sec: 3276.8, 60 sec: 3208.7, 300 sec: 2808.7). Total num frames: 393216. Throughput: 0: 796.1. Samples: 98134. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-02 11:16:00,751][02307] Avg episode reward: [(0, '4.540')]
+[2023-09-02 11:16:05,749][02307] Fps is (10 sec: 2457.5, 60 sec: 3140.2, 300 sec: 2796.6). Total num frames: 405504. Throughput: 0: 792.2. Samples: 101894. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-02 11:16:05,757][02307] Avg episode reward: [(0, '4.650')]
+[2023-09-02 11:16:05,770][10559] Saving new best policy, reward=4.650!
+[2023-09-02 11:16:06,089][10572] Updated weights for policy 0, policy_version 100 (0.0013)
+[2023-09-02 11:16:10,748][02307] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 2839.9). Total num frames: 425984. Throughput: 0: 813.4. Samples: 106652. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-02 11:16:10,751][02307] Avg episode reward: [(0, '4.556')]
+[2023-09-02 11:16:15,748][02307] Fps is (10 sec: 3686.6, 60 sec: 3208.5, 300 sec: 2854.0). Total num frames: 442368. Throughput: 0: 815.0. Samples: 109650. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-02 11:16:15,755][02307] Avg episode reward: [(0, '4.647')]
+[2023-09-02 11:16:16,843][10572] Updated weights for policy 0, policy_version 110 (0.0022)
+[2023-09-02 11:16:20,748][02307] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 2867.2). Total num frames: 458752. Throughput: 0: 797.8. Samples: 114860. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-02 11:16:20,751][02307] Avg episode reward: [(0, '4.559')]
+[2023-09-02 11:16:25,749][02307] Fps is (10 sec: 2867.0, 60 sec: 3208.5, 300 sec: 2854.8). Total num frames: 471040. Throughput: 0: 790.0. Samples: 118762. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:16:25,755][02307] Avg episode reward: [(0, '4.691')]
+[2023-09-02 11:16:25,769][10559] Saving new best policy, reward=4.691!
+[2023-09-02 11:16:30,750][02307] Fps is (10 sec: 2866.7, 60 sec: 3140.2, 300 sec: 2867.2). Total num frames: 487424. Throughput: 0: 788.4. Samples: 120572. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-02 11:16:30,752][02307] Avg episode reward: [(0, '4.614')]
+[2023-09-02 11:16:31,245][10572] Updated weights for policy 0, policy_version 120 (0.0019)
+[2023-09-02 11:16:35,519][10559] Stopping Batcher_0...
+[2023-09-02 11:16:35,519][10559] Loop batcher_evt_loop terminating...
+[2023-09-02 11:16:35,520][02307] Component Batcher_0 stopped!
+[2023-09-02 11:16:35,530][10559] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000124_507904.pth...
+[2023-09-02 11:16:35,557][10575] Stopping RolloutWorker_w2...
+[2023-09-02 11:16:35,558][02307] Component RolloutWorker_w2 stopped!
+[2023-09-02 11:16:35,562][10575] Loop rollout_proc2_evt_loop terminating...
+[2023-09-02 11:16:35,591][10580] Stopping RolloutWorker_w6...
+[2023-09-02 11:16:35,590][02307] Component RolloutWorker_w6 stopped!
+[2023-09-02 11:16:35,597][10580] Loop rollout_proc6_evt_loop terminating...
+[2023-09-02 11:16:35,610][10578] Stopping RolloutWorker_w4...
+[2023-09-02 11:16:35,609][02307] Component RolloutWorker_w4 stopped!
+[2023-09-02 11:16:35,617][02307] Component RolloutWorker_w7 stopped!
+[2023-09-02 11:16:35,619][10579] Stopping RolloutWorker_w7...
+[2023-09-02 11:16:35,623][10579] Loop rollout_proc7_evt_loop terminating...
+[2023-09-02 11:16:35,610][10578] Loop rollout_proc4_evt_loop terminating...
+[2023-09-02 11:16:35,627][02307] Component RolloutWorker_w3 stopped!
+[2023-09-02 11:16:35,628][10576] Stopping RolloutWorker_w3...
+[2023-09-02 11:16:35,632][02307] Component RolloutWorker_w5 stopped!
+[2023-09-02 11:16:35,634][10577] Stopping RolloutWorker_w5...
+[2023-09-02 11:16:35,638][10577] Loop rollout_proc5_evt_loop terminating...
+[2023-09-02 11:16:35,631][10576] Loop rollout_proc3_evt_loop terminating...
+[2023-09-02 11:16:35,637][10572] Weights refcount: 2 0
+[2023-09-02 11:16:35,641][02307] Component InferenceWorker_p0-w0 stopped!
+[2023-09-02 11:16:35,643][02307] Component RolloutWorker_w0 stopped!
+[2023-09-02 11:16:35,641][10572] Stopping InferenceWorker_p0-w0...
+[2023-09-02 11:16:35,650][10572] Loop inference_proc0-0_evt_loop terminating...
+[2023-09-02 11:16:35,642][10573] Stopping RolloutWorker_w0...
+[2023-09-02 11:16:35,655][02307] Component RolloutWorker_w1 stopped!
+[2023-09-02 11:16:35,657][10574] Stopping RolloutWorker_w1...
+[2023-09-02 11:16:35,659][10574] Loop rollout_proc1_evt_loop terminating...
+[2023-09-02 11:16:35,653][10573] Loop rollout_proc0_evt_loop terminating...
+[2023-09-02 11:16:35,682][10559] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000124_507904.pth...
+[2023-09-02 11:16:35,886][10559] Stopping LearnerWorker_p0...
+[2023-09-02 11:16:35,886][02307] Component LearnerWorker_p0 stopped!
+[2023-09-02 11:16:35,888][02307] Waiting for process learner_proc0 to stop...
+[2023-09-02 11:16:35,888][10559] Loop learner_proc0_evt_loop terminating...
+[2023-09-02 11:16:37,785][02307] Waiting for process inference_proc0-0 to join...
+[2023-09-02 11:16:37,788][02307] Waiting for process rollout_proc0 to join...
+[2023-09-02 11:16:39,731][02307] Waiting for process rollout_proc1 to join...
+[2023-09-02 11:16:39,734][02307] Waiting for process rollout_proc2 to join...
+[2023-09-02 11:16:39,735][02307] Waiting for process rollout_proc3 to join...
+[2023-09-02 11:16:39,737][02307] Waiting for process rollout_proc4 to join...
+[2023-09-02 11:16:39,739][02307] Waiting for process rollout_proc5 to join...
+[2023-09-02 11:16:39,741][02307] Waiting for process rollout_proc6 to join...
+[2023-09-02 11:16:39,743][02307] Waiting for process rollout_proc7 to join...
+[2023-09-02 11:16:39,746][02307] Batcher 0 profile tree view:
+batching: 3.9791, releasing_batches: 0.0058
+[2023-09-02 11:16:39,747][02307] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0043
+wait_policy_total: 80.1493
+update_model: 1.2177
+weight_update: 0.0022
+one_step: 0.0039
+handle_policy_step: 83.9183
+deserialize: 2.2387, stack: 0.4321, obs_to_device_normalize: 16.0242, forward: 47.0405, send_messages: 3.7519
+prepare_outputs: 10.5362
+to_cpu: 5.9112
+[2023-09-02 11:16:39,749][02307] Learner 0 profile tree view:
+misc: 0.0008, prepare_batch: 10.9081
+train: 11.7425
+epoch_init: 0.0008, minibatch_init: 0.0009, losses_postprocess: 0.0622, kl_divergence: 0.0836, after_optimizer: 0.5661
+calculate_losses: 3.7279
+losses_init: 0.0005, forward_head: 0.3784, bptt_initial: 2.3691, tail: 0.1694, advantages_returns: 0.0290, losses: 0.4587
+bptt: 0.2574
+bptt_forward_core: 0.2505
+update: 7.1774
+clip: 4.1199
+[2023-09-02 11:16:39,751][02307] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.0691, enqueue_policy_requests: 22.7559, env_step: 120.8244, overhead: 3.8799, complete_rollouts: 1.1832
+save_policy_outputs: 3.1243
+split_output_tensors: 1.5323
+[2023-09-02 11:16:39,752][02307] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.0595, enqueue_policy_requests: 21.1253, env_step: 119.6121, overhead: 3.2637, complete_rollouts: 0.8818
+save_policy_outputs: 3.0154
+split_output_tensors: 1.4709
+[2023-09-02 11:16:39,754][02307] Loop Runner_EvtLoop terminating...
+[2023-09-02 11:16:39,755][02307] Runner profile tree view:
+main_loop: 205.0056
+[2023-09-02 11:16:39,759][02307] Collected {0: 507904}, FPS: 2477.5
+[2023-09-02 11:16:39,820][02307] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-09-02 11:16:39,823][02307] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-09-02 11:16:39,825][02307] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-09-02 11:16:39,829][02307] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-09-02 11:16:39,830][02307] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-02 11:16:39,832][02307] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-09-02 11:16:39,833][02307] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-02 11:16:39,835][02307] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-09-02 11:16:39,836][02307] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2023-09-02 11:16:39,837][02307] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2023-09-02 11:16:39,838][02307] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-09-02 11:16:39,839][02307] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-09-02 11:16:39,840][02307] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-09-02 11:16:39,841][02307] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-09-02 11:16:39,842][02307] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-09-02 11:16:39,888][02307] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-02 11:16:39,893][02307] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-02 11:16:39,895][02307] RunningMeanStd input shape: (1,)
+[2023-09-02 11:16:39,917][02307] ConvEncoder: input_channels=3
+[2023-09-02 11:16:40,111][02307] Conv encoder output size: 512
+[2023-09-02 11:16:40,113][02307] Policy head output size: 512
+[2023-09-02 11:16:43,131][02307] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000124_507904.pth...
+[2023-09-02 11:16:44,366][02307] Num frames 100...
+[2023-09-02 11:16:44,493][02307] Num frames 200...
+[2023-09-02 11:16:44,625][02307] Num frames 300...
+[2023-09-02 11:16:44,791][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:16:44,792][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:16:44,819][02307] Num frames 400...
+[2023-09-02 11:16:44,960][02307] Num frames 500...
+[2023-09-02 11:16:45,100][02307] Num frames 600...
+[2023-09-02 11:16:45,224][02307] Num frames 700...
+[2023-09-02 11:16:45,362][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:16:45,363][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:16:45,410][02307] Num frames 800...
+[2023-09-02 11:16:45,549][02307] Num frames 900...
+[2023-09-02 11:16:45,692][02307] Num frames 1000...
+[2023-09-02 11:16:45,843][02307] Num frames 1100...
+[2023-09-02 11:16:45,972][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:16:45,973][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:16:46,049][02307] Num frames 1200...
+[2023-09-02 11:16:46,180][02307] Num frames 1300...
+[2023-09-02 11:16:46,313][02307] Num frames 1400...
+[2023-09-02 11:16:46,440][02307] Num frames 1500...
+[2023-09-02 11:16:46,581][02307] Num frames 1600...
+[2023-09-02 11:16:46,633][02307] Avg episode rewards: #0: 4.250, true rewards: #0: 4.000
+[2023-09-02 11:16:46,634][02307] Avg episode reward: 4.250, avg true_objective: 4.000
+[2023-09-02 11:16:46,768][02307] Num frames 1700...
+[2023-09-02 11:16:46,905][02307] Num frames 1800...
+[2023-09-02 11:16:47,034][02307] Num frames 1900...
+[2023-09-02 11:16:47,168][02307] Num frames 2000...
+[2023-09-02 11:16:47,296][02307] Num frames 2100...
+[2023-09-02 11:16:47,408][02307] Avg episode rewards: #0: 4.888, true rewards: #0: 4.288
+[2023-09-02 11:16:47,410][02307] Avg episode reward: 4.888, avg true_objective: 4.288
+[2023-09-02 11:16:47,496][02307] Num frames 2200...
+[2023-09-02 11:16:47,643][02307] Num frames 2300...
+[2023-09-02 11:16:47,790][02307] Num frames 2400...
+[2023-09-02 11:16:47,922][02307] Num frames 2500...
+[2023-09-02 11:16:48,099][02307] Avg episode rewards: #0: 4.987, true rewards: #0: 4.320
+[2023-09-02 11:16:48,101][02307] Avg episode reward: 4.987, avg true_objective: 4.320
+[2023-09-02 11:16:48,116][02307] Num frames 2600...
+[2023-09-02 11:16:48,252][02307] Num frames 2700...
+[2023-09-02 11:16:48,382][02307] Num frames 2800...
+[2023-09-02 11:16:48,498][02307] Avg episode rewards: #0: 4.640, true rewards: #0: 4.069
+[2023-09-02 11:16:48,501][02307] Avg episode reward: 4.640, avg true_objective: 4.069
+[2023-09-02 11:16:48,579][02307] Num frames 2900...
+[2023-09-02 11:16:48,705][02307] Num frames 3000...
+[2023-09-02 11:16:48,844][02307] Num frames 3100...
+[2023-09-02 11:16:48,976][02307] Num frames 3200...
+[2023-09-02 11:16:49,031][02307] Avg episode rewards: #0: 4.625, true rewards: #0: 4.000
+[2023-09-02 11:16:49,032][02307] Avg episode reward: 4.625, avg true_objective: 4.000
+[2023-09-02 11:16:49,168][02307] Num frames 3300...
+[2023-09-02 11:16:49,296][02307] Num frames 3400...
+[2023-09-02 11:16:49,429][02307] Num frames 3500...
+[2023-09-02 11:16:49,586][02307] Avg episode rewards: #0: 4.538, true rewards: #0: 3.982
+[2023-09-02 11:16:49,588][02307] Avg episode reward: 4.538, avg true_objective: 3.982
+[2023-09-02 11:16:49,613][02307] Num frames 3600...
+[2023-09-02 11:16:49,748][02307] Num frames 3700...
+[2023-09-02 11:16:49,894][02307] Num frames 3800...
+[2023-09-02 11:16:50,019][02307] Num frames 3900...
+[2023-09-02 11:16:50,159][02307] Avg episode rewards: #0: 4.468, true rewards: #0: 3.968
+[2023-09-02 11:16:50,160][02307] Avg episode reward: 4.468, avg true_objective: 3.968
+[2023-09-02 11:17:15,137][02307] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-09-02 11:17:15,310][02307] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-09-02 11:17:15,312][02307] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-09-02 11:17:15,314][02307] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-09-02 11:17:15,317][02307] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-09-02 11:17:15,319][02307] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-02 11:17:15,321][02307] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-09-02 11:17:15,323][02307] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2023-09-02 11:17:15,323][02307] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-09-02 11:17:15,324][02307] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2023-09-02 11:17:15,325][02307] Adding new argument 'hf_repository'='dimitarrskv/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2023-09-02 11:17:15,326][02307] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-09-02 11:17:15,327][02307] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-09-02 11:17:15,328][02307] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-09-02 11:17:15,329][02307] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-09-02 11:17:15,330][02307] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-09-02 11:17:15,377][02307] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-02 11:17:15,380][02307] RunningMeanStd input shape: (1,)
+[2023-09-02 11:17:15,397][02307] ConvEncoder: input_channels=3
+[2023-09-02 11:17:15,455][02307] Conv encoder output size: 512
+[2023-09-02 11:17:15,458][02307] Policy head output size: 512
+[2023-09-02 11:17:15,488][02307] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000124_507904.pth...
+[2023-09-02 11:17:16,362][02307] Num frames 100...
+[2023-09-02 11:17:16,541][02307] Num frames 200...
+[2023-09-02 11:17:16,729][02307] Num frames 300...
+[2023-09-02 11:17:16,955][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:17:16,958][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:17:17,006][02307] Num frames 400...
+[2023-09-02 11:17:17,209][02307] Num frames 500...
+[2023-09-02 11:17:17,386][02307] Num frames 600...
+[2023-09-02 11:17:17,574][02307] Num frames 700...
+[2023-09-02 11:17:17,770][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:17:17,772][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:17:17,858][02307] Num frames 800...
+[2023-09-02 11:17:18,076][02307] Num frames 900...
+[2023-09-02 11:17:18,296][02307] Num frames 1000...
+[2023-09-02 11:17:18,515][02307] Num frames 1100...
+[2023-09-02 11:17:18,676][02307] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-09-02 11:17:18,678][02307] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-09-02 11:17:18,810][02307] Num frames 1200...
+[2023-09-02 11:17:19,020][02307] Num frames 1300...
+[2023-09-02 11:17:19,208][02307] Num frames 1400...
+[2023-09-02 11:17:19,397][02307] Num frames 1500...
+[2023-09-02 11:17:19,598][02307] Num frames 1600...
+[2023-09-02 11:17:19,653][02307] Avg episode rewards: #0: 4.250, true rewards: #0: 4.000
+[2023-09-02 11:17:19,655][02307] Avg episode reward: 4.250, avg true_objective: 4.000
+[2023-09-02 11:17:19,848][02307] Num frames 1700...
+[2023-09-02 11:17:20,039][02307] Num frames 1800...
+[2023-09-02 11:17:20,225][02307] Num frames 1900...
+[2023-09-02 11:17:20,416][02307] Num frames 2000...
+[2023-09-02 11:17:20,567][02307] Avg episode rewards: #0: 4.496, true rewards: #0: 4.096
+[2023-09-02 11:17:20,569][02307] Avg episode reward: 4.496, avg true_objective: 4.096
+[2023-09-02 11:17:20,672][02307] Num frames 2100...
+[2023-09-02 11:17:20,858][02307] Num frames 2200...
+[2023-09-02 11:17:21,015][02307] Num frames 2300...
+[2023-09-02 11:17:21,142][02307] Num frames 2400...
+[2023-09-02 11:17:21,269][02307] Num frames 2500...
+[2023-09-02 11:17:21,397][02307] Num frames 2600...
+[2023-09-02 11:17:21,523][02307] Avg episode rewards: #0: 5.260, true rewards: #0: 4.427
+[2023-09-02 11:17:21,525][02307] Avg episode reward: 5.260, avg true_objective: 4.427
+[2023-09-02 11:17:21,586][02307] Num frames 2700...
+[2023-09-02 11:17:21,714][02307] Num frames 2800...
+[2023-09-02 11:17:21,838][02307] Num frames 2900...
+[2023-09-02 11:17:21,962][02307] Num frames 3000...
+[2023-09-02 11:17:22,073][02307] Avg episode rewards: #0: 5.057, true rewards: #0: 4.343
+[2023-09-02 11:17:22,075][02307] Avg episode reward: 5.057, avg true_objective: 4.343
+[2023-09-02 11:17:22,158][02307] Num frames 3100...
+[2023-09-02 11:17:22,289][02307] Num frames 3200...
+[2023-09-02 11:17:22,423][02307] Num frames 3300...
+[2023-09-02 11:17:22,556][02307] Num frames 3400...
+[2023-09-02 11:17:22,643][02307] Avg episode rewards: #0: 4.905, true rewards: #0: 4.280
+[2023-09-02 11:17:22,645][02307] Avg episode reward: 4.905, avg true_objective: 4.280
+[2023-09-02 11:17:22,789][02307] Num frames 3500...
+[2023-09-02 11:17:22,975][02307] Num frames 3600...
+[2023-09-02 11:17:23,166][02307] Num frames 3700...
+[2023-09-02 11:17:23,347][02307] Num frames 3800...
+[2023-09-02 11:17:23,422][02307] Avg episode rewards: #0: 4.787, true rewards: #0: 4.231
+[2023-09-02 11:17:23,426][02307] Avg episode reward: 4.787, avg true_objective: 4.231
+[2023-09-02 11:17:23,608][02307] Num frames 3900...
+[2023-09-02 11:17:23,795][02307] Num frames 4000...
+[2023-09-02 11:17:23,974][02307] Num frames 4100...
+[2023-09-02 11:17:24,198][02307] Avg episode rewards: #0: 4.692, true rewards: #0: 4.192
+[2023-09-02 11:17:24,203][02307] Avg episode reward: 4.692, avg true_objective: 4.192
+[2023-09-02 11:17:49,327][02307] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
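As a closing sanity check, the reported throughput can be recovered from the log itself: the runner collected 507904 env frames over a 205.0056 s main loop, which reproduces the reported 2477.5 FPS (the runner's exact accounting of warm-up time may differ slightly). A one-liner of that arithmetic:

```python
# Figures taken verbatim from the log above.
total_env_frames = 507904     # "Collected {0: 507904}"
main_loop_seconds = 205.0056  # "main_loop: 205.0056"

print(f"{total_env_frames / main_loop_seconds:.1f} FPS")  # 2477.5, matching the log
```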