mtlulka committed
Commit ccc8417
1 Parent(s): 5f12115

Upload . with huggingface_hub

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
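
The added rule routes the rollout video through Git LFS; it is exactly the kind of entry `git lfs track` writes. A sketch of the equivalent command, assuming `git-lfs` is installed:
```
git lfs track "replay.mp4"
```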
.summary/0/events.out.tfevents.1677095821.ee26f048b538 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c6caa8b390d5822ee8dcce4d351705be93be968d656e864dfb984c6d6afb2a6
+ size 906155
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 9.55 +/- 3.76
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r mtlulka/rl_course_vizdoom_health_gathering_supreme
+ ```
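+
+ Alternatively, the same files can be fetched with the Hugging Face CLI; a sketch, assuming a recent `huggingface_hub` that provides the `download` command, with the target path chosen to match the `--train_dir`/`--experiment` layout used below:
+ ```
+ huggingface-cli download mtlulka/rl_course_vizdoom_health_gathering_supreme --local-dir ./train_dir/rl_course_vizdoom_health_gathering_supreme
+ ```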
+
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```
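+
+ For the VizDoom environments bundled with Sample-Factory, the enjoy module is typically `sf_examples.vizdoom.enjoy_vizdoom` (an assumption about your install's example layout; substitute your own module if it differs):
+ ```
+ python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```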
+
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
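+
+ Again assuming the bundled VizDoom examples, the train module would typically be `sf_examples.vizdoom.train_vizdoom` (same caveat as above):
+ ```
+ python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```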
+
+ Note: you may have to raise `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously concluded.
+
checkpoint_p0/best_000000922_3776512_reward_22.528.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f2cc7ee0ba17a9cd96a95044c65e9ea18d9f2ad82627aac3926ff7952fcb40b
+ size 34928614
checkpoint_p0/checkpoint_000000888_3637248.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4efb462fe742bd870f6ca7b1b5475bba58162bacefd234f75d264e594940b98
+ size 34929028
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c2fa9a3367f902d075e5188c73988242531b1f069af609f9dc6c1aeafdeac60
+ size 34929028
config.json ADDED
@@ -0,0 +1,142 @@
+ {
+   "help": false,
+   "algo": "APPO",
+   "env": "doom_health_gathering_supreme",
+   "experiment": "default_experiment",
+   "train_dir": "/content/train_dir",
+   "restart_behavior": "resume",
+   "device": "gpu",
+   "seed": null,
+   "num_policies": 1,
+   "async_rl": true,
+   "serial_mode": false,
+   "batched_sampling": false,
+   "num_batches_to_accumulate": 2,
+   "worker_num_splits": 2,
+   "policy_workers_per_policy": 1,
+   "max_policy_lag": 1000,
+   "num_workers": 8,
+   "num_envs_per_worker": 4,
+   "batch_size": 1024,
+   "num_batches_per_epoch": 1,
+   "num_epochs": 1,
+   "rollout": 32,
+   "recurrence": 32,
+   "shuffle_minibatches": false,
+   "gamma": 0.99,
+   "reward_scale": 1.0,
+   "reward_clip": 1000.0,
+   "value_bootstrap": false,
+   "normalize_returns": true,
+   "exploration_loss_coeff": 0.001,
+   "value_loss_coeff": 0.5,
+   "kl_loss_coeff": 0.0,
+   "exploration_loss": "symmetric_kl",
+   "gae_lambda": 0.95,
+   "ppo_clip_ratio": 0.1,
+   "ppo_clip_value": 0.2,
+   "with_vtrace": false,
+   "vtrace_rho": 1.0,
+   "vtrace_c": 1.0,
+   "optimizer": "adam",
+   "adam_eps": 1e-06,
+   "adam_beta1": 0.9,
+   "adam_beta2": 0.999,
+   "max_grad_norm": 4.0,
+   "learning_rate": 0.0001,
+   "lr_schedule": "constant",
+   "lr_schedule_kl_threshold": 0.008,
+   "lr_adaptive_min": 1e-06,
+   "lr_adaptive_max": 0.01,
+   "obs_subtract_mean": 0.0,
+   "obs_scale": 255.0,
+   "normalize_input": true,
+   "normalize_input_keys": null,
+   "decorrelate_experience_max_seconds": 0,
+   "decorrelate_envs_on_one_worker": true,
+   "actor_worker_gpus": [],
+   "set_workers_cpu_affinity": true,
+   "force_envs_single_thread": false,
+   "default_niceness": 0,
+   "log_to_file": true,
+   "experiment_summaries_interval": 10,
+   "flush_summaries_interval": 30,
+   "stats_avg": 100,
+   "summaries_use_frameskip": true,
+   "heartbeat_interval": 20,
+   "heartbeat_reporting_interval": 600,
+   "train_for_env_steps": 4000000,
+   "train_for_seconds": 10000000000,
+   "save_every_sec": 120,
+   "keep_checkpoints": 2,
+   "load_checkpoint_kind": "latest",
+   "save_milestones_sec": -1,
+   "save_best_every_sec": 5,
+   "save_best_metric": "reward",
+   "save_best_after": 100000,
+   "benchmark": false,
+   "encoder_mlp_layers": [
+     512,
+     512
+   ],
+   "encoder_conv_architecture": "convnet_simple",
+   "encoder_conv_mlp_layers": [
+     512
+   ],
+   "use_rnn": true,
+   "rnn_size": 512,
+   "rnn_type": "gru",
+   "rnn_num_layers": 1,
+   "decoder_mlp_layers": [],
+   "nonlinearity": "elu",
+   "policy_initialization": "orthogonal",
+   "policy_init_gain": 1.0,
+   "actor_critic_share_weights": true,
+   "adaptive_stddev": true,
+   "continuous_tanh_scale": 0.0,
+   "initial_stddev": 1.0,
+   "use_env_info_cache": false,
+   "env_gpu_actions": false,
+   "env_gpu_observations": true,
+   "env_frameskip": 4,
+   "env_framestack": 1,
+   "pixel_format": "CHW",
+   "use_record_episode_statistics": false,
+   "with_wandb": false,
+   "wandb_user": null,
+   "wandb_project": "sample_factory",
+   "wandb_group": null,
+   "wandb_job_type": "SF",
+   "wandb_tags": [],
+   "with_pbt": false,
+   "pbt_mix_policies_in_one_env": true,
+   "pbt_period_env_steps": 5000000,
+   "pbt_start_mutation": 20000000,
+   "pbt_replace_fraction": 0.3,
+   "pbt_mutation_rate": 0.15,
+   "pbt_replace_reward_gap": 0.1,
+   "pbt_replace_reward_gap_absolute": 1e-06,
+   "pbt_optimize_gamma": false,
+   "pbt_target_objective": "true_objective",
+   "pbt_perturb_min": 1.1,
+   "pbt_perturb_max": 1.5,
+   "num_agents": -1,
+   "num_humans": 0,
+   "num_bots": -1,
+   "start_bot_difficulty": null,
+   "timelimit": null,
+   "res_w": 128,
+   "res_h": 72,
+   "wide_aspect_ratio": false,
+   "eval_env_frameskip": 1,
+   "fps": 35,
+   "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+   "cli_args": {
+     "env": "doom_health_gathering_supreme",
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "train_for_env_steps": 4000000
+   },
+   "git_hash": "unknown",
+   "git_repo_name": "not a git repository"
+ }
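
Once the repository is downloaded, the key hyperparameters in the config above can be checked at a glance; a sketch, assuming `jq` is installed:
```
jq '{algo, env, rollout, batch_size, learning_rate, train_for_env_steps}' config.json
```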
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3834a2150be3b009aff126e1935c2bf330acc7f365cd096b7b4d7cc7c019a0ec
+ size 17884779
sf_log.txt ADDED
@@ -0,0 +1,961 @@
1
+ [2023-02-22 19:57:05,655][00330] Saving configuration to /content/train_dir/default_experiment/config.json...
2
+ [2023-02-22 19:57:05,661][00330] Rollout worker 0 uses device cpu
3
+ [2023-02-22 19:57:05,662][00330] Rollout worker 1 uses device cpu
4
+ [2023-02-22 19:57:05,664][00330] Rollout worker 2 uses device cpu
5
+ [2023-02-22 19:57:05,665][00330] Rollout worker 3 uses device cpu
6
+ [2023-02-22 19:57:05,668][00330] Rollout worker 4 uses device cpu
7
+ [2023-02-22 19:57:05,673][00330] Rollout worker 5 uses device cpu
8
+ [2023-02-22 19:57:05,674][00330] Rollout worker 6 uses device cpu
9
+ [2023-02-22 19:57:05,676][00330] Rollout worker 7 uses device cpu
10
+ [2023-02-22 19:57:05,849][00330] Using GPUs [0] for process 0 (actually maps to GPUs [0])
11
+ [2023-02-22 19:57:05,854][00330] InferenceWorker_p0-w0: min num requests: 2
12
+ [2023-02-22 19:57:05,883][00330] Starting all processes...
13
+ [2023-02-22 19:57:05,885][00330] Starting process learner_proc0
14
+ [2023-02-22 19:57:05,935][00330] Starting all processes...
15
+ [2023-02-22 19:57:05,945][00330] Starting process inference_proc0-0
16
+ [2023-02-22 19:57:05,945][00330] Starting process rollout_proc0
17
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc1
18
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc2
19
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc3
20
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc4
21
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc5
22
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc6
23
+ [2023-02-22 19:57:05,947][00330] Starting process rollout_proc7
24
+ [2023-02-22 19:57:14,943][19151] Worker 1 uses CPU cores [1]
25
+ [2023-02-22 19:57:15,123][19153] Worker 3 uses CPU cores [1]
26
+ [2023-02-22 19:57:15,191][19135] Using GPUs [0] for process 0 (actually maps to GPUs [0])
27
+ [2023-02-22 19:57:15,200][19135] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
28
+ [2023-02-22 19:57:15,467][19155] Worker 5 uses CPU cores [1]
29
+ [2023-02-22 19:57:15,495][19149] Using GPUs [0] for process 0 (actually maps to GPUs [0])
30
+ [2023-02-22 19:57:15,499][19149] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
31
+ [2023-02-22 19:57:15,521][19161] Worker 7 uses CPU cores [1]
32
+ [2023-02-22 19:57:15,550][19160] Worker 6 uses CPU cores [0]
33
+ [2023-02-22 19:57:15,570][19152] Worker 2 uses CPU cores [0]
34
+ [2023-02-22 19:57:15,638][19150] Worker 0 uses CPU cores [0]
35
+ [2023-02-22 19:57:15,670][19154] Worker 4 uses CPU cores [0]
36
+ [2023-02-22 19:57:16,165][19149] Num visible devices: 1
37
+ [2023-02-22 19:57:16,166][19135] Num visible devices: 1
38
+ [2023-02-22 19:57:16,168][19135] Starting seed is not provided
39
+ [2023-02-22 19:57:16,168][19135] Using GPUs [0] for process 0 (actually maps to GPUs [0])
40
+ [2023-02-22 19:57:16,169][19135] Initializing actor-critic model on device cuda:0
41
+ [2023-02-22 19:57:16,169][19135] RunningMeanStd input shape: (3, 72, 128)
42
+ [2023-02-22 19:57:16,170][19135] RunningMeanStd input shape: (1,)
43
+ [2023-02-22 19:57:16,192][19135] ConvEncoder: input_channels=3
44
+ [2023-02-22 19:57:16,510][19135] Conv encoder output size: 512
45
+ [2023-02-22 19:57:16,510][19135] Policy head output size: 512
46
+ [2023-02-22 19:57:16,562][19135] Created Actor Critic model with architecture:
47
+ [2023-02-22 19:57:16,563][19135] ActorCriticSharedWeights(
48
+ (obs_normalizer): ObservationNormalizer(
49
+ (running_mean_std): RunningMeanStdDictInPlace(
50
+ (running_mean_std): ModuleDict(
51
+ (obs): RunningMeanStdInPlace()
52
+ )
53
+ )
54
+ )
55
+ (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
56
+ (encoder): VizdoomEncoder(
57
+ (basic_encoder): ConvEncoder(
58
+ (enc): RecursiveScriptModule(
59
+ original_name=ConvEncoderImpl
60
+ (conv_head): RecursiveScriptModule(
61
+ original_name=Sequential
62
+ (0): RecursiveScriptModule(original_name=Conv2d)
63
+ (1): RecursiveScriptModule(original_name=ELU)
64
+ (2): RecursiveScriptModule(original_name=Conv2d)
65
+ (3): RecursiveScriptModule(original_name=ELU)
66
+ (4): RecursiveScriptModule(original_name=Conv2d)
67
+ (5): RecursiveScriptModule(original_name=ELU)
68
+ )
69
+ (mlp_layers): RecursiveScriptModule(
70
+ original_name=Sequential
71
+ (0): RecursiveScriptModule(original_name=Linear)
72
+ (1): RecursiveScriptModule(original_name=ELU)
73
+ )
74
+ )
75
+ )
76
+ )
77
+ (core): ModelCoreRNN(
78
+ (core): GRU(512, 512)
79
+ )
80
+ (decoder): MlpDecoder(
81
+ (mlp): Identity()
82
+ )
83
+ (critic_linear): Linear(in_features=512, out_features=1, bias=True)
84
+ (action_parameterization): ActionParameterizationDefault(
85
+ (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
86
+ )
87
+ )
88
+ [2023-02-22 19:57:23,001][19135] Using optimizer <class 'torch.optim.adam.Adam'>
89
+ [2023-02-22 19:57:23,003][19135] No checkpoints found
90
+ [2023-02-22 19:57:23,003][19135] Did not load from checkpoint, starting from scratch!
91
+ [2023-02-22 19:57:23,004][19135] Initialized policy 0 weights for model version 0
92
+ [2023-02-22 19:57:23,006][19135] LearnerWorker_p0 finished initialization!
93
+ [2023-02-22 19:57:23,007][19135] Using GPUs [0] for process 0 (actually maps to GPUs [0])
94
+ [2023-02-22 19:57:23,242][19149] RunningMeanStd input shape: (3, 72, 128)
95
+ [2023-02-22 19:57:23,243][19149] RunningMeanStd input shape: (1,)
96
+ [2023-02-22 19:57:23,255][19149] ConvEncoder: input_channels=3
97
+ [2023-02-22 19:57:23,352][19149] Conv encoder output size: 512
98
+ [2023-02-22 19:57:23,352][19149] Policy head output size: 512
99
+ [2023-02-22 19:57:25,526][00330] Inference worker 0-0 is ready!
100
+ [2023-02-22 19:57:25,528][00330] All inference workers are ready! Signal rollout workers to start!
101
+ [2023-02-22 19:57:25,644][19151] Doom resolution: 160x120, resize resolution: (128, 72)
102
+ [2023-02-22 19:57:25,657][19152] Doom resolution: 160x120, resize resolution: (128, 72)
103
+ [2023-02-22 19:57:25,672][19150] Doom resolution: 160x120, resize resolution: (128, 72)
104
+ [2023-02-22 19:57:25,671][19153] Doom resolution: 160x120, resize resolution: (128, 72)
105
+ [2023-02-22 19:57:25,681][19155] Doom resolution: 160x120, resize resolution: (128, 72)
106
+ [2023-02-22 19:57:25,681][19154] Doom resolution: 160x120, resize resolution: (128, 72)
107
+ [2023-02-22 19:57:25,689][19160] Doom resolution: 160x120, resize resolution: (128, 72)
108
+ [2023-02-22 19:57:25,699][19161] Doom resolution: 160x120, resize resolution: (128, 72)
109
+ [2023-02-22 19:57:25,842][00330] Heartbeat connected on Batcher_0
110
+ [2023-02-22 19:57:25,845][00330] Heartbeat connected on LearnerWorker_p0
111
+ [2023-02-22 19:57:25,893][00330] Heartbeat connected on InferenceWorker_p0-w0
112
+ [2023-02-22 19:57:26,164][00330] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
113
+ [2023-02-22 19:57:26,523][19151] Decorrelating experience for 0 frames...
114
+ [2023-02-22 19:57:26,524][19161] Decorrelating experience for 0 frames...
115
+ [2023-02-22 19:57:27,104][19150] Decorrelating experience for 0 frames...
116
+ [2023-02-22 19:57:27,114][19152] Decorrelating experience for 0 frames...
117
+ [2023-02-22 19:57:27,121][19154] Decorrelating experience for 0 frames...
118
+ [2023-02-22 19:57:27,124][19160] Decorrelating experience for 0 frames...
119
+ [2023-02-22 19:57:27,176][19151] Decorrelating experience for 32 frames...
120
+ [2023-02-22 19:57:27,224][19153] Decorrelating experience for 0 frames...
121
+ [2023-02-22 19:57:27,910][19153] Decorrelating experience for 32 frames...
122
+ [2023-02-22 19:57:28,005][19151] Decorrelating experience for 64 frames...
123
+ [2023-02-22 19:57:28,466][19152] Decorrelating experience for 32 frames...
124
+ [2023-02-22 19:57:28,472][19150] Decorrelating experience for 32 frames...
125
+ [2023-02-22 19:57:28,474][19160] Decorrelating experience for 32 frames...
126
+ [2023-02-22 19:57:28,482][19154] Decorrelating experience for 32 frames...
127
+ [2023-02-22 19:57:29,140][19153] Decorrelating experience for 64 frames...
128
+ [2023-02-22 19:57:29,180][19151] Decorrelating experience for 96 frames...
129
+ [2023-02-22 19:57:29,470][00330] Heartbeat connected on RolloutWorker_w1
130
+ [2023-02-22 19:57:29,775][19155] Decorrelating experience for 0 frames...
131
+ [2023-02-22 19:57:29,809][19161] Decorrelating experience for 32 frames...
132
+ [2023-02-22 19:57:30,616][19155] Decorrelating experience for 32 frames...
133
+ [2023-02-22 19:57:30,639][19153] Decorrelating experience for 96 frames...
134
+ [2023-02-22 19:57:30,772][00330] Heartbeat connected on RolloutWorker_w3
135
+ [2023-02-22 19:57:31,164][00330] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
136
+ [2023-02-22 19:57:31,261][19155] Decorrelating experience for 64 frames...
137
+ [2023-02-22 19:57:31,813][19155] Decorrelating experience for 96 frames...
138
+ [2023-02-22 19:57:31,976][00330] Heartbeat connected on RolloutWorker_w5
139
+ [2023-02-22 19:57:31,992][19160] Decorrelating experience for 64 frames...
140
+ [2023-02-22 19:57:32,003][19150] Decorrelating experience for 64 frames...
141
+ [2023-02-22 19:57:32,049][19154] Decorrelating experience for 64 frames...
142
+ [2023-02-22 19:57:32,442][19152] Decorrelating experience for 64 frames...
143
+ [2023-02-22 19:57:34,117][19160] Decorrelating experience for 96 frames...
144
+ [2023-02-22 19:57:34,162][19154] Decorrelating experience for 96 frames...
145
+ [2023-02-22 19:57:34,558][00330] Heartbeat connected on RolloutWorker_w6
146
+ [2023-02-22 19:57:34,620][00330] Heartbeat connected on RolloutWorker_w4
147
+ [2023-02-22 19:57:34,685][19150] Decorrelating experience for 96 frames...
148
+ [2023-02-22 19:57:34,958][19152] Decorrelating experience for 96 frames...
149
+ [2023-02-22 19:57:35,153][00330] Heartbeat connected on RolloutWorker_w0
150
+ [2023-02-22 19:57:35,401][00330] Heartbeat connected on RolloutWorker_w2
151
+ [2023-02-22 19:57:36,164][00330] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 2.4. Samples: 24. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
152
+ [2023-02-22 19:57:36,166][00330] Avg episode reward: [(0, '0.853')]
153
+ [2023-02-22 19:57:38,183][19161] Decorrelating experience for 64 frames...
154
+ [2023-02-22 19:57:38,504][19135] Signal inference workers to stop experience collection...
155
+ [2023-02-22 19:57:38,518][19149] InferenceWorker_p0-w0: stopping experience collection
156
+ [2023-02-22 19:57:38,903][19161] Decorrelating experience for 96 frames...
157
+ [2023-02-22 19:57:39,017][00330] Heartbeat connected on RolloutWorker_w7
158
+ [2023-02-22 19:57:41,129][19135] Signal inference workers to resume experience collection...
159
+ [2023-02-22 19:57:41,133][19149] InferenceWorker_p0-w0: resuming experience collection
160
+ [2023-02-22 19:57:41,164][00330] Fps is (10 sec: 409.6, 60 sec: 273.1, 300 sec: 273.1). Total num frames: 4096. Throughput: 0: 158.8. Samples: 2382. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
161
+ [2023-02-22 19:57:41,171][00330] Avg episode reward: [(0, '2.021')]
162
+ [2023-02-22 19:57:46,164][00330] Fps is (10 sec: 2457.6, 60 sec: 1228.8, 300 sec: 1228.8). Total num frames: 24576. Throughput: 0: 222.2. Samples: 4444. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
163
+ [2023-02-22 19:57:46,169][00330] Avg episode reward: [(0, '3.487')]
164
+ [2023-02-22 19:57:49,668][19149] Updated weights for policy 0, policy_version 10 (0.0348)
165
+ [2023-02-22 19:57:51,164][00330] Fps is (10 sec: 4096.0, 60 sec: 1802.2, 300 sec: 1802.2). Total num frames: 45056. Throughput: 0: 436.6. Samples: 10914. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
166
+ [2023-02-22 19:57:51,170][00330] Avg episode reward: [(0, '4.145')]
167
+ [2023-02-22 19:57:56,164][00330] Fps is (10 sec: 3276.8, 60 sec: 1911.5, 300 sec: 1911.5). Total num frames: 57344. Throughput: 0: 515.3. Samples: 15458. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
168
+ [2023-02-22 19:57:56,169][00330] Avg episode reward: [(0, '4.286')]
169
+ [2023-02-22 19:58:00,739][19149] Updated weights for policy 0, policy_version 20 (0.0046)
170
+ [2023-02-22 19:58:01,164][00330] Fps is (10 sec: 3686.4, 60 sec: 2340.6, 300 sec: 2340.6). Total num frames: 81920. Throughput: 0: 521.1. Samples: 18238. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
171
+ [2023-02-22 19:58:01,172][00330] Avg episode reward: [(0, '4.324')]
172
+ [2023-02-22 19:58:06,164][00330] Fps is (10 sec: 4915.2, 60 sec: 2662.4, 300 sec: 2662.4). Total num frames: 106496. Throughput: 0: 641.3. Samples: 25652. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
173
+ [2023-02-22 19:58:06,167][00330] Avg episode reward: [(0, '4.203')]
174
+ [2023-02-22 19:58:06,174][19135] Saving new best policy, reward=4.203!
175
+ [2023-02-22 19:58:10,335][19149] Updated weights for policy 0, policy_version 30 (0.0021)
176
+ [2023-02-22 19:58:11,164][00330] Fps is (10 sec: 4096.0, 60 sec: 2730.7, 300 sec: 2730.7). Total num frames: 122880. Throughput: 0: 699.8. Samples: 31492. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
177
+ [2023-02-22 19:58:11,169][00330] Avg episode reward: [(0, '4.104')]
178
+ [2023-02-22 19:58:16,166][00330] Fps is (10 sec: 3276.1, 60 sec: 2785.2, 300 sec: 2785.2). Total num frames: 139264. Throughput: 0: 752.3. Samples: 33856. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
179
+ [2023-02-22 19:58:16,172][00330] Avg episode reward: [(0, '4.264')]
180
+ [2023-02-22 19:58:16,176][19135] Saving new best policy, reward=4.264!
181
+ [2023-02-22 19:58:21,127][19149] Updated weights for policy 0, policy_version 40 (0.0021)
182
+ [2023-02-22 19:58:21,165][00330] Fps is (10 sec: 4096.0, 60 sec: 2978.9, 300 sec: 2978.9). Total num frames: 163840. Throughput: 0: 876.4. Samples: 39464. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
183
+ [2023-02-22 19:58:21,172][00330] Avg episode reward: [(0, '4.435')]
184
+ [2023-02-22 19:58:21,182][19135] Saving new best policy, reward=4.435!
185
+ [2023-02-22 19:58:26,165][00330] Fps is (10 sec: 4506.4, 60 sec: 3072.0, 300 sec: 3072.0). Total num frames: 184320. Throughput: 0: 988.3. Samples: 46856. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
186
+ [2023-02-22 19:58:26,171][00330] Avg episode reward: [(0, '4.481')]
187
+ [2023-02-22 19:58:26,224][19135] Saving new best policy, reward=4.481!
188
+ [2023-02-22 19:58:30,822][19149] Updated weights for policy 0, policy_version 50 (0.0015)
189
+ [2023-02-22 19:58:31,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3150.8). Total num frames: 204800. Throughput: 0: 1010.7. Samples: 49924. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
190
+ [2023-02-22 19:58:31,173][00330] Avg episode reward: [(0, '4.600')]
191
+ [2023-02-22 19:58:31,187][19135] Saving new best policy, reward=4.600!
192
+ [2023-02-22 19:58:36,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3101.3). Total num frames: 217088. Throughput: 0: 969.6. Samples: 54544. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
193
+ [2023-02-22 19:58:36,170][00330] Avg episode reward: [(0, '4.508')]
194
+ [2023-02-22 19:58:41,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3959.5, 300 sec: 3222.2). Total num frames: 241664. Throughput: 0: 1007.0. Samples: 60772. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
195
+ [2023-02-22 19:58:41,172][00330] Avg episode reward: [(0, '4.439')]
196
+ [2023-02-22 19:58:41,610][19149] Updated weights for policy 0, policy_version 60 (0.0027)
197
+ [2023-02-22 19:58:46,165][00330] Fps is (10 sec: 4915.1, 60 sec: 4027.7, 300 sec: 3328.0). Total num frames: 266240. Throughput: 0: 1027.1. Samples: 64456. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
198
+ [2023-02-22 19:58:46,167][00330] Avg episode reward: [(0, '4.452')]
199
+ [2023-02-22 19:58:51,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3325.0). Total num frames: 282624. Throughput: 0: 1003.5. Samples: 70810. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
200
+ [2023-02-22 19:58:51,171][00330] Avg episode reward: [(0, '4.488')]
201
+ [2023-02-22 19:58:51,382][19149] Updated weights for policy 0, policy_version 70 (0.0015)
202
+ [2023-02-22 19:58:56,164][00330] Fps is (10 sec: 3276.9, 60 sec: 4027.7, 300 sec: 3322.3). Total num frames: 299008. Throughput: 0: 978.5. Samples: 75526. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
203
+ [2023-02-22 19:58:56,172][00330] Avg episode reward: [(0, '4.457')]
204
+ [2023-02-22 19:59:01,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3406.1). Total num frames: 323584. Throughput: 0: 996.5. Samples: 78698. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
205
+ [2023-02-22 19:59:01,167][00330] Avg episode reward: [(0, '4.389')]
206
+ [2023-02-22 19:59:01,180][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000079_323584.pth...
207
+ [2023-02-22 19:59:01,734][19149] Updated weights for policy 0, policy_version 80 (0.0028)
208
+ [2023-02-22 19:59:06,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3481.6). Total num frames: 348160. Throughput: 0: 1035.7. Samples: 86072. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
209
+ [2023-02-22 19:59:06,173][00330] Avg episode reward: [(0, '4.392')]
210
+ [2023-02-22 19:59:11,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3471.8). Total num frames: 364544. Throughput: 0: 996.5. Samples: 91700. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
211
+ [2023-02-22 19:59:11,174][00330] Avg episode reward: [(0, '4.296')]
212
+ [2023-02-22 19:59:11,946][19149] Updated weights for policy 0, policy_version 90 (0.0011)
213
+ [2023-02-22 19:59:16,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.9, 300 sec: 3463.0). Total num frames: 380928. Throughput: 0: 978.1. Samples: 93940. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
214
+ [2023-02-22 19:59:16,171][00330] Avg episode reward: [(0, '4.512')]
215
+ [2023-02-22 19:59:21,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3959.5, 300 sec: 3490.5). Total num frames: 401408. Throughput: 0: 1010.7. Samples: 100026. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
216
+ [2023-02-22 19:59:21,167][00330] Avg episode reward: [(0, '4.689')]
217
+ [2023-02-22 19:59:21,226][19135] Saving new best policy, reward=4.689!
218
+ [2023-02-22 19:59:22,068][19149] Updated weights for policy 0, policy_version 100 (0.0029)
219
+ [2023-02-22 19:59:26,165][00330] Fps is (10 sec: 4505.5, 60 sec: 4027.7, 300 sec: 3549.9). Total num frames: 425984. Throughput: 0: 1031.2. Samples: 107178. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
220
+ [2023-02-22 19:59:26,172][00330] Avg episode reward: [(0, '4.713')]
221
+ [2023-02-22 19:59:26,236][19135] Saving new best policy, reward=4.713!
222
+ [2023-02-22 19:59:31,165][00330] Fps is (10 sec: 4095.9, 60 sec: 3959.4, 300 sec: 3538.9). Total num frames: 442368. Throughput: 0: 1008.9. Samples: 109858. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
223
+ [2023-02-22 19:59:31,172][00330] Avg episode reward: [(0, '4.694')]
224
+ [2023-02-22 19:59:32,729][19149] Updated weights for policy 0, policy_version 110 (0.0031)
225
+ [2023-02-22 19:59:36,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3528.9). Total num frames: 458752. Throughput: 0: 972.5. Samples: 114572. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
226
+ [2023-02-22 19:59:36,167][00330] Avg episode reward: [(0, '4.625')]
227
+ [2023-02-22 19:59:41,164][00330] Fps is (10 sec: 4096.1, 60 sec: 4027.7, 300 sec: 3580.2). Total num frames: 483328. Throughput: 0: 1013.1. Samples: 121116. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
228
+ [2023-02-22 19:59:41,176][00330] Avg episode reward: [(0, '4.517')]
229
+ [2023-02-22 19:59:42,452][19149] Updated weights for policy 0, policy_version 120 (0.0023)
230
+ [2023-02-22 19:59:46,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3627.9). Total num frames: 507904. Throughput: 0: 1023.1. Samples: 124738. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
231
+ [2023-02-22 19:59:46,172][00330] Avg episode reward: [(0, '4.358')]
232
+ [2023-02-22 19:59:51,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3615.8). Total num frames: 524288. Throughput: 0: 991.2. Samples: 130674. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
233
+ [2023-02-22 19:59:51,168][00330] Avg episode reward: [(0, '4.428')]
234
+ [2023-02-22 19:59:53,444][19149] Updated weights for policy 0, policy_version 130 (0.0021)
235
+ [2023-02-22 19:59:56,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3604.5). Total num frames: 540672. Throughput: 0: 970.7. Samples: 135382. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
236
+ [2023-02-22 19:59:56,170][00330] Avg episode reward: [(0, '4.412')]
237
+ [2023-02-22 20:00:01,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3646.8). Total num frames: 565248. Throughput: 0: 997.2. Samples: 138812. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
238
+ [2023-02-22 20:00:01,169][00330] Avg episode reward: [(0, '4.425')]
239
+ [2023-02-22 20:00:03,812][19149] Updated weights for policy 0, policy_version 140 (0.0019)
240
+ [2023-02-22 20:00:06,165][00330] Fps is (10 sec: 3686.3, 60 sec: 3822.9, 300 sec: 3609.6). Total num frames: 577536. Throughput: 0: 989.5. Samples: 144552. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
241
+ [2023-02-22 20:00:06,169][00330] Avg episode reward: [(0, '4.364')]
242
+ [2023-02-22 20:00:11,169][00330] Fps is (10 sec: 2456.6, 60 sec: 3754.4, 300 sec: 3574.6). Total num frames: 589824. Throughput: 0: 917.1. Samples: 148450. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
243
+ [2023-02-22 20:00:11,171][00330] Avg episode reward: [(0, '4.254')]
244
+ [2023-02-22 20:00:16,164][00330] Fps is (10 sec: 2867.3, 60 sec: 3754.7, 300 sec: 3565.9). Total num frames: 606208. Throughput: 0: 903.1. Samples: 150496. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
245
+ [2023-02-22 20:00:16,172][00330] Avg episode reward: [(0, '4.357')]
246
+ [2023-02-22 20:00:18,274][19149] Updated weights for policy 0, policy_version 150 (0.0028)
247
+ [2023-02-22 20:00:21,164][00330] Fps is (10 sec: 3687.9, 60 sec: 3754.7, 300 sec: 3581.1). Total num frames: 626688. Throughput: 0: 916.1. Samples: 155798. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
248
+ [2023-02-22 20:00:21,167][00330] Avg episode reward: [(0, '4.611')]
249
+ [2023-02-22 20:00:26,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3618.1). Total num frames: 651264. Throughput: 0: 936.2. Samples: 163246. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
250
+ [2023-02-22 20:00:26,167][00330] Avg episode reward: [(0, '4.823')]
251
+ [2023-02-22 20:00:26,174][19135] Saving new best policy, reward=4.823!
252
+ [2023-02-22 20:00:26,404][19149] Updated weights for policy 0, policy_version 160 (0.0011)
253
+ [2023-02-22 20:00:31,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3631.0). Total num frames: 671744. Throughput: 0: 929.2. Samples: 166554. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
254
+ [2023-02-22 20:00:31,174][00330] Avg episode reward: [(0, '4.734')]
255
+ [2023-02-22 20:00:36,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3600.2). Total num frames: 684032. Throughput: 0: 902.6. Samples: 171292. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
256
+ [2023-02-22 20:00:36,167][00330] Avg episode reward: [(0, '4.752')]
257
+ [2023-02-22 20:00:38,510][19149] Updated weights for policy 0, policy_version 170 (0.0015)
258
+ [2023-02-22 20:00:41,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3633.9). Total num frames: 708608. Throughput: 0: 924.4. Samples: 176978. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
259
+ [2023-02-22 20:00:41,171][00330] Avg episode reward: [(0, '4.711')]
260
+ [2023-02-22 20:00:46,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3645.4). Total num frames: 729088. Throughput: 0: 928.4. Samples: 180592. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
261
+ [2023-02-22 20:00:46,170][00330] Avg episode reward: [(0, '4.660')]
262
+ [2023-02-22 20:00:47,101][19149] Updated weights for policy 0, policy_version 180 (0.0019)
263
+ [2023-02-22 20:00:51,165][00330] Fps is (10 sec: 4095.9, 60 sec: 3754.7, 300 sec: 3656.4). Total num frames: 749568. Throughput: 0: 947.4. Samples: 187184. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
264
+ [2023-02-22 20:00:51,167][00330] Avg episode reward: [(0, '4.772')]
265
+ [2023-02-22 20:00:56,165][00330] Fps is (10 sec: 3686.3, 60 sec: 3754.6, 300 sec: 3647.4). Total num frames: 765952. Throughput: 0: 964.0. Samples: 191828. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
266
+ [2023-02-22 20:00:56,167][00330] Avg episode reward: [(0, '4.887')]
267
+ [2023-02-22 20:00:56,173][19135] Saving new best policy, reward=4.887!
268
+ [2023-02-22 20:00:58,784][19149] Updated weights for policy 0, policy_version 190 (0.0021)
269
+ [2023-02-22 20:01:01,164][00330] Fps is (10 sec: 3686.5, 60 sec: 3686.4, 300 sec: 3657.8). Total num frames: 786432. Throughput: 0: 983.8. Samples: 194766. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
270
+ [2023-02-22 20:01:01,171][00330] Avg episode reward: [(0, '4.990')]
271
+ [2023-02-22 20:01:01,188][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000192_786432.pth...
272
+ [2023-02-22 20:01:01,301][19135] Saving new best policy, reward=4.990!
273
+ [2023-02-22 20:01:06,164][00330] Fps is (10 sec: 4505.7, 60 sec: 3891.2, 300 sec: 3686.4). Total num frames: 811008. Throughput: 0: 1026.4. Samples: 201988. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
274
+ [2023-02-22 20:01:06,166][00330] Avg episode reward: [(0, '4.940')]
275
+ [2023-02-22 20:01:07,455][19149] Updated weights for policy 0, policy_version 200 (0.0026)
276
+ [2023-02-22 20:01:11,165][00330] Fps is (10 sec: 4096.0, 60 sec: 3959.7, 300 sec: 3677.3). Total num frames: 827392. Throughput: 0: 984.7. Samples: 207556. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
277
+ [2023-02-22 20:01:11,169][00330] Avg episode reward: [(0, '4.851')]
278
+ [2023-02-22 20:01:16,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3668.6). Total num frames: 843776. Throughput: 0: 961.6. Samples: 209828. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
279
+ [2023-02-22 20:01:16,171][00330] Avg episode reward: [(0, '4.934')]
280
+ [2023-02-22 20:01:19,496][19149] Updated weights for policy 0, policy_version 210 (0.0017)
281
+ [2023-02-22 20:01:21,164][00330] Fps is (10 sec: 3686.5, 60 sec: 3959.5, 300 sec: 3677.7). Total num frames: 864256. Throughput: 0: 986.8. Samples: 215700. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
282
+ [2023-02-22 20:01:21,170][00330] Avg episode reward: [(0, '5.327')]
283
+ [2023-02-22 20:01:21,254][19135] Saving new best policy, reward=5.327!
284
+ [2023-02-22 20:01:26,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3703.5). Total num frames: 888832. Throughput: 0: 1023.1. Samples: 223016. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
285
+ [2023-02-22 20:01:26,170][00330] Avg episode reward: [(0, '5.191')]
286
+ [2023-02-22 20:01:28,653][19149] Updated weights for policy 0, policy_version 220 (0.0011)
287
+ [2023-02-22 20:01:31,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3694.8). Total num frames: 905216. Throughput: 0: 1005.0. Samples: 225818. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
288
+ [2023-02-22 20:01:31,167][00330] Avg episode reward: [(0, '5.164')]
289
+ [2023-02-22 20:01:36,165][00330] Fps is (10 sec: 3276.7, 60 sec: 3959.5, 300 sec: 3686.4). Total num frames: 921600. Throughput: 0: 959.3. Samples: 230352. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
290
+ [2023-02-22 20:01:36,170][00330] Avg episode reward: [(0, '5.349')]
291
+ [2023-02-22 20:01:36,179][19135] Saving new best policy, reward=5.349!
292
+ [2023-02-22 20:01:40,062][19149] Updated weights for policy 0, policy_version 230 (0.0026)
293
+ [2023-02-22 20:01:41,165][00330] Fps is (10 sec: 4095.9, 60 sec: 3959.5, 300 sec: 3710.5). Total num frames: 946176. Throughput: 0: 997.1. Samples: 236696. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
294
+ [2023-02-22 20:01:41,173][00330] Avg episode reward: [(0, '5.201')]
295
+ [2023-02-22 20:01:46,165][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3733.7). Total num frames: 970752. Throughput: 0: 1012.2. Samples: 240316. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
296
+ [2023-02-22 20:01:46,170][00330] Avg episode reward: [(0, '5.021')]
297
+ [2023-02-22 20:01:49,781][19149] Updated weights for policy 0, policy_version 240 (0.0013)
298
+ [2023-02-22 20:01:51,167][00330] Fps is (10 sec: 4095.2, 60 sec: 3959.3, 300 sec: 3725.0). Total num frames: 987136. Throughput: 0: 982.3. Samples: 246192. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
299
+ [2023-02-22 20:01:51,170][00330] Avg episode reward: [(0, '5.367')]
300
+ [2023-02-22 20:01:51,185][19135] Saving new best policy, reward=5.367!
301
+ [2023-02-22 20:01:56,164][00330] Fps is (10 sec: 2867.2, 60 sec: 3891.2, 300 sec: 3701.6). Total num frames: 999424. Throughput: 0: 960.3. Samples: 250770. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
302
+ [2023-02-22 20:01:56,169][00330] Avg episode reward: [(0, '5.694')]
303
+ [2023-02-22 20:01:56,175][19135] Saving new best policy, reward=5.694!
304
+ [2023-02-22 20:02:00,602][19149] Updated weights for policy 0, policy_version 250 (0.0022)
305
+ [2023-02-22 20:02:01,164][00330] Fps is (10 sec: 3687.2, 60 sec: 3959.5, 300 sec: 3723.6). Total num frames: 1024000. Throughput: 0: 984.4. Samples: 254124. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
306
+ [2023-02-22 20:02:01,167][00330] Avg episode reward: [(0, '5.884')]
307
+ [2023-02-22 20:02:01,174][19135] Saving new best policy, reward=5.884!
308
+ [2023-02-22 20:02:06,164][00330] Fps is (10 sec: 4915.2, 60 sec: 3959.5, 300 sec: 3744.9). Total num frames: 1048576. Throughput: 0: 1017.0. Samples: 261466. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
309
+ [2023-02-22 20:02:06,167][00330] Avg episode reward: [(0, '5.899')]
310
+ [2023-02-22 20:02:06,172][19135] Saving new best policy, reward=5.899!
311
+ [2023-02-22 20:02:10,561][19149] Updated weights for policy 0, policy_version 260 (0.0041)
312
+ [2023-02-22 20:02:11,169][00330] Fps is (10 sec: 4094.3, 60 sec: 3959.2, 300 sec: 3736.6). Total num frames: 1064960. Throughput: 0: 971.7. Samples: 266748. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
313
+ [2023-02-22 20:02:11,175][00330] Avg episode reward: [(0, '5.905')]
314
+ [2023-02-22 20:02:11,183][19135] Saving new best policy, reward=5.905!
315
+ [2023-02-22 20:02:16,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3728.8). Total num frames: 1081344. Throughput: 0: 958.9. Samples: 268970. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
316
+ [2023-02-22 20:02:16,171][00330] Avg episode reward: [(0, '5.946')]
317
+ [2023-02-22 20:02:16,174][19135] Saving new best policy, reward=5.946!
318
+ [2023-02-22 20:02:20,846][19149] Updated weights for policy 0, policy_version 270 (0.0011)
319
+ [2023-02-22 20:02:21,164][00330] Fps is (10 sec: 4097.7, 60 sec: 4027.7, 300 sec: 3748.9). Total num frames: 1105920. Throughput: 0: 1000.0. Samples: 275352. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
320
+ [2023-02-22 20:02:21,167][00330] Avg episode reward: [(0, '6.345')]
321
+ [2023-02-22 20:02:21,178][19135] Saving new best policy, reward=6.345!
322
+ [2023-02-22 20:02:26,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3832.2). Total num frames: 1130496. Throughput: 0: 1022.5. Samples: 282710. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
323
+ [2023-02-22 20:02:26,168][00330] Avg episode reward: [(0, '6.453')]
324
+ [2023-02-22 20:02:26,176][19135] Saving new best policy, reward=6.453!
325
+ [2023-02-22 20:02:31,168][00330] Fps is (10 sec: 3685.2, 60 sec: 3959.3, 300 sec: 3873.8). Total num frames: 1142784. Throughput: 0: 994.2. Samples: 285058. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
326
+ [2023-02-22 20:02:31,170][00330] Avg episode reward: [(0, '6.289')]
327
+ [2023-02-22 20:02:31,375][19149] Updated weights for policy 0, policy_version 280 (0.0015)
328
+ [2023-02-22 20:02:36,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.8, 300 sec: 3929.4). Total num frames: 1163264. Throughput: 0: 969.5. Samples: 289816. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
329
+ [2023-02-22 20:02:36,167][00330] Avg episode reward: [(0, '6.476')]
330
+ [2023-02-22 20:02:36,173][19135] Saving new best policy, reward=6.476!
331
+ [2023-02-22 20:02:41,164][00330] Fps is (10 sec: 4097.3, 60 sec: 3959.5, 300 sec: 3929.4). Total num frames: 1183744. Throughput: 0: 1019.6. Samples: 296652. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
332
+ [2023-02-22 20:02:41,171][00330] Avg episode reward: [(0, '6.565')]
333
+ [2023-02-22 20:02:41,183][19135] Saving new best policy, reward=6.565!
334
+ [2023-02-22 20:02:41,442][19149] Updated weights for policy 0, policy_version 290 (0.0014)
335
+ [2023-02-22 20:02:46,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 1208320. Throughput: 0: 1024.9. Samples: 300244. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
336
+ [2023-02-22 20:02:46,169][00330] Avg episode reward: [(0, '7.131')]
337
+ [2023-02-22 20:02:46,171][19135] Saving new best policy, reward=7.131!
338
+ [2023-02-22 20:02:51,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3959.6, 300 sec: 3957.2). Total num frames: 1224704. Throughput: 0: 983.0. Samples: 305702. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
339
+ [2023-02-22 20:02:51,167][00330] Avg episode reward: [(0, '7.273')]
340
+ [2023-02-22 20:02:51,180][19135] Saving new best policy, reward=7.273!
341
+ [2023-02-22 20:02:52,137][19149] Updated weights for policy 0, policy_version 300 (0.0014)
342
+ [2023-02-22 20:02:56,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1241088. Throughput: 0: 971.7. Samples: 310472. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
343
+ [2023-02-22 20:02:56,167][00330] Avg episode reward: [(0, '7.057')]
344
+ [2023-02-22 20:03:01,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1265664. Throughput: 0: 1003.0. Samples: 314106. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
345
+ [2023-02-22 20:03:01,173][00330] Avg episode reward: [(0, '8.054')]
346
+ [2023-02-22 20:03:01,185][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000309_1265664.pth...
347
+ [2023-02-22 20:03:01,298][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000079_323584.pth
348
+ [2023-02-22 20:03:01,306][19135] Saving new best policy, reward=8.054!
349
+ [2023-02-22 20:03:01,858][19149] Updated weights for policy 0, policy_version 310 (0.0023)
350
+ [2023-02-22 20:03:06,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 1286144. Throughput: 0: 1020.9. Samples: 321292. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
351
+ [2023-02-22 20:03:06,170][00330] Avg episode reward: [(0, '8.762')]
352
+ [2023-02-22 20:03:06,176][19135] Saving new best policy, reward=8.762!
353
+ [2023-02-22 20:03:11,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3959.7, 300 sec: 3943.3). Total num frames: 1302528. Throughput: 0: 965.4. Samples: 326152. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
354
+ [2023-02-22 20:03:11,168][00330] Avg episode reward: [(0, '8.654')]
355
+ [2023-02-22 20:03:13,444][19149] Updated weights for policy 0, policy_version 320 (0.0021)
356
+ [2023-02-22 20:03:16,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3915.5). Total num frames: 1318912. Throughput: 0: 963.6. Samples: 328416. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
357
+ [2023-02-22 20:03:16,173][00330] Avg episode reward: [(0, '8.731')]
358
+ [2023-02-22 20:03:21,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3929.4). Total num frames: 1343488. Throughput: 0: 1008.9. Samples: 335216. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
359
+ [2023-02-22 20:03:21,167][00330] Avg episode reward: [(0, '8.483')]
360
+ [2023-02-22 20:03:22,244][19149] Updated weights for policy 0, policy_version 330 (0.0023)
361
+ [2023-02-22 20:03:26,167][00330] Fps is (10 sec: 4913.9, 60 sec: 3959.3, 300 sec: 3943.2). Total num frames: 1368064. Throughput: 0: 1015.1. Samples: 342336. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
362
+ [2023-02-22 20:03:26,173][00330] Avg episode reward: [(0, '8.151')]
363
+ [2023-02-22 20:03:31,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.9, 300 sec: 3957.2). Total num frames: 1384448. Throughput: 0: 985.6. Samples: 344596. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
364
+ [2023-02-22 20:03:31,167][00330] Avg episode reward: [(0, '8.439')]
365
+ [2023-02-22 20:03:33,783][19149] Updated weights for policy 0, policy_version 340 (0.0018)
366
+ [2023-02-22 20:03:36,164][00330] Fps is (10 sec: 3277.7, 60 sec: 3959.5, 300 sec: 3929.4). Total num frames: 1400832. Throughput: 0: 970.3. Samples: 349366. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
367
+ [2023-02-22 20:03:36,171][00330] Avg episode reward: [(0, '8.764')]
368
+ [2023-02-22 20:03:36,175][19135] Saving new best policy, reward=8.764!
369
+ [2023-02-22 20:03:41,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1425408. Throughput: 0: 1025.4. Samples: 356614. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
370
+ [2023-02-22 20:03:41,169][00330] Avg episode reward: [(0, '9.187')]
371
+ [2023-02-22 20:03:41,178][19135] Saving new best policy, reward=9.187!
372
+ [2023-02-22 20:03:42,521][19149] Updated weights for policy 0, policy_version 350 (0.0021)
373
+ [2023-02-22 20:03:46,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 1445888. Throughput: 0: 1022.8. Samples: 360130. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
374
+ [2023-02-22 20:03:46,169][00330] Avg episode reward: [(0, '8.826')]
375
+ [2023-02-22 20:03:51,167][00330] Fps is (10 sec: 3685.3, 60 sec: 3959.3, 300 sec: 3943.2). Total num frames: 1462272. Throughput: 0: 978.6. Samples: 365334. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
376
+ [2023-02-22 20:03:51,170][00330] Avg episode reward: [(0, '8.722')]
377
+ [2023-02-22 20:03:54,484][19149] Updated weights for policy 0, policy_version 360 (0.0027)
378
+ [2023-02-22 20:03:56,170][00330] Fps is (10 sec: 3684.5, 60 sec: 4027.4, 300 sec: 3929.3). Total num frames: 1482752. Throughput: 0: 986.8. Samples: 370564. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
379
+ [2023-02-22 20:03:56,178][00330] Avg episode reward: [(0, '8.299')]
380
+ [2023-02-22 20:04:01,164][00330] Fps is (10 sec: 4507.0, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1507328. Throughput: 0: 1019.8. Samples: 374308. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
381
+ [2023-02-22 20:04:01,167][00330] Avg episode reward: [(0, '9.504')]
382
+ [2023-02-22 20:04:01,175][19135] Saving new best policy, reward=9.504!
383
+ [2023-02-22 20:04:02,803][19149] Updated weights for policy 0, policy_version 370 (0.0011)
384
+ [2023-02-22 20:04:06,167][00330] Fps is (10 sec: 4507.0, 60 sec: 4027.6, 300 sec: 3943.2). Total num frames: 1527808. Throughput: 0: 1029.9. Samples: 381564. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
385
+ [2023-02-22 20:04:06,174][00330] Avg episode reward: [(0, '10.114')]
386
+ [2023-02-22 20:04:06,178][19135] Saving new best policy, reward=10.114!
387
+ [2023-02-22 20:04:11,165][00330] Fps is (10 sec: 3686.3, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 1544192. Throughput: 0: 973.5. Samples: 386142. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
388
+ [2023-02-22 20:04:11,167][00330] Avg episode reward: [(0, '10.291')]
389
+ [2023-02-22 20:04:11,182][19135] Saving new best policy, reward=10.291!
390
+ [2023-02-22 20:04:14,667][19149] Updated weights for policy 0, policy_version 380 (0.0018)
391
+ [2023-02-22 20:04:16,165][00330] Fps is (10 sec: 3277.4, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1560576. Throughput: 0: 973.9. Samples: 388420. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
392
+ [2023-02-22 20:04:16,171][00330] Avg episode reward: [(0, '9.661')]
393
+ [2023-02-22 20:04:21,164][00330] Fps is (10 sec: 3686.5, 60 sec: 3959.5, 300 sec: 3915.5). Total num frames: 1581056. Throughput: 0: 1015.2. Samples: 395050. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
394
+ [2023-02-22 20:04:21,167][00330] Avg episode reward: [(0, '9.779')]
395
+ [2023-02-22 20:04:26,167][00330] Fps is (10 sec: 3275.9, 60 sec: 3754.6, 300 sec: 3901.6). Total num frames: 1593344. Throughput: 0: 951.8. Samples: 399448. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
396
+ [2023-02-22 20:04:26,170][00330] Avg episode reward: [(0, '9.818')]
397
+ [2023-02-22 20:04:26,315][19149] Updated weights for policy 0, policy_version 390 (0.0021)
398
+ [2023-02-22 20:04:31,168][00330] Fps is (10 sec: 2866.2, 60 sec: 3754.5, 300 sec: 3901.6). Total num frames: 1609728. Throughput: 0: 913.1. Samples: 401222. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
399
+ [2023-02-22 20:04:31,175][00330] Avg episode reward: [(0, '9.211')]
400
+ [2023-02-22 20:04:36,164][00330] Fps is (10 sec: 3277.8, 60 sec: 3754.7, 300 sec: 3873.8). Total num frames: 1626112. Throughput: 0: 897.8. Samples: 405734. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
401
+ [2023-02-22 20:04:36,170][00330] Avg episode reward: [(0, '9.946')]
402
+ [2023-02-22 20:04:38,478][19149] Updated weights for policy 0, policy_version 400 (0.0020)
403
+ [2023-02-22 20:04:41,164][00330] Fps is (10 sec: 4097.4, 60 sec: 3754.7, 300 sec: 3873.8). Total num frames: 1650688. Throughput: 0: 937.4. Samples: 412740. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
404
+ [2023-02-22 20:04:41,167][00330] Avg episode reward: [(0, '10.217')]
405
+ [2023-02-22 20:04:46,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3887.7). Total num frames: 1671168. Throughput: 0: 935.0. Samples: 416384. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
406
+ [2023-02-22 20:04:46,171][00330] Avg episode reward: [(0, '11.116')]
407
+ [2023-02-22 20:04:46,174][19135] Saving new best policy, reward=11.116!
408
+ [2023-02-22 20:04:47,775][19149] Updated weights for policy 0, policy_version 410 (0.0014)
409
+ [2023-02-22 20:04:51,165][00330] Fps is (10 sec: 3686.3, 60 sec: 3754.8, 300 sec: 3887.7). Total num frames: 1687552. Throughput: 0: 894.2. Samples: 421802. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
410
+ [2023-02-22 20:04:51,173][00330] Avg episode reward: [(0, '11.084')]
411
+ [2023-02-22 20:04:56,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3755.0, 300 sec: 3873.8). Total num frames: 1708032. Throughput: 0: 905.4. Samples: 426886. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
412
+ [2023-02-22 20:04:56,166][00330] Avg episode reward: [(0, '10.671')]
413
+ [2023-02-22 20:04:58,741][19149] Updated weights for policy 0, policy_version 420 (0.0018)
414
+ [2023-02-22 20:05:01,164][00330] Fps is (10 sec: 4096.1, 60 sec: 3686.4, 300 sec: 3901.6). Total num frames: 1728512. Throughput: 0: 935.2. Samples: 430506. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
415
+ [2023-02-22 20:05:01,167][00330] Avg episode reward: [(0, '11.177')]
416
+ [2023-02-22 20:05:01,243][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000423_1732608.pth...
417
+ [2023-02-22 20:05:01,348][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000192_786432.pth
418
+ [2023-02-22 20:05:01,362][19135] Saving new best policy, reward=11.177!
419
+ [2023-02-22 20:05:06,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3754.8, 300 sec: 3943.3). Total num frames: 1753088. Throughput: 0: 950.3. Samples: 437812. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
420
+ [2023-02-22 20:05:06,167][00330] Avg episode reward: [(0, '12.233')]
421
+ [2023-02-22 20:05:06,172][19135] Saving new best policy, reward=12.233!
422
+ [2023-02-22 20:05:08,511][19149] Updated weights for policy 0, policy_version 430 (0.0016)
423
+ [2023-02-22 20:05:11,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3929.4). Total num frames: 1765376. Throughput: 0: 956.7. Samples: 442496. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
424
+ [2023-02-22 20:05:11,169][00330] Avg episode reward: [(0, '13.313')]
425
+ [2023-02-22 20:05:11,186][19135] Saving new best policy, reward=13.313!
426
+ [2023-02-22 20:05:16,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3929.4). Total num frames: 1785856. Throughput: 0: 966.7. Samples: 444718. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
427
+ [2023-02-22 20:05:16,167][00330] Avg episode reward: [(0, '13.047')]
428
+ [2023-02-22 20:05:19,254][19149] Updated weights for policy 0, policy_version 440 (0.0018)
429
+ [2023-02-22 20:05:21,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3929.4). Total num frames: 1810432. Throughput: 0: 1020.0. Samples: 451634. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
430
+ [2023-02-22 20:05:21,171][00330] Avg episode reward: [(0, '13.524')]
431
+ [2023-02-22 20:05:21,181][19135] Saving new best policy, reward=13.524!
432
+ [2023-02-22 20:05:26,168][00330] Fps is (10 sec: 4503.8, 60 sec: 3959.4, 300 sec: 3929.3). Total num frames: 1830912. Throughput: 0: 1017.1. Samples: 458512. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
433
+ [2023-02-22 20:05:26,175][00330] Avg episode reward: [(0, '12.877')]
434
+ [2023-02-22 20:05:29,325][19149] Updated weights for policy 0, policy_version 450 (0.0011)
435
+ [2023-02-22 20:05:31,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3959.7, 300 sec: 3943.3). Total num frames: 1847296. Throughput: 0: 987.6. Samples: 460828. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
436
+ [2023-02-22 20:05:31,173][00330] Avg episode reward: [(0, '13.079')]
437
+ [2023-02-22 20:05:36,164][00330] Fps is (10 sec: 3278.1, 60 sec: 3959.5, 300 sec: 3915.5). Total num frames: 1863680. Throughput: 0: 974.5. Samples: 465656. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
438
+ [2023-02-22 20:05:36,167][00330] Avg episode reward: [(0, '12.898')]
439
+ [2023-02-22 20:05:39,359][19149] Updated weights for policy 0, policy_version 460 (0.0014)
440
+ [2023-02-22 20:05:41,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 1892352. Throughput: 0: 1027.6. Samples: 473130. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:05:41,170][00330] Avg episode reward: [(0, '11.448')]
+ [2023-02-22 20:05:46,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 1912832. Throughput: 0: 1027.1. Samples: 476724. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:05:46,167][00330] Avg episode reward: [(0, '11.618')]
+ [2023-02-22 20:05:49,729][19149] Updated weights for policy 0, policy_version 470 (0.0022)
+ [2023-02-22 20:05:51,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3929.4). Total num frames: 1925120. Throughput: 0: 972.8. Samples: 481590. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:05:51,166][00330] Avg episode reward: [(0, '12.573')]
+ [2023-02-22 20:05:56,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3929.4). Total num frames: 1945600. Throughput: 0: 989.7. Samples: 487034. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:05:56,166][00330] Avg episode reward: [(0, '13.820')]
+ [2023-02-22 20:05:56,173][19135] Saving new best policy, reward=13.820!
+ [2023-02-22 20:05:59,817][19149] Updated weights for policy 0, policy_version 480 (0.0016)
+ [2023-02-22 20:06:01,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3929.4). Total num frames: 1970176. Throughput: 0: 1020.8. Samples: 490656. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2023-02-22 20:06:01,171][00330] Avg episode reward: [(0, '15.370')]
+ [2023-02-22 20:06:01,182][19135] Saving new best policy, reward=15.370!
+ [2023-02-22 20:06:06,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 1990656. Throughput: 0: 1018.9. Samples: 497486. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:06:06,170][00330] Avg episode reward: [(0, '17.401')]
+ [2023-02-22 20:06:06,183][19135] Saving new best policy, reward=17.401!
+ [2023-02-22 20:06:10,847][19149] Updated weights for policy 0, policy_version 490 (0.0027)
+ [2023-02-22 20:06:11,165][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2007040. Throughput: 0: 968.6. Samples: 502096. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:06:11,167][00330] Avg episode reward: [(0, '16.305')]
+ [2023-02-22 20:06:16,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2027520. Throughput: 0: 973.2. Samples: 504624. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:06:16,171][00330] Avg episode reward: [(0, '15.757')]
+ [2023-02-22 20:06:20,041][19149] Updated weights for policy 0, policy_version 500 (0.0017)
+ [2023-02-22 20:06:21,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2052096. Throughput: 0: 1028.6. Samples: 511944. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:06:21,172][00330] Avg episode reward: [(0, '13.234')]
+ [2023-02-22 20:06:26,168][00330] Fps is (10 sec: 4504.0, 60 sec: 4027.8, 300 sec: 3957.1). Total num frames: 2072576. Throughput: 0: 1005.6. Samples: 518386. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:06:26,172][00330] Avg episode reward: [(0, '13.110')]
+ [2023-02-22 20:06:31,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 2084864. Throughput: 0: 976.8. Samples: 520678. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:06:31,170][00330] Avg episode reward: [(0, '13.076')]
+ [2023-02-22 20:06:31,199][19149] Updated weights for policy 0, policy_version 510 (0.0023)
+ [2023-02-22 20:06:36,166][00330] Fps is (10 sec: 3687.0, 60 sec: 4095.9, 300 sec: 3943.2). Total num frames: 2109440. Throughput: 0: 991.6. Samples: 526216. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:06:36,173][00330] Avg episode reward: [(0, '13.394')]
+ [2023-02-22 20:06:40,156][19149] Updated weights for policy 0, policy_version 520 (0.0018)
+ [2023-02-22 20:06:41,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2134016. Throughput: 0: 1034.6. Samples: 533590. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:06:41,169][00330] Avg episode reward: [(0, '15.506')]
+ [2023-02-22 20:06:46,165][00330] Fps is (10 sec: 4096.8, 60 sec: 3959.5, 300 sec: 3943.3). Total num frames: 2150400. Throughput: 0: 1030.1. Samples: 537012. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:06:46,170][00330] Avg episode reward: [(0, '16.196')]
+ [2023-02-22 20:06:51,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2166784. Throughput: 0: 982.1. Samples: 541680. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:06:51,168][00330] Avg episode reward: [(0, '16.770')]
+ [2023-02-22 20:06:51,571][19149] Updated weights for policy 0, policy_version 530 (0.0027)
+ [2023-02-22 20:06:56,164][00330] Fps is (10 sec: 4096.1, 60 sec: 4096.0, 300 sec: 3957.2). Total num frames: 2191360. Throughput: 0: 1015.2. Samples: 547780. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:06:56,170][00330] Avg episode reward: [(0, '16.294')]
+ [2023-02-22 20:07:00,226][19149] Updated weights for policy 0, policy_version 540 (0.0031)
+ [2023-02-22 20:07:01,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4096.0, 300 sec: 3957.2). Total num frames: 2215936. Throughput: 0: 1041.0. Samples: 551468. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:01,172][00330] Avg episode reward: [(0, '14.415')]
+ [2023-02-22 20:07:01,197][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000541_2215936.pth...
+ [2023-02-22 20:07:01,316][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000309_1265664.pth
+ [2023-02-22 20:07:06,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2232320. Throughput: 0: 1024.0. Samples: 558024. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:07:06,167][00330] Avg episode reward: [(0, '13.696')]
+ [2023-02-22 20:07:11,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2248704. Throughput: 0: 983.9. Samples: 562656. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:11,167][00330] Avg episode reward: [(0, '13.701')]
+ [2023-02-22 20:07:12,006][19149] Updated weights for policy 0, policy_version 550 (0.0017)
+ [2023-02-22 20:07:16,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2269184. Throughput: 0: 995.6. Samples: 565482. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:16,167][00330] Avg episode reward: [(0, '13.760')]
+ [2023-02-22 20:07:20,611][19149] Updated weights for policy 0, policy_version 560 (0.0030)
+ [2023-02-22 20:07:21,165][00330] Fps is (10 sec: 4505.5, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2293760. Throughput: 0: 1036.0. Samples: 572834. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:07:21,167][00330] Avg episode reward: [(0, '13.754')]
+ [2023-02-22 20:07:26,165][00330] Fps is (10 sec: 4505.1, 60 sec: 4027.9, 300 sec: 3971.1). Total num frames: 2314240. Throughput: 0: 1005.4. Samples: 578832. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:26,171][00330] Avg episode reward: [(0, '14.075')]
+ [2023-02-22 20:07:31,165][00330] Fps is (10 sec: 3276.6, 60 sec: 4027.7, 300 sec: 3943.3). Total num frames: 2326528. Throughput: 0: 980.2. Samples: 581122. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:07:31,167][00330] Avg episode reward: [(0, '13.525')]
+ [2023-02-22 20:07:32,496][19149] Updated weights for policy 0, policy_version 570 (0.0014)
+ [2023-02-22 20:07:36,164][00330] Fps is (10 sec: 3686.8, 60 sec: 4027.9, 300 sec: 3957.2). Total num frames: 2351104. Throughput: 0: 1005.3. Samples: 586918. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:36,167][00330] Avg episode reward: [(0, '14.217')]
+ [2023-02-22 20:07:40,590][19149] Updated weights for policy 0, policy_version 580 (0.0013)
+ [2023-02-22 20:07:41,164][00330] Fps is (10 sec: 4915.5, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2375680. Throughput: 0: 1034.8. Samples: 594348. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:07:41,166][00330] Avg episode reward: [(0, '14.374')]
+ [2023-02-22 20:07:46,165][00330] Fps is (10 sec: 4095.9, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2392064. Throughput: 0: 1023.2. Samples: 597510. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:46,167][00330] Avg episode reward: [(0, '14.227')]
+ [2023-02-22 20:07:51,165][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2408448. Throughput: 0: 979.5. Samples: 602100. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:07:51,174][00330] Avg episode reward: [(0, '14.265')]
+ [2023-02-22 20:07:52,529][19149] Updated weights for policy 0, policy_version 590 (0.0012)
+ [2023-02-22 20:07:56,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2433024. Throughput: 0: 1018.8. Samples: 608500. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:07:56,167][00330] Avg episode reward: [(0, '13.513')]
+ [2023-02-22 20:08:00,853][19149] Updated weights for policy 0, policy_version 600 (0.0014)
+ [2023-02-22 20:08:01,165][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2457600. Throughput: 0: 1036.5. Samples: 612124. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:08:01,173][00330] Avg episode reward: [(0, '14.797')]
+ [2023-02-22 20:08:06,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2473984. Throughput: 0: 1013.2. Samples: 618428. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:08:06,167][00330] Avg episode reward: [(0, '14.816')]
+ [2023-02-22 20:08:11,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2490368. Throughput: 0: 982.9. Samples: 623062. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:08:11,171][00330] Avg episode reward: [(0, '14.670')]
+ [2023-02-22 20:08:12,745][19149] Updated weights for policy 0, policy_version 610 (0.0017)
+ [2023-02-22 20:08:16,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4096.0, 300 sec: 3971.0). Total num frames: 2514944. Throughput: 0: 1001.4. Samples: 626184. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:08:16,166][00330] Avg episode reward: [(0, '15.490')]
+ [2023-02-22 20:08:20,990][19149] Updated weights for policy 0, policy_version 620 (0.0020)
+ [2023-02-22 20:08:21,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4096.0, 300 sec: 3971.1). Total num frames: 2539520. Throughput: 0: 1035.4. Samples: 633512. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:08:21,167][00330] Avg episode reward: [(0, '16.165')]
+ [2023-02-22 20:08:26,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.8, 300 sec: 3971.0). Total num frames: 2555904. Throughput: 0: 995.7. Samples: 639156. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:08:26,172][00330] Avg episode reward: [(0, '16.536')]
+ [2023-02-22 20:08:31,167][00330] Fps is (10 sec: 3275.9, 60 sec: 4095.8, 300 sec: 3971.0). Total num frames: 2572288. Throughput: 0: 977.5. Samples: 641500. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:08:31,171][00330] Avg episode reward: [(0, '17.234')]
+ [2023-02-22 20:08:33,969][19149] Updated weights for policy 0, policy_version 630 (0.0020)
+ [2023-02-22 20:08:36,165][00330] Fps is (10 sec: 2867.0, 60 sec: 3891.2, 300 sec: 3929.4). Total num frames: 2584576. Throughput: 0: 975.4. Samples: 645994. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:08:36,168][00330] Avg episode reward: [(0, '17.502')]
+ [2023-02-22 20:08:36,174][19135] Saving new best policy, reward=17.502!
+ [2023-02-22 20:08:41,164][00330] Fps is (10 sec: 2868.0, 60 sec: 3754.7, 300 sec: 3915.5). Total num frames: 2600960. Throughput: 0: 936.4. Samples: 650638. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:08:41,172][00330] Avg episode reward: [(0, '18.449')]
+ [2023-02-22 20:08:41,182][19135] Saving new best policy, reward=18.449!
+ [2023-02-22 20:08:46,166][00330] Fps is (10 sec: 3276.5, 60 sec: 3754.6, 300 sec: 3915.5). Total num frames: 2617344. Throughput: 0: 915.9. Samples: 653342. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:08:46,176][00330] Avg episode reward: [(0, '17.334')]
+ [2023-02-22 20:08:46,755][19149] Updated weights for policy 0, policy_version 640 (0.0022)
+ [2023-02-22 20:08:51,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3901.7). Total num frames: 2633728. Throughput: 0: 876.3. Samples: 657860. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:08:51,169][00330] Avg episode reward: [(0, '18.218')]
+ [2023-02-22 20:08:56,164][00330] Fps is (10 sec: 4096.6, 60 sec: 3754.7, 300 sec: 3901.6). Total num frames: 2658304. Throughput: 0: 920.1. Samples: 664468. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:08:56,173][00330] Avg episode reward: [(0, '16.086')]
+ [2023-02-22 20:08:56,945][19149] Updated weights for policy 0, policy_version 650 (0.0026)
+ [2023-02-22 20:09:01,164][00330] Fps is (10 sec: 4915.2, 60 sec: 3754.7, 300 sec: 3915.5). Total num frames: 2682880. Throughput: 0: 934.0. Samples: 668214. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:09:01,166][00330] Avg episode reward: [(0, '15.099')]
+ [2023-02-22 20:09:01,179][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000655_2682880.pth...
+ [2023-02-22 20:09:01,296][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000423_1732608.pth
+ [2023-02-22 20:09:06,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3901.6). Total num frames: 2695168. Throughput: 0: 904.8. Samples: 674228. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:06,170][00330] Avg episode reward: [(0, '15.210')]
+ [2023-02-22 20:09:07,330][19149] Updated weights for policy 0, policy_version 660 (0.0011)
+ [2023-02-22 20:09:11,164][00330] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3901.6). Total num frames: 2711552. Throughput: 0: 886.3. Samples: 679040. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:09:11,169][00330] Avg episode reward: [(0, '15.005')]
+ [2023-02-22 20:09:16,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3929.4). Total num frames: 2740224. Throughput: 0: 910.5. Samples: 682470. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:09:16,167][00330] Avg episode reward: [(0, '16.852')]
+ [2023-02-22 20:09:17,053][19149] Updated weights for policy 0, policy_version 670 (0.0020)
+ [2023-02-22 20:09:21,164][00330] Fps is (10 sec: 5324.8, 60 sec: 3754.7, 300 sec: 3971.1). Total num frames: 2764800. Throughput: 0: 973.7. Samples: 689810. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:21,169][00330] Avg episode reward: [(0, '17.953')]
+ [2023-02-22 20:09:26,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3957.2). Total num frames: 2777088. Throughput: 0: 993.8. Samples: 695360. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:26,170][00330] Avg episode reward: [(0, '17.841')]
+ [2023-02-22 20:09:27,464][19149] Updated weights for policy 0, policy_version 680 (0.0011)
+ [2023-02-22 20:09:31,164][00330] Fps is (10 sec: 2867.2, 60 sec: 3686.6, 300 sec: 3957.2). Total num frames: 2793472. Throughput: 0: 986.0. Samples: 697710. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:31,167][00330] Avg episode reward: [(0, '18.834')]
+ [2023-02-22 20:09:31,199][19135] Saving new best policy, reward=18.834!
+ [2023-02-22 20:09:36,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3957.2). Total num frames: 2818048. Throughput: 0: 1027.5. Samples: 704096. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:09:36,167][00330] Avg episode reward: [(0, '17.990')]
+ [2023-02-22 20:09:37,242][19149] Updated weights for policy 0, policy_version 690 (0.0011)
+ [2023-02-22 20:09:41,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2842624. Throughput: 0: 1044.7. Samples: 711478. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:09:41,171][00330] Avg episode reward: [(0, '18.036')]
+ [2023-02-22 20:09:46,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.8, 300 sec: 3971.0). Total num frames: 2859008. Throughput: 0: 1018.6. Samples: 714050. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:46,170][00330] Avg episode reward: [(0, '17.893')]
+ [2023-02-22 20:09:47,673][19149] Updated weights for policy 0, policy_version 700 (0.0011)
+ [2023-02-22 20:09:51,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3957.2). Total num frames: 2875392. Throughput: 0: 986.5. Samples: 718622. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:09:51,166][00330] Avg episode reward: [(0, '17.453')]
+ [2023-02-22 20:09:56,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2899968. Throughput: 0: 1037.1. Samples: 725710. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:09:56,167][00330] Avg episode reward: [(0, '18.794')]
+ [2023-02-22 20:09:57,065][19149] Updated weights for policy 0, policy_version 710 (0.0016)
+ [2023-02-22 20:10:01,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2924544. Throughput: 0: 1044.4. Samples: 729468. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:10:01,171][00330] Avg episode reward: [(0, '18.607')]
+ [2023-02-22 20:10:06,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 2940928. Throughput: 0: 1001.1. Samples: 734860. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2023-02-22 20:10:06,167][00330] Avg episode reward: [(0, '19.151')]
+ [2023-02-22 20:10:06,170][19135] Saving new best policy, reward=19.151!
+ [2023-02-22 20:10:08,289][19149] Updated weights for policy 0, policy_version 720 (0.0021)
+ [2023-02-22 20:10:11,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4096.0, 300 sec: 3971.0). Total num frames: 2957312. Throughput: 0: 987.3. Samples: 739788. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:10:11,170][00330] Avg episode reward: [(0, '18.480')]
+ [2023-02-22 20:10:16,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 2981888. Throughput: 0: 1015.7. Samples: 743416. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:10:16,172][00330] Avg episode reward: [(0, '18.337')]
+ [2023-02-22 20:10:17,358][19149] Updated weights for policy 0, policy_version 730 (0.0014)
+ [2023-02-22 20:10:21,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 3985.0). Total num frames: 3006464. Throughput: 0: 1036.4. Samples: 750736. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:10:21,171][00330] Avg episode reward: [(0, '17.543')]
+ [2023-02-22 20:10:26,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3018752. Throughput: 0: 984.0. Samples: 755758. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:10:26,168][00330] Avg episode reward: [(0, '18.565')]
+ [2023-02-22 20:10:28,866][19149] Updated weights for policy 0, policy_version 740 (0.0011)
+ [2023-02-22 20:10:31,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3039232. Throughput: 0: 978.0. Samples: 758060. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:10:31,172][00330] Avg episode reward: [(0, '18.186')]
+ [2023-02-22 20:10:36,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4096.0, 300 sec: 3971.0). Total num frames: 3063808. Throughput: 0: 1028.4. Samples: 764902. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:10:36,167][00330] Avg episode reward: [(0, '18.458')]
+ [2023-02-22 20:10:37,450][19149] Updated weights for policy 0, policy_version 750 (0.0023)
+ [2023-02-22 20:10:41,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3084288. Throughput: 0: 1031.9. Samples: 772144. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2023-02-22 20:10:41,172][00330] Avg episode reward: [(0, '17.603')]
+ [2023-02-22 20:10:46,168][00330] Fps is (10 sec: 3684.9, 60 sec: 4027.5, 300 sec: 3984.9). Total num frames: 3100672. Throughput: 0: 1000.7. Samples: 774504. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:10:46,171][00330] Avg episode reward: [(0, '17.045')]
+ [2023-02-22 20:10:49,207][19149] Updated weights for policy 0, policy_version 760 (0.0022)
+ [2023-02-22 20:10:51,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3121152. Throughput: 0: 984.9. Samples: 779182. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:10:51,167][00330] Avg episode reward: [(0, '17.683')]
+ [2023-02-22 20:10:56,164][00330] Fps is (10 sec: 4507.4, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3145728. Throughput: 0: 1040.7. Samples: 786618. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:10:56,166][00330] Avg episode reward: [(0, '18.608')]
+ [2023-02-22 20:10:57,482][19149] Updated weights for policy 0, policy_version 770 (0.0021)
+ [2023-02-22 20:11:01,168][00330] Fps is (10 sec: 4503.8, 60 sec: 4027.5, 300 sec: 3984.9). Total num frames: 3166208. Throughput: 0: 1042.6. Samples: 790338. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:01,174][00330] Avg episode reward: [(0, '18.472')]
+ [2023-02-22 20:11:01,187][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000773_3166208.pth...
+ [2023-02-22 20:11:01,340][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000541_2215936.pth
+ [2023-02-22 20:11:06,166][00330] Fps is (10 sec: 3686.0, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3182592. Throughput: 0: 993.0. Samples: 795422. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:11:06,168][00330] Avg episode reward: [(0, '18.690')]
+ [2023-02-22 20:11:09,320][19149] Updated weights for policy 0, policy_version 780 (0.0011)
+ [2023-02-22 20:11:11,164][00330] Fps is (10 sec: 3687.9, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3203072. Throughput: 0: 1000.3. Samples: 800770. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:11,167][00330] Avg episode reward: [(0, '18.887')]
+ [2023-02-22 20:11:16,164][00330] Fps is (10 sec: 4506.1, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3227648. Throughput: 0: 1031.3. Samples: 804470. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:11:16,166][00330] Avg episode reward: [(0, '18.401')]
+ [2023-02-22 20:11:17,711][19149] Updated weights for policy 0, policy_version 790 (0.0016)
+ [2023-02-22 20:11:21,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3985.0). Total num frames: 3248128. Throughput: 0: 1039.6. Samples: 811684. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:21,168][00330] Avg episode reward: [(0, '17.420')]
+ [2023-02-22 20:11:26,165][00330] Fps is (10 sec: 3686.0, 60 sec: 4095.9, 300 sec: 3998.8). Total num frames: 3264512. Throughput: 0: 983.4. Samples: 816396. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:26,170][00330] Avg episode reward: [(0, '18.008')]
+ [2023-02-22 20:11:29,617][19149] Updated weights for policy 0, policy_version 800 (0.0027)
+ [2023-02-22 20:11:31,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3284992. Throughput: 0: 981.7. Samples: 818676. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:31,169][00330] Avg episode reward: [(0, '19.793')]
+ [2023-02-22 20:11:31,178][19135] Saving new best policy, reward=19.793!
+ [2023-02-22 20:11:36,164][00330] Fps is (10 sec: 4096.4, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3305472. Throughput: 0: 1036.6. Samples: 825830. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:11:36,169][00330] Avg episode reward: [(0, '20.110')]
+ [2023-02-22 20:11:36,174][19135] Saving new best policy, reward=20.110!
+ [2023-02-22 20:11:37,978][19149] Updated weights for policy 0, policy_version 810 (0.0019)
+ [2023-02-22 20:11:41,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3325952. Throughput: 0: 1017.9. Samples: 832422. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:41,166][00330] Avg episode reward: [(0, '19.987')]
+ [2023-02-22 20:11:46,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4028.0, 300 sec: 3984.9). Total num frames: 3342336. Throughput: 0: 986.9. Samples: 834746. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:11:46,171][00330] Avg episode reward: [(0, '20.030')]
+ [2023-02-22 20:11:49,884][19149] Updated weights for policy 0, policy_version 820 (0.0013)
+ [2023-02-22 20:11:51,165][00330] Fps is (10 sec: 3686.3, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3362816. Throughput: 0: 987.1. Samples: 839842. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:11:51,167][00330] Avg episode reward: [(0, '20.099')]
+ [2023-02-22 20:11:56,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3387392. Throughput: 0: 1033.5. Samples: 847276. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:11:56,167][00330] Avg episode reward: [(0, '18.839')]
+ [2023-02-22 20:11:58,037][19149] Updated weights for policy 0, policy_version 830 (0.0016)
+ [2023-02-22 20:12:01,164][00330] Fps is (10 sec: 4505.7, 60 sec: 4028.0, 300 sec: 3984.9). Total num frames: 3407872. Throughput: 0: 1033.6. Samples: 850984. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:12:01,167][00330] Avg episode reward: [(0, '19.738')]
+ [2023-02-22 20:12:06,168][00330] Fps is (10 sec: 3685.2, 60 sec: 4027.6, 300 sec: 3984.9). Total num frames: 3424256. Throughput: 0: 979.9. Samples: 855782. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2023-02-22 20:12:06,173][00330] Avg episode reward: [(0, '20.647')]
+ [2023-02-22 20:12:06,178][19135] Saving new best policy, reward=20.647!
+ [2023-02-22 20:12:09,950][19149] Updated weights for policy 0, policy_version 840 (0.0034)
+ [2023-02-22 20:12:11,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3444736. Throughput: 0: 1002.3. Samples: 861498. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:12:11,170][00330] Avg episode reward: [(0, '20.478')]
+ [2023-02-22 20:12:16,164][00330] Fps is (10 sec: 4507.0, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3469312. Throughput: 0: 1033.3. Samples: 865176. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:12:16,167][00330] Avg episode reward: [(0, '19.244')]
+ [2023-02-22 20:12:18,365][19149] Updated weights for policy 0, policy_version 850 (0.0015)
+ [2023-02-22 20:12:21,167][00330] Fps is (10 sec: 4504.4, 60 sec: 4027.6, 300 sec: 3984.9). Total num frames: 3489792. Throughput: 0: 1027.3. Samples: 872062. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:12:21,170][00330] Avg episode reward: [(0, '20.002')]
+ [2023-02-22 20:12:26,166][00330] Fps is (10 sec: 3685.6, 60 sec: 4027.7, 300 sec: 3998.8). Total num frames: 3506176. Throughput: 0: 982.5. Samples: 876638. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:12:26,177][00330] Avg episode reward: [(0, '20.530')]
+ [2023-02-22 20:12:30,200][19149] Updated weights for policy 0, policy_version 860 (0.0023)
+ [2023-02-22 20:12:31,165][00330] Fps is (10 sec: 3687.3, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3526656. Throughput: 0: 989.5. Samples: 879274. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2023-02-22 20:12:31,166][00330] Avg episode reward: [(0, '20.120')]
+ [2023-02-22 20:12:36,164][00330] Fps is (10 sec: 4506.5, 60 sec: 4096.0, 300 sec: 3984.9). Total num frames: 3551232. Throughput: 0: 1039.7. Samples: 886630. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:12:36,172][00330] Avg episode reward: [(0, '20.323')]
+ [2023-02-22 20:12:38,532][19149] Updated weights for policy 0, policy_version 870 (0.0015)
+ [2023-02-22 20:12:41,169][00330] Fps is (10 sec: 4094.1, 60 sec: 4027.4, 300 sec: 3984.9). Total num frames: 3567616. Throughput: 0: 1015.2. Samples: 892964. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:12:41,173][00330] Avg episode reward: [(0, '19.603')]
+ [2023-02-22 20:12:46,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3584000. Throughput: 0: 981.7. Samples: 895162. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:12:46,168][00330] Avg episode reward: [(0, '19.856')]
+ [2023-02-22 20:12:51,164][00330] Fps is (10 sec: 2868.6, 60 sec: 3891.2, 300 sec: 3943.3). Total num frames: 3596288. Throughput: 0: 956.5. Samples: 898822. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:12:51,170][00330] Avg episode reward: [(0, '18.629')]
+ [2023-02-22 20:12:53,459][19149] Updated weights for policy 0, policy_version 880 (0.0024)
+ [2023-02-22 20:12:56,164][00330] Fps is (10 sec: 2867.2, 60 sec: 3754.7, 300 sec: 3915.5). Total num frames: 3612672. Throughput: 0: 939.6. Samples: 903780. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:12:56,174][00330] Avg episode reward: [(0, '19.594')]
+ [2023-02-22 20:13:01,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3943.3). Total num frames: 3637248. Throughput: 0: 939.8. Samples: 907466. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+ [2023-02-22 20:13:01,167][00330] Avg episode reward: [(0, '18.709')]
+ [2023-02-22 20:13:01,178][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000888_3637248.pth...
+ [2023-02-22 20:13:01,327][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000655_2682880.pth
+ [2023-02-22 20:13:03,485][19149] Updated weights for policy 0, policy_version 890 (0.0011)
+ [2023-02-22 20:13:06,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3823.1, 300 sec: 3943.3). Total num frames: 3653632. Throughput: 0: 906.0. Samples: 912830. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:13:06,172][00330] Avg episode reward: [(0, '19.137')]
+ [2023-02-22 20:13:11,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3915.5). Total num frames: 3670016. Throughput: 0: 915.7. Samples: 917844. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:13:11,171][00330] Avg episode reward: [(0, '19.690')]
+ [2023-02-22 20:13:14,102][19149] Updated weights for policy 0, policy_version 900 (0.0011)
+ [2023-02-22 20:13:16,164][00330] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3915.5). Total num frames: 3694592. Throughput: 0: 939.0. Samples: 921530. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2023-02-22 20:13:16,167][00330] Avg episode reward: [(0, '19.461')]
+ [2023-02-22 20:13:21,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3754.8, 300 sec: 3929.4). Total num frames: 3715072. Throughput: 0: 939.8. Samples: 928920. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2023-02-22 20:13:21,168][00330] Avg episode reward: [(0, '19.879')]
+ [2023-02-22 20:13:23,858][19149] Updated weights for policy 0, policy_version 910 (0.0024)
+ [2023-02-22 20:13:26,165][00330] Fps is (10 sec: 3686.0, 60 sec: 3754.7, 300 sec: 3929.4). Total num frames: 3731456. Throughput: 0: 906.0. Samples: 933730. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2023-02-22 20:13:26,178][00330] Avg episode reward: [(0, '21.456')]
+ [2023-02-22 20:13:26,183][19135] Saving new best policy, reward=21.456!
+ [2023-02-22 20:13:31,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3957.2). Total num frames: 3751936. Throughput: 0: 908.0. Samples: 936022. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:13:31,167][00330] Avg episode reward: [(0, '21.670')]
+ [2023-02-22 20:13:31,181][19135] Saving new best policy, reward=21.670!
+ [2023-02-22 20:13:34,442][19149] Updated weights for policy 0, policy_version 920 (0.0020)
+ [2023-02-22 20:13:36,164][00330] Fps is (10 sec: 4506.1, 60 sec: 3754.7, 300 sec: 3984.9). Total num frames: 3776512. Throughput: 0: 977.6. Samples: 942814. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2023-02-22 20:13:36,167][00330] Avg episode reward: [(0, '22.528')]
+ [2023-02-22 20:13:36,171][19135] Saving new best policy, reward=22.528!
+ [2023-02-22 20:13:41,164][00330] Fps is (10 sec: 4505.6, 60 sec: 3823.2, 300 sec: 3998.8). Total num frames: 3796992. Throughput: 0: 1023.2. Samples: 949822. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:13:41,167][00330] Avg episode reward: [(0, '22.285')]
+ [2023-02-22 20:13:44,338][19149] Updated weights for policy 0, policy_version 930 (0.0011)
+ [2023-02-22 20:13:46,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3998.8). Total num frames: 3813376. Throughput: 0: 993.4. Samples: 952170. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:13:46,168][00330] Avg episode reward: [(0, '22.198')]
+ [2023-02-22 20:13:51,164][00330] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3971.0). Total num frames: 3829760. Throughput: 0: 979.2. Samples: 956894. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:13:51,167][00330] Avg episode reward: [(0, '21.517')]
+ [2023-02-22 20:13:54,630][19149] Updated weights for policy 0, policy_version 940 (0.0020)
+ [2023-02-22 20:13:56,164][00330] Fps is (10 sec: 4096.0, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3854336. Throughput: 0: 1032.1. Samples: 964290. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:13:56,167][00330] Avg episode reward: [(0, '20.790')]
+ [2023-02-22 20:14:01,164][00330] Fps is (10 sec: 4915.2, 60 sec: 4027.7, 300 sec: 4012.7). Total num frames: 3878912. Throughput: 0: 1033.7. Samples: 968046. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:14:01,168][00330] Avg episode reward: [(0, '21.668')]
+ [2023-02-22 20:14:04,827][19149] Updated weights for policy 0, policy_version 950 (0.0018)
+ [2023-02-22 20:14:06,164][00330] Fps is (10 sec: 3686.4, 60 sec: 3959.5, 300 sec: 3998.8). Total num frames: 3891200. Throughput: 0: 983.4. Samples: 973174. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:14:06,167][00330] Avg episode reward: [(0, '22.437')]
+ [2023-02-22 20:14:11,164][00330] Fps is (10 sec: 3276.8, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3911680. Throughput: 0: 993.9. Samples: 978454. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:14:11,167][00330] Avg episode reward: [(0, '21.280')]
+ [2023-02-22 20:14:14,716][19149] Updated weights for policy 0, policy_version 960 (0.0028)
+ [2023-02-22 20:14:16,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3971.0). Total num frames: 3936256. Throughput: 0: 1026.0. Samples: 982194. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:14:16,167][00330] Avg episode reward: [(0, '21.992')]
+ [2023-02-22 20:14:21,164][00330] Fps is (10 sec: 4505.6, 60 sec: 4027.7, 300 sec: 3998.8). Total num frames: 3956736. Throughput: 0: 1035.6. Samples: 989416. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2023-02-22 20:14:21,167][00330] Avg episode reward: [(0, '22.505')]
+ [2023-02-22 20:14:25,384][19149] Updated weights for policy 0, policy_version 970 (0.0013)
+ [2023-02-22 20:14:26,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.8, 300 sec: 3998.8). Total num frames: 3973120. Throughput: 0: 983.0. Samples: 994058. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2023-02-22 20:14:26,168][00330] Avg episode reward: [(0, '21.247')]
+ [2023-02-22 20:14:31,164][00330] Fps is (10 sec: 3686.4, 60 sec: 4027.7, 300 sec: 3984.9). Total num frames: 3993600. Throughput: 0: 984.2. Samples: 996458. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2023-02-22 20:14:31,169][00330] Avg episode reward: [(0, '20.606')]
+ [2023-02-22 20:14:33,189][19135] Stopping Batcher_0...
+ [2023-02-22 20:14:33,190][19135] Loop batcher_evt_loop terminating...
+ [2023-02-22 20:14:33,190][00330] Component Batcher_0 stopped!
+ [2023-02-22 20:14:33,203][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-02-22 20:14:33,252][19149] Weights refcount: 2 0
+ [2023-02-22 20:14:33,259][00330] Component InferenceWorker_p0-w0 stopped!
+ [2023-02-22 20:14:33,264][19149] Stopping InferenceWorker_p0-w0...
+ [2023-02-22 20:14:33,265][19149] Loop inference_proc0-0_evt_loop terminating...
+ [2023-02-22 20:14:33,271][00330] Component RolloutWorker_w3 stopped!
+ [2023-02-22 20:14:33,275][00330] Component RolloutWorker_w0 stopped!
+ [2023-02-22 20:14:33,279][19153] Stopping RolloutWorker_w3...
+ [2023-02-22 20:14:33,279][19153] Loop rollout_proc3_evt_loop terminating...
+ [2023-02-22 20:14:33,273][19150] Stopping RolloutWorker_w0...
+ [2023-02-22 20:14:33,286][00330] Component RolloutWorker_w7 stopped!
+ [2023-02-22 20:14:33,286][19160] Stopping RolloutWorker_w6...
+ [2023-02-22 20:14:33,288][00330] Component RolloutWorker_w6 stopped!
+ [2023-02-22 20:14:33,292][19161] Stopping RolloutWorker_w7...
+ [2023-02-22 20:14:33,293][19161] Loop rollout_proc7_evt_loop terminating...
+ [2023-02-22 20:14:33,298][19151] Stopping RolloutWorker_w1...
+ [2023-02-22 20:14:33,298][00330] Component RolloutWorker_w1 stopped!
+ [2023-02-22 20:14:33,289][19160] Loop rollout_proc6_evt_loop terminating...
+ [2023-02-22 20:14:33,288][19150] Loop rollout_proc0_evt_loop terminating...
+ [2023-02-22 20:14:33,307][19151] Loop rollout_proc1_evt_loop terminating...
+ [2023-02-22 20:14:33,314][00330] Component RolloutWorker_w5 stopped!
+ [2023-02-22 20:14:33,316][19152] Stopping RolloutWorker_w2...
+ [2023-02-22 20:14:33,318][19155] Stopping RolloutWorker_w5...
+ [2023-02-22 20:14:33,318][19155] Loop rollout_proc5_evt_loop terminating...
+ [2023-02-22 20:14:33,318][00330] Component RolloutWorker_w2 stopped!
+ [2023-02-22 20:14:33,322][00330] Component RolloutWorker_w4 stopped!
+ [2023-02-22 20:14:33,321][19154] Stopping RolloutWorker_w4...
+ [2023-02-22 20:14:33,325][19152] Loop rollout_proc2_evt_loop terminating...
+ [2023-02-22 20:14:33,328][19154] Loop rollout_proc4_evt_loop terminating...
+ [2023-02-22 20:14:33,378][19135] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000773_3166208.pth
+ [2023-02-22 20:14:33,388][19135] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-02-22 20:14:33,541][00330] Component LearnerWorker_p0 stopped!
+ [2023-02-22 20:14:33,544][00330] Waiting for process learner_proc0 to stop...
+ [2023-02-22 20:14:33,549][19135] Stopping LearnerWorker_p0...
+ [2023-02-22 20:14:33,553][19135] Loop learner_proc0_evt_loop terminating...
+ [2023-02-22 20:14:35,292][00330] Waiting for process inference_proc0-0 to join...
+ [2023-02-22 20:14:35,715][00330] Waiting for process rollout_proc0 to join...
+ [2023-02-22 20:14:36,155][00330] Waiting for process rollout_proc1 to join...
+ [2023-02-22 20:14:36,158][00330] Waiting for process rollout_proc2 to join...
+ [2023-02-22 20:14:36,173][00330] Waiting for process rollout_proc3 to join...
+ [2023-02-22 20:14:36,174][00330] Waiting for process rollout_proc4 to join...
+ [2023-02-22 20:14:36,178][00330] Waiting for process rollout_proc5 to join...
+ [2023-02-22 20:14:36,181][00330] Waiting for process rollout_proc6 to join...
+ [2023-02-22 20:14:36,182][00330] Waiting for process rollout_proc7 to join...
+ [2023-02-22 20:14:36,183][00330] Batcher 0 profile tree view:
+ batching: 24.0427, releasing_batches: 0.0282
+ [2023-02-22 20:14:36,184][00330] InferenceWorker_p0-w0 profile tree view:
+ wait_policy: 0.0059
+ wait_policy_total: 501.7868
+ update_model: 7.4170
+ weight_update: 0.0014
+ one_step: 0.0068
+ handle_policy_step: 479.3212
+ deserialize: 13.7627, stack: 2.7169, obs_to_device_normalize: 109.3395, forward: 227.8667, send_messages: 25.6261
+ prepare_outputs: 76.2001
+ to_cpu: 48.0151
+ [2023-02-22 20:14:36,186][00330] Learner 0 profile tree view:
+ misc: 0.0051, prepare_batch: 15.5271
+ train: 75.2453
+ epoch_init: 0.0077, minibatch_init: 0.0119, losses_postprocess: 0.5499, kl_divergence: 0.5853, after_optimizer: 32.9567
+ calculate_losses: 26.6132
+ losses_init: 0.0075, forward_head: 1.6213, bptt_initial: 17.7916, tail: 0.8722, advantages_returns: 0.2940, losses: 3.7363
+ bptt: 1.9715
+ bptt_forward_core: 1.8835
+ update: 13.9417
+ clip: 1.3752
+ [2023-02-22 20:14:36,187][00330] RolloutWorker_w0 profile tree view:
+ wait_for_trajectories: 0.4076, enqueue_policy_requests: 130.3319, env_step: 776.1720, overhead: 18.9517, complete_rollouts: 7.3204
+ save_policy_outputs: 18.4681
+ split_output_tensors: 9.1520
+ [2023-02-22 20:14:36,188][00330] RolloutWorker_w7 profile tree view:
+ wait_for_trajectories: 0.2971, enqueue_policy_requests: 130.6406, env_step: 774.4963, overhead: 17.7889, complete_rollouts: 5.9957
+ save_policy_outputs: 18.8443
+ split_output_tensors: 9.1612
+ [2023-02-22 20:14:36,189][00330] Loop Runner_EvtLoop terminating...
+ [2023-02-22 20:14:36,191][00330] Runner profile tree view:
+ main_loop: 1050.3082
+ [2023-02-22 20:14:36,192][00330] Collected {0: 4005888}, FPS: 3814.0
+ [2023-02-22 20:15:02,367][00330] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+ [2023-02-22 20:15:02,369][00330] Overriding arg 'num_workers' with value 1 passed from command line
+ [2023-02-22 20:15:02,372][00330] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2023-02-22 20:15:02,375][00330] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2023-02-22 20:15:02,377][00330] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2023-02-22 20:15:02,380][00330] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2023-02-22 20:15:02,381][00330] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+ [2023-02-22 20:15:02,384][00330] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2023-02-22 20:15:02,387][00330] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+ [2023-02-22 20:15:02,388][00330] Adding new argument 'hf_repository'=None that is not in the saved config file!
+ [2023-02-22 20:15:02,389][00330] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2023-02-22 20:15:02,392][00330] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2023-02-22 20:15:02,394][00330] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2023-02-22 20:15:02,395][00330] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2023-02-22 20:15:02,398][00330] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2023-02-22 20:15:02,422][00330] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2023-02-22 20:15:02,424][00330] RunningMeanStd input shape: (3, 72, 128)
+ [2023-02-22 20:15:02,428][00330] RunningMeanStd input shape: (1,)
+ [2023-02-22 20:15:02,444][00330] ConvEncoder: input_channels=3
+ [2023-02-22 20:15:03,114][00330] Conv encoder output size: 512
+ [2023-02-22 20:15:03,116][00330] Policy head output size: 512
+ [2023-02-22 20:15:06,098][00330] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2023-02-22 20:15:07,645][00330] Num frames 100...
+ [2023-02-22 20:15:07,755][00330] Num frames 200...
+ [2023-02-22 20:15:07,864][00330] Num frames 300...
+ [2023-02-22 20:15:07,999][00330] Num frames 400...
+ [2023-02-22 20:15:08,125][00330] Num frames 500...
+ [2023-02-22 20:15:08,255][00330] Num frames 600...
+ [2023-02-22 20:15:08,374][00330] Num frames 700...
+ [2023-02-22 20:15:08,486][00330] Num frames 800...
+ [2023-02-22 20:15:08,609][00330] Num frames 900...
+ [2023-02-22 20:15:08,723][00330] Num frames 1000...
+ [2023-02-22 20:15:08,844][00330] Num frames 1100...
+ [2023-02-22 20:15:08,975][00330] Num frames 1200...
+ [2023-02-22 20:15:09,088][00330] Num frames 1300...
+ [2023-02-22 20:15:09,210][00330] Num frames 1400...
+ [2023-02-22 20:15:09,318][00330] Num frames 1500...
+ [2023-02-22 20:15:09,430][00330] Num frames 1600...
+ [2023-02-22 20:15:09,537][00330] Num frames 1700...
+ [2023-02-22 20:15:09,643][00330] Num frames 1800...
+ [2023-02-22 20:15:09,726][00330] Avg episode rewards: #0: 43.239, true rewards: #0: 18.240
+ [2023-02-22 20:15:09,728][00330] Avg episode reward: 43.239, avg true_objective: 18.240
+ [2023-02-22 20:15:09,820][00330] Num frames 1900...
+ [2023-02-22 20:15:09,933][00330] Num frames 2000...
+ [2023-02-22 20:15:10,043][00330] Num frames 2100...
+ [2023-02-22 20:15:10,152][00330] Num frames 2200...
+ [2023-02-22 20:15:10,261][00330] Num frames 2300...
+ [2023-02-22 20:15:10,370][00330] Num frames 2400...
+ [2023-02-22 20:15:10,478][00330] Num frames 2500...
+ [2023-02-22 20:15:10,586][00330] Num frames 2600...
+ [2023-02-22 20:15:10,694][00330] Num frames 2700...
+ [2023-02-22 20:15:10,800][00330] Num frames 2800...
+ [2023-02-22 20:15:10,914][00330] Num frames 2900...
+ [2023-02-22 20:15:11,056][00330] Avg episode rewards: #0: 32.880, true rewards: #0: 14.880
+ [2023-02-22 20:15:11,058][00330] Avg episode reward: 32.880, avg true_objective: 14.880
+ [2023-02-22 20:15:11,088][00330] Num frames 3000...
+ [2023-02-22 20:15:11,206][00330] Num frames 3100...
+ [2023-02-22 20:15:11,317][00330] Num frames 3200...
+ [2023-02-22 20:15:11,429][00330] Num frames 3300...
+ [2023-02-22 20:15:11,544][00330] Num frames 3400...
+ [2023-02-22 20:15:11,656][00330] Num frames 3500...
+ [2023-02-22 20:15:11,768][00330] Avg episode rewards: #0: 25.840, true rewards: #0: 11.840
+ [2023-02-22 20:15:11,769][00330] Avg episode reward: 25.840, avg true_objective: 11.840
+ [2023-02-22 20:15:11,831][00330] Num frames 3600...
+ [2023-02-22 20:15:11,941][00330] Num frames 3700...
+ [2023-02-22 20:15:12,057][00330] Num frames 3800...
+ [2023-02-22 20:15:12,166][00330] Num frames 3900...
+ [2023-02-22 20:15:12,276][00330] Num frames 4000...
+ [2023-02-22 20:15:12,389][00330] Num frames 4100...
+ [2023-02-22 20:15:12,500][00330] Num frames 4200...
+ [2023-02-22 20:15:12,608][00330] Num frames 4300...
+ [2023-02-22 20:15:12,718][00330] Num frames 4400...
+ [2023-02-22 20:15:12,825][00330] Num frames 4500...
+ [2023-02-22 20:15:12,940][00330] Num frames 4600...
+ [2023-02-22 20:15:13,063][00330] Num frames 4700...
+ [2023-02-22 20:15:13,172][00330] Num frames 4800...
+ [2023-02-22 20:15:13,284][00330] Num frames 4900...
+ [2023-02-22 20:15:13,397][00330] Num frames 5000...
+ [2023-02-22 20:15:13,480][00330] Avg episode rewards: #0: 27.560, true rewards: #0: 12.560
+ [2023-02-22 20:15:13,481][00330] Avg episode reward: 27.560, avg true_objective: 12.560
+ [2023-02-22 20:15:13,567][00330] Num frames 5100...
+ [2023-02-22 20:15:13,677][00330] Num frames 5200...
+ [2023-02-22 20:15:13,786][00330] Num frames 5300...
+ [2023-02-22 20:15:13,902][00330] Num frames 5400...
+ [2023-02-22 20:15:14,025][00330] Num frames 5500...
+ [2023-02-22 20:15:14,134][00330] Num frames 5600...
+ [2023-02-22 20:15:14,244][00330] Num frames 5700...
+ [2023-02-22 20:15:14,358][00330] Num frames 5800...
+ [2023-02-22 20:15:14,479][00330] Num frames 5900...
+ [2023-02-22 20:15:14,589][00330] Num frames 6000...
+ [2023-02-22 20:15:14,731][00330] Avg episode rewards: #0: 26.960, true rewards: #0: 12.160
+ [2023-02-22 20:15:14,734][00330] Avg episode reward: 26.960, avg true_objective: 12.160
+ [2023-02-22 20:15:14,758][00330] Num frames 6100...
+ [2023-02-22 20:15:14,869][00330] Num frames 6200...
+ [2023-02-22 20:15:14,977][00330] Num frames 6300...
+ [2023-02-22 20:15:15,106][00330] Num frames 6400...
+ [2023-02-22 20:15:15,231][00330] Num frames 6500...
+ [2023-02-22 20:15:15,354][00330] Num frames 6600...
+ [2023-02-22 20:15:15,471][00330] Num frames 6700...
+ [2023-02-22 20:15:15,587][00330] Num frames 6800...
+ [2023-02-22 20:15:15,705][00330] Num frames 6900...
+ [2023-02-22 20:15:15,815][00330] Num frames 7000...
+ [2023-02-22 20:15:15,927][00330] Num frames 7100...
+ [2023-02-22 20:15:16,048][00330] Num frames 7200...
+ [2023-02-22 20:15:16,159][00330] Num frames 7300...
+ [2023-02-22 20:15:16,274][00330] Num frames 7400...
+ [2023-02-22 20:15:16,390][00330] Num frames 7500...
+ [2023-02-22 20:15:16,514][00330] Num frames 7600...
+ [2023-02-22 20:15:16,624][00330] Num frames 7700...
+ [2023-02-22 20:15:16,739][00330] Avg episode rewards: #0: 28.757, true rewards: #0: 12.923
+ [2023-02-22 20:15:16,741][00330] Avg episode reward: 28.757, avg true_objective: 12.923
+ [2023-02-22 20:15:16,793][00330] Num frames 7800...
+ [2023-02-22 20:15:16,904][00330] Num frames 7900...
+ [2023-02-22 20:15:17,021][00330] Num frames 8000...
+ [2023-02-22 20:15:17,140][00330] Num frames 8100...
+ [2023-02-22 20:15:17,278][00330] Num frames 8200...
+ [2023-02-22 20:15:17,488][00330] Avg episode rewards: #0: 25.711, true rewards: #0: 11.854
+ [2023-02-22 20:15:17,490][00330] Avg episode reward: 25.711, avg true_objective: 11.854
+ [2023-02-22 20:15:17,496][00330] Num frames 8300...
+ [2023-02-22 20:15:17,647][00330] Num frames 8400...
+ [2023-02-22 20:15:17,802][00330] Num frames 8500...
+ [2023-02-22 20:15:17,955][00330] Num frames 8600...
+ [2023-02-22 20:15:18,112][00330] Num frames 8700...
+ [2023-02-22 20:15:18,268][00330] Num frames 8800...
+ [2023-02-22 20:15:18,424][00330] Num frames 8900...
+ [2023-02-22 20:15:18,587][00330] Num frames 9000...
+ [2023-02-22 20:15:18,740][00330] Num frames 9100...
+ [2023-02-22 20:15:18,907][00330] Avg episode rewards: #0: 24.837, true rewards: #0: 11.462
+ [2023-02-22 20:15:18,909][00330] Avg episode reward: 24.837, avg true_objective: 11.462
+ [2023-02-22 20:15:18,959][00330] Num frames 9200...
+ [2023-02-22 20:15:19,125][00330] Num frames 9300...
+ [2023-02-22 20:15:19,284][00330] Num frames 9400...
+ [2023-02-22 20:15:19,445][00330] Num frames 9500...
+ [2023-02-22 20:15:19,606][00330] Num frames 9600...
+ [2023-02-22 20:15:19,765][00330] Num frames 9700...
+ [2023-02-22 20:15:19,927][00330] Num frames 9800...
+ [2023-02-22 20:15:20,092][00330] Num frames 9900...
+ [2023-02-22 20:15:20,258][00330] Num frames 10000...
+ [2023-02-22 20:15:20,415][00330] Num frames 10100...
+ [2023-02-22 20:15:20,566][00330] Num frames 10200...
+ [2023-02-22 20:15:20,679][00330] Num frames 10300...
+ [2023-02-22 20:15:20,790][00330] Avg episode rewards: #0: 24.943, true rewards: #0: 11.499
+ [2023-02-22 20:15:20,792][00330] Avg episode reward: 24.943, avg true_objective: 11.499
+ [2023-02-22 20:15:20,851][00330] Num frames 10400...
+ [2023-02-22 20:15:20,959][00330] Num frames 10500...
+ [2023-02-22 20:15:21,072][00330] Num frames 10600...
+ [2023-02-22 20:15:21,190][00330] Num frames 10700...
+ [2023-02-22 20:15:21,303][00330] Num frames 10800...
+ [2023-02-22 20:15:21,415][00330] Num frames 10900...
+ [2023-02-22 20:15:21,525][00330] Num frames 11000...
+ [2023-02-22 20:15:21,636][00330] Num frames 11100...
+ [2023-02-22 20:15:21,753][00330] Num frames 11200...
+ [2023-02-22 20:15:21,824][00330] Avg episode rewards: #0: 24.713, true rewards: #0: 11.213
+ [2023-02-22 20:15:21,826][00330] Avg episode reward: 24.713, avg true_objective: 11.213
+ [2023-02-22 20:16:25,272][00330] Replay video saved to /content/train_dir/default_experiment/replay.mp4!