Upload folder using huggingface_hub
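A minimal sketch, assuming the upload was driven by the huggingface_hub client named in the commit message (Sample Factory's --push_to_hub flow, visible in sf_log.txt below, can invoke it under the hood); the repo id and folder path are taken from the push URL and experiment directory recorded in the log, not from this commit's metadata:

    from huggingface_hub import upload_folder

    # Sketch only: repo_id and folder_path come from sf_log.txt below.
    upload_folder(
        repo_id="rishisim/rl_course_vizdoom_health_gathering_supreme",
        folder_path="/content/train_dir/default_experiment",
        commit_message="Upload folder using huggingface_hub",
    )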
.summary/0/events.out.tfevents.1722101970.452e51389018 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef560d848d7f8a247dd895ee97c221c347b03473b060631feb3e946a961cbbe9
+size 112704
README.md CHANGED
@@ -15,7 +15,7 @@ model-index:
 type: doom_health_gathering_supreme
 metrics:
 - type: mean_reward
-value:
+value: 11.54 +/- 5.62
 name: mean_reward
 verified: false
 ---
checkpoint_p0/best_000001055_4321280_reward_20.107.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac697f2b86e134b5fef67d10851888b81bc77f7705ee6a87f360a9258be05b28
+size 34929243
checkpoint_p0/checkpoint_000001050_4300800.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f7cfe11c1d9401472bf49e8e725b28e03ab9d952b43615e7ea4e4e15d748ed
+size 34929669
checkpoint_p0/checkpoint_000001101_4509696.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9386383845359bcea6b2f1c4cd4e83dadd02ff34ecb39a2c6ac587fc1df467f2
+size 34929669
config.json CHANGED
@@ -65,7 +65,7 @@
 "summaries_use_frameskip": true,
 "heartbeat_interval": 20,
 "heartbeat_reporting_interval": 600,
-"train_for_env_steps":
+"train_for_env_steps": 4505000,
 "train_for_seconds": 10000000000,
 "save_every_sec": 120,
 "keep_checkpoints": 2,
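The hunk above records the new training budget, train_for_env_steps = 4505000; sf_log.txt below confirms it was overridden from the command line when the experiment was resumed ("Overriding arg 'train_for_env_steps' with value 4505000 passed from command line"). A minimal sketch of applying the same edit to the saved config directly, assuming the key sits at the top level of config.json as the hunk suggests:

    import json
    from pathlib import Path

    # Path taken from sf_log.txt; key and value match this commit's diff.
    cfg_path = Path("/content/train_dir/default_experiment/config.json")
    cfg = json.loads(cfg_path.read_text())
    cfg["train_for_env_steps"] = 4_505_000  # extend the env-step budget before resuming
    cfg_path.write_text(json.dumps(cfg, indent=4))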
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:cdbf3d4d8001af8ac65b08fafe13a97c1db1f4d63eb1d4ebaf4c10e9388271aa
+size 21793979
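The event file, checkpoints, and replay above are stored via Git LFS, so the diffs show pointer files rather than the binaries: per the LFS spec referenced on their first line, "oid sha256:" carries the SHA-256 digest of the actual file and "size" its length in bytes. A minimal sketch for verifying a downloaded artifact against its pointer (the file name is just the example from above):

    import hashlib
    from pathlib import Path

    path = Path("replay.mp4")  # any LFS-tracked file from this commit
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    print(f"oid sha256:{digest}")         # should match the pointer's oid line
    print(f"size {path.stat().st_size}")  # should match the pointer's size line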
sf_log.txt CHANGED
@@ -1963,3 +1963,792 @@ main_loop: 37.5755
 [2024-07-27 17:38:15,970][00473] Avg episode rewards: #0: 14.652, true rewards: #0: 7.552
 [2024-07-27 17:38:15,974][00473] Avg episode reward: 14.652, avg true_objective: 7.552
 [2024-07-27 17:39:03,628][00473] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2024-07-27 17:39:07,781][00473] The model has been pushed to https://huggingface.co/rishisim/rl_course_vizdoom_health_gathering_supreme
+[2024-07-27 17:39:30,317][00473] Environment doom_basic already registered, overwriting...
+[2024-07-27 17:39:30,320][00473] Environment doom_two_colors_easy already registered, overwriting...
+[2024-07-27 17:39:30,322][00473] Environment doom_two_colors_hard already registered, overwriting...
+[2024-07-27 17:39:30,325][00473] Environment doom_dm already registered, overwriting...
+[2024-07-27 17:39:30,329][00473] Environment doom_dwango5 already registered, overwriting...
+[2024-07-27 17:39:30,330][00473] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2024-07-27 17:39:30,331][00473] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2024-07-27 17:39:30,332][00473] Environment doom_my_way_home already registered, overwriting...
+[2024-07-27 17:39:30,333][00473] Environment doom_deadly_corridor already registered, overwriting...
+[2024-07-27 17:39:30,334][00473] Environment doom_defend_the_center already registered, overwriting...
+[2024-07-27 17:39:30,342][00473] Environment doom_defend_the_line already registered, overwriting...
+[2024-07-27 17:39:30,344][00473] Environment doom_health_gathering already registered, overwriting...
+[2024-07-27 17:39:30,345][00473] Environment doom_health_gathering_supreme already registered, overwriting...
+[2024-07-27 17:39:30,347][00473] Environment doom_battle already registered, overwriting...
+[2024-07-27 17:39:30,349][00473] Environment doom_battle2 already registered, overwriting...
+[2024-07-27 17:39:30,351][00473] Environment doom_duel_bots already registered, overwriting...
+[2024-07-27 17:39:30,352][00473] Environment doom_deathmatch_bots already registered, overwriting...
+[2024-07-27 17:39:30,354][00473] Environment doom_duel already registered, overwriting...
+[2024-07-27 17:39:30,356][00473] Environment doom_deathmatch_full already registered, overwriting...
+[2024-07-27 17:39:30,357][00473] Environment doom_benchmark already registered, overwriting...
+[2024-07-27 17:39:30,359][00473] register_encoder_factory: <function make_vizdoom_encoder at 0x7f79f8ee35b0>
+[2024-07-27 17:39:30,381][00473] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-07-27 17:39:30,383][00473] Overriding arg 'train_for_env_steps' with value 4505000 passed from command line
+[2024-07-27 17:39:30,392][00473] Experiment dir /content/train_dir/default_experiment already exists!
+[2024-07-27 17:39:30,395][00473] Resuming existing experiment from /content/train_dir/default_experiment...
+[2024-07-27 17:39:30,397][00473] Weights and Biases integration disabled
+[2024-07-27 17:39:30,403][00473] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2024-07-27 17:39:32,519][00473] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=4505000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2024-07-27 17:39:32,521][00473] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2024-07-27 17:39:32,525][00473] Rollout worker 0 uses device cpu
+[2024-07-27 17:39:32,526][00473] Rollout worker 1 uses device cpu
+[2024-07-27 17:39:32,528][00473] Rollout worker 2 uses device cpu
+[2024-07-27 17:39:32,530][00473] Rollout worker 3 uses device cpu
+[2024-07-27 17:39:32,531][00473] Rollout worker 4 uses device cpu
+[2024-07-27 17:39:32,532][00473] Rollout worker 5 uses device cpu
+[2024-07-27 17:39:32,534][00473] Rollout worker 6 uses device cpu
+[2024-07-27 17:39:32,535][00473] Rollout worker 7 uses device cpu
+[2024-07-27 17:39:32,635][00473] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-07-27 17:39:32,637][00473] InferenceWorker_p0-w0: min num requests: 2
+[2024-07-27 17:39:32,671][00473] Starting all processes...
+[2024-07-27 17:39:32,674][00473] Starting process learner_proc0
+[2024-07-27 17:39:32,721][00473] Starting all processes...
+[2024-07-27 17:39:32,729][00473] Starting process inference_proc0-0
+[2024-07-27 17:39:32,730][00473] Starting process rollout_proc0
+[2024-07-27 17:39:32,730][00473] Starting process rollout_proc1
+[2024-07-27 17:39:32,730][00473] Starting process rollout_proc2
+[2024-07-27 17:39:32,730][00473] Starting process rollout_proc3
+[2024-07-27 17:39:32,732][00473] Starting process rollout_proc4
+[2024-07-27 17:39:32,739][00473] Starting process rollout_proc5
+[2024-07-27 17:39:32,739][00473] Starting process rollout_proc6
+[2024-07-27 17:39:32,739][00473] Starting process rollout_proc7
+[2024-07-27 17:39:48,066][18884] Worker 6 uses CPU cores [0]
+[2024-07-27 17:39:48,151][18863] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-07-27 17:39:48,154][18863] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2024-07-27 17:39:48,204][18881] Worker 4 uses CPU cores [0]
+[2024-07-27 17:39:48,214][18863] Num visible devices: 1
+[2024-07-27 17:39:48,243][18863] Starting seed is not provided
+[2024-07-27 17:39:48,245][18863] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-07-27 17:39:48,245][18863] Initializing actor-critic model on device cuda:0
+[2024-07-27 17:39:48,246][18863] RunningMeanStd input shape: (3, 72, 128)
+[2024-07-27 17:39:48,248][18863] RunningMeanStd input shape: (1,)
+[2024-07-27 17:39:48,261][18883] Worker 7 uses CPU cores [1]
+[2024-07-27 17:39:48,285][18863] ConvEncoder: input_channels=3
+[2024-07-27 17:39:48,441][18876] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-07-27 17:39:48,445][18876] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2024-07-27 17:39:48,482][18879] Worker 2 uses CPU cores [0]
+[2024-07-27 17:39:48,495][18877] Worker 1 uses CPU cores [1]
+[2024-07-27 17:39:48,510][18876] Num visible devices: 1
+[2024-07-27 17:39:48,577][18882] Worker 5 uses CPU cores [1]
+[2024-07-27 17:39:48,630][18880] Worker 3 uses CPU cores [1]
+[2024-07-27 17:39:48,639][18878] Worker 0 uses CPU cores [0]
+[2024-07-27 17:39:48,678][18863] Conv encoder output size: 512
+[2024-07-27 17:39:48,679][18863] Policy head output size: 512
+[2024-07-27 17:39:48,694][18863] Created Actor Critic model with architecture:
+[2024-07-27 17:39:48,694][18863] ActorCriticSharedWeights(
+(obs_normalizer): ObservationNormalizer(
+(running_mean_std): RunningMeanStdDictInPlace(
+(running_mean_std): ModuleDict(
+(obs): RunningMeanStdInPlace()
+)
+)
+)
+(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+(encoder): VizdoomEncoder(
+(basic_encoder): ConvEncoder(
+(enc): RecursiveScriptModule(
+original_name=ConvEncoderImpl
+(conv_head): RecursiveScriptModule(
+original_name=Sequential
+(0): RecursiveScriptModule(original_name=Conv2d)
+(1): RecursiveScriptModule(original_name=ELU)
+(2): RecursiveScriptModule(original_name=Conv2d)
+(3): RecursiveScriptModule(original_name=ELU)
+(4): RecursiveScriptModule(original_name=Conv2d)
+(5): RecursiveScriptModule(original_name=ELU)
+)
+(mlp_layers): RecursiveScriptModule(
+original_name=Sequential
+(0): RecursiveScriptModule(original_name=Linear)
+(1): RecursiveScriptModule(original_name=ELU)
+)
+)
+)
+)
+(core): ModelCoreRNN(
+(core): GRU(512, 512)
+)
+(decoder): MlpDecoder(
+(mlp): Identity()
+)
+(critic_linear): Linear(in_features=512, out_features=1, bias=True)
+(action_parameterization): ActionParameterizationDefault(
+(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+)
+)
+[2024-07-27 17:39:48,850][18863] Using optimizer <class 'torch.optim.adam.Adam'>
+[2024-07-27 17:39:49,619][18863] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000980_4014080.pth...
+[2024-07-27 17:39:49,654][18863] Loading model from checkpoint
+[2024-07-27 17:39:49,656][18863] Loaded experiment state at self.train_step=980, self.env_steps=4014080
+[2024-07-27 17:39:49,657][18863] Initialized policy 0 weights for model version 980
+[2024-07-27 17:39:49,659][18863] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-07-27 17:39:49,666][18863] LearnerWorker_p0 finished initialization!
+[2024-07-27 17:39:49,767][18876] RunningMeanStd input shape: (3, 72, 128)
+[2024-07-27 17:39:49,768][18876] RunningMeanStd input shape: (1,)
+[2024-07-27 17:39:49,780][18876] ConvEncoder: input_channels=3
+[2024-07-27 17:39:49,886][18876] Conv encoder output size: 512
+[2024-07-27 17:39:49,887][18876] Policy head output size: 512
+[2024-07-27 17:39:49,942][00473] Inference worker 0-0 is ready!
+[2024-07-27 17:39:49,944][00473] All inference workers are ready! Signal rollout workers to start!
+[2024-07-27 17:39:50,170][18880] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,173][18877] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,174][18883] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,169][18882] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,181][18881] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,194][18879] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,195][18878] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,192][18884] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-07-27 17:39:50,403][00473] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4014080. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-07-27 17:39:50,826][18878] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:51,217][18878] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:51,585][18883] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:51,588][18877] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:51,590][18880] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:52,097][18878] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:52,623][18881] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:52,626][18884] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:52,628][00473] Heartbeat connected on Batcher_0
+[2024-07-27 17:39:52,638][00473] Heartbeat connected on LearnerWorker_p0
+[2024-07-27 17:39:52,674][00473] Heartbeat connected on InferenceWorker_p0-w0
+[2024-07-27 17:39:53,133][18880] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:53,135][18877] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:53,137][18883] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:53,198][18882] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:54,076][18884] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:54,079][18881] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:54,081][18879] Decorrelating experience for 0 frames...
+[2024-07-27 17:39:54,288][18882] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:54,934][18883] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:54,971][18878] Decorrelating experience for 96 frames...
+[2024-07-27 17:39:55,345][00473] Heartbeat connected on RolloutWorker_w0
+[2024-07-27 17:39:55,406][00473] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-07-27 17:39:56,651][18880] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:56,744][18879] Decorrelating experience for 32 frames...
+[2024-07-27 17:39:57,140][18877] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:57,686][18882] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:57,938][18883] Decorrelating experience for 96 frames...
+[2024-07-27 17:39:58,470][00473] Heartbeat connected on RolloutWorker_w7
+[2024-07-27 17:39:59,430][18881] Decorrelating experience for 64 frames...
+[2024-07-27 17:39:59,624][18880] Decorrelating experience for 96 frames...
+[2024-07-27 17:39:59,864][18884] Decorrelating experience for 64 frames...
+[2024-07-27 17:40:00,058][00473] Heartbeat connected on RolloutWorker_w3
+[2024-07-27 17:40:00,403][00473] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 42.0. Samples: 420. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-07-27 17:40:00,407][00473] Avg episode reward: [(0, '2.560')]
+[2024-07-27 17:40:00,645][18877] Decorrelating experience for 96 frames...
+[2024-07-27 17:40:00,943][18882] Decorrelating experience for 96 frames...
+[2024-07-27 17:40:01,245][00473] Heartbeat connected on RolloutWorker_w1
+[2024-07-27 17:40:01,957][00473] Heartbeat connected on RolloutWorker_w5
+[2024-07-27 17:40:02,674][18879] Decorrelating experience for 64 frames...
+[2024-07-27 17:40:03,967][18884] Decorrelating experience for 96 frames...
+[2024-07-27 17:40:04,683][00473] Heartbeat connected on RolloutWorker_w6
+[2024-07-27 17:40:05,052][18863] Signal inference workers to stop experience collection...
+[2024-07-27 17:40:05,066][18876] InferenceWorker_p0-w0: stopping experience collection
+[2024-07-27 17:40:05,286][18881] Decorrelating experience for 96 frames...
+[2024-07-27 17:40:05,389][18879] Decorrelating experience for 96 frames...
+[2024-07-27 17:40:05,403][00473] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 160.1. Samples: 2402. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-07-27 17:40:05,407][00473] Avg episode reward: [(0, '5.740')]
+[2024-07-27 17:40:05,455][00473] Heartbeat connected on RolloutWorker_w4
+[2024-07-27 17:40:05,545][00473] Heartbeat connected on RolloutWorker_w2
+[2024-07-27 17:40:06,486][18863] Signal inference workers to resume experience collection...
+[2024-07-27 17:40:06,488][18876] InferenceWorker_p0-w0: resuming experience collection
+[2024-07-27 17:40:10,403][00473] Fps is (10 sec: 2048.1, 60 sec: 1024.0, 300 sec: 1024.0). Total num frames: 4034560. Throughput: 0: 171.6. Samples: 3432. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2024-07-27 17:40:10,408][00473] Avg episode reward: [(0, '8.034')]
+[2024-07-27 17:40:15,403][00473] Fps is (10 sec: 3686.4, 60 sec: 1474.6, 300 sec: 1474.6). Total num frames: 4050944. Throughput: 0: 367.2. Samples: 9180. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-07-27 17:40:15,406][00473] Avg episode reward: [(0, '10.297')]
+[2024-07-27 17:40:16,092][18876] Updated weights for policy 0, policy_version 990 (0.0219)
+[2024-07-27 17:40:20,403][00473] Fps is (10 sec: 2867.2, 60 sec: 1638.4, 300 sec: 1638.4). Total num frames: 4063232. Throughput: 0: 433.3. Samples: 13000. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-07-27 17:40:20,410][00473] Avg episode reward: [(0, '12.401')]
+[2024-07-27 17:40:25,403][00473] Fps is (10 sec: 3276.8, 60 sec: 1989.5, 300 sec: 1989.5). Total num frames: 4083712. Throughput: 0: 452.9. Samples: 15850. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-07-27 17:40:25,406][00473] Avg episode reward: [(0, '14.255')]
+[2024-07-27 17:40:28,173][18876] Updated weights for policy 0, policy_version 1000 (0.0015)
+[2024-07-27 17:40:30,403][00473] Fps is (10 sec: 4096.0, 60 sec: 2252.8, 300 sec: 2252.8). Total num frames: 4104192. Throughput: 0: 544.3. Samples: 21770. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-07-27 17:40:30,407][00473] Avg episode reward: [(0, '15.281')]
+[2024-07-27 17:40:35,408][00473] Fps is (10 sec: 3275.3, 60 sec: 2275.3, 300 sec: 2275.3). Total num frames: 4116480. Throughput: 0: 583.3. Samples: 26252. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-07-27 17:40:35,410][00473] Avg episode reward: [(0, '16.210')]
+[2024-07-27 17:40:40,403][00473] Fps is (10 sec: 2867.1, 60 sec: 2375.7, 300 sec: 2375.7). Total num frames: 4132864. Throughput: 0: 627.2. Samples: 28222. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-07-27 17:40:40,409][00473] Avg episode reward: [(0, '17.626')]
+[2024-07-27 17:40:40,827][18876] Updated weights for policy 0, policy_version 1010 (0.0024)
+[2024-07-27 17:40:45,403][00473] Fps is (10 sec: 3688.1, 60 sec: 2532.1, 300 sec: 2532.1). Total num frames: 4153344. Throughput: 0: 751.2. Samples: 34222. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-07-27 17:40:45,408][00473] Avg episode reward: [(0, '18.829')]
+[2024-07-27 17:40:45,412][18863] Saving new best policy, reward=18.829!
+[2024-07-27 17:40:50,404][00473] Fps is (10 sec: 3686.3, 60 sec: 2594.1, 300 sec: 2594.1). Total num frames: 4169728. Throughput: 0: 824.3. Samples: 39496. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-07-27 17:40:50,408][00473] Avg episode reward: [(0, '18.931')]
+[2024-07-27 17:40:50,426][18863] Saving new best policy, reward=18.931!
+[2024-07-27 17:40:53,148][18876] Updated weights for policy 0, policy_version 1020 (0.0033)
+[2024-07-27 17:40:55,403][00473] Fps is (10 sec: 2867.2, 60 sec: 2799.1, 300 sec: 2583.6). Total num frames: 4182016. Throughput: 0: 840.8. Samples: 41266. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:40:55,405][00473] Avg episode reward: [(0, '18.576')]
+[2024-07-27 17:41:00,403][00473] Fps is (10 sec: 3277.0, 60 sec: 3140.3, 300 sec: 2691.7). Total num frames: 4202496. Throughput: 0: 822.0. Samples: 46170. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:41:00,405][00473] Avg episode reward: [(0, '17.721')]
+[2024-07-27 17:41:04,256][18876] Updated weights for policy 0, policy_version 1030 (0.0017)
+[2024-07-27 17:41:05,403][00473] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 2785.3). Total num frames: 4222976. Throughput: 0: 876.9. Samples: 52462. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:41:05,406][00473] Avg episode reward: [(0, '17.163')]
+[2024-07-27 17:41:10,409][00473] Fps is (10 sec: 3274.9, 60 sec: 3344.8, 300 sec: 2764.6). Total num frames: 4235264. Throughput: 0: 861.1. Samples: 54606. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:41:10,416][00473] Avg episode reward: [(0, '17.196')]
+[2024-07-27 17:41:15,403][00473] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 2794.9). Total num frames: 4251648. Throughput: 0: 821.3. Samples: 58728. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-07-27 17:41:15,405][00473] Avg episode reward: [(0, '17.044')]
+[2024-07-27 17:41:17,167][18876] Updated weights for policy 0, policy_version 1040 (0.0015)
+[2024-07-27 17:41:20,403][00473] Fps is (10 sec: 3688.4, 60 sec: 3481.6, 300 sec: 2867.2). Total num frames: 4272128. Throughput: 0: 857.2. Samples: 64820. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:41:20,405][00473] Avg episode reward: [(0, '17.607')]
+[2024-07-27 17:41:25,403][00473] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 2888.8). Total num frames: 4288512. Throughput: 0: 883.3. Samples: 67970. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-07-27 17:41:25,407][00473] Avg episode reward: [(0, '18.281')]
+[2024-07-27 17:41:29,513][18876] Updated weights for policy 0, policy_version 1050 (0.0016)
+[2024-07-27 17:41:30,403][00473] Fps is (10 sec: 2867.3, 60 sec: 3276.8, 300 sec: 2867.2). Total num frames: 4300800. Throughput: 0: 834.9. Samples: 71794. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2024-07-27 17:41:30,410][00473] Avg episode reward: [(0, '18.080')]
+[2024-07-27 17:41:30,425][18863] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001050_4300800.pth...
+[2024-07-27 17:41:30,599][18863] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
+[2024-07-27 17:41:35,403][00473] Fps is (10 sec: 3276.8, 60 sec: 3413.6, 300 sec: 2925.7). Total num frames: 4321280. Throughput: 0: 840.5. Samples: 77316. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-07-27 17:41:35,411][00473] Avg episode reward: [(0, '20.107')]
+[2024-07-27 17:41:35,413][18863] Saving new best policy, reward=20.107!
+[2024-07-27 17:41:40,160][18876] Updated weights for policy 0, policy_version 1060 (0.0026)
+[2024-07-27 17:41:40,403][00473] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 2978.9). Total num frames: 4341760. Throughput: 0: 867.3. Samples: 80296. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2024-07-27 17:41:40,409][00473] Avg episode reward: [(0, '19.189')]
+[2024-07-27 17:41:45,404][00473] Fps is (10 sec: 3276.6, 60 sec: 3345.0, 300 sec: 2956.2). Total num frames: 4354048. Throughput: 0: 865.1. Samples: 85102. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-07-27 17:41:45,406][00473] Avg episode reward: [(0, '18.620')]
+[2024-07-27 17:41:50,403][00473] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 2969.6). Total num frames: 4370432. Throughput: 0: 822.8. Samples: 89488. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-07-27 17:41:50,411][00473] Avg episode reward: [(0, '18.749')]
+[2024-07-27 17:41:53,106][18876] Updated weights for policy 0, policy_version 1070 (0.0021)
+[2024-07-27 17:41:55,403][00473] Fps is (10 sec: 3686.5, 60 sec: 3481.6, 300 sec: 3014.6). Total num frames: 4390912. Throughput: 0: 844.9. Samples: 92620. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2024-07-27 17:41:55,413][00473] Avg episode reward: [(0, '17.540')]
+[2024-07-27 17:42:00,404][00473] Fps is (10 sec: 3686.0, 60 sec: 3413.3, 300 sec: 3024.7). Total num frames: 4407296. Throughput: 0: 887.8. Samples: 98678. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-07-27 17:42:00,408][00473] Avg episode reward: [(0, '16.795')]
+[2024-07-27 17:42:05,403][00473] Fps is (10 sec: 2867.3, 60 sec: 3276.8, 300 sec: 3003.7). Total num frames: 4419584. Throughput: 0: 836.8. Samples: 102474. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-07-27 17:42:05,405][00473] Avg episode reward: [(0, '17.038')]
+[2024-07-27 17:42:05,599][18876] Updated weights for policy 0, policy_version 1080 (0.0013)
+[2024-07-27 17:42:10,403][00473] Fps is (10 sec: 3277.1, 60 sec: 3413.6, 300 sec: 3042.7). Total num frames: 4440064. Throughput: 0: 826.2. Samples: 105150. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2024-07-27 17:42:10,406][00473] Avg episode reward: [(0, '19.360')]
+[2024-07-27 17:42:15,403][00473] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3079.1). Total num frames: 4460544. Throughput: 0: 868.5. Samples: 110876. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2024-07-27 17:42:15,410][00473] Avg episode reward: [(0, '19.294')]
+[2024-07-27 17:42:16,213][18876] Updated weights for policy 0, policy_version 1090 (0.0024)
+[2024-07-27 17:42:20,403][00473] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3058.3). Total num frames: 4472832. Throughput: 0: 848.5. Samples: 115500. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2024-07-27 17:42:20,411][00473] Avg episode reward: [(0, '18.425')]
+[2024-07-27 17:42:25,404][00473] Fps is (10 sec: 2457.3, 60 sec: 3276.7, 300 sec: 3038.9). Total num frames: 4485120. Throughput: 0: 823.6. Samples: 117358. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2024-07-27 17:42:25,406][00473] Avg episode reward: [(0, '18.760')]
+[2024-07-27 17:42:29,257][18876] Updated weights for policy 0, policy_version 1100 (0.0030)
+[2024-07-27 17:42:30,403][00473] Fps is (10 sec: 3276.6, 60 sec: 3413.3, 300 sec: 3072.0). Total num frames: 4505600. Throughput: 0: 845.9. Samples: 123168. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2024-07-27 17:42:30,407][00473] Avg episode reward: [(0, '18.340')]
+[2024-07-27 17:42:30,534][18863] Stopping Batcher_0...
+[2024-07-27 17:42:30,535][18863] Loop batcher_evt_loop terminating...
+[2024-07-27 17:42:30,534][00473] Component Batcher_0 stopped!
+[2024-07-27 17:42:30,539][18863] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001101_4509696.pth...
+[2024-07-27 17:42:30,601][00473] Component RolloutWorker_w3 stopped!
+[2024-07-27 17:42:30,603][18880] Stopping RolloutWorker_w3...
+[2024-07-27 17:42:30,612][18881] Stopping RolloutWorker_w4...
+[2024-07-27 17:42:30,612][00473] Component RolloutWorker_w4 stopped!
+[2024-07-27 17:42:30,609][18880] Loop rollout_proc3_evt_loop terminating...
+[2024-07-27 17:42:30,620][18881] Loop rollout_proc4_evt_loop terminating...
+[2024-07-27 17:42:30,624][00473] Component RolloutWorker_w1 stopped!
+[2024-07-27 17:42:30,627][18877] Stopping RolloutWorker_w1...
+[2024-07-27 17:42:30,656][18877] Loop rollout_proc1_evt_loop terminating...
+[2024-07-27 17:42:30,659][18884] Stopping RolloutWorker_w6...
+[2024-07-27 17:42:30,660][18884] Loop rollout_proc6_evt_loop terminating...
+[2024-07-27 17:42:30,662][00473] Component RolloutWorker_w6 stopped!
+[2024-07-27 17:42:30,641][18876] Weights refcount: 2 0
+[2024-07-27 17:42:30,681][00473] Component InferenceWorker_p0-w0 stopped!
+[2024-07-27 17:42:30,686][18876] Stopping InferenceWorker_p0-w0...
+[2024-07-27 17:42:30,689][18876] Loop inference_proc0-0_evt_loop terminating...
+[2024-07-27 17:42:30,692][00473] Component RolloutWorker_w7 stopped!
+[2024-07-27 17:42:30,695][18883] Stopping RolloutWorker_w7...
+[2024-07-27 17:42:30,696][18883] Loop rollout_proc7_evt_loop terminating...
+[2024-07-27 17:42:30,706][00473] Component RolloutWorker_w5 stopped!
+[2024-07-27 17:42:30,711][18882] Stopping RolloutWorker_w5...
+[2024-07-27 17:42:30,718][18878] Stopping RolloutWorker_w0...
+[2024-07-27 17:42:30,719][18878] Loop rollout_proc0_evt_loop terminating...
+[2024-07-27 17:42:30,722][18879] Stopping RolloutWorker_w2...
+[2024-07-27 17:42:30,718][00473] Component RolloutWorker_w0 stopped!
+[2024-07-27 17:42:30,725][18879] Loop rollout_proc2_evt_loop terminating...
+[2024-07-27 17:42:30,725][00473] Component RolloutWorker_w2 stopped!
+[2024-07-27 17:42:30,712][18882] Loop rollout_proc5_evt_loop terminating...
+[2024-07-27 17:42:30,754][18863] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000980_4014080.pth
+[2024-07-27 17:42:30,768][18863] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001101_4509696.pth...
+[2024-07-27 17:42:31,000][00473] Component LearnerWorker_p0 stopped!
+[2024-07-27 17:42:31,001][00473] Waiting for process learner_proc0 to stop...
+[2024-07-27 17:42:30,999][18863] Stopping LearnerWorker_p0...
+[2024-07-27 17:42:31,018][18863] Loop learner_proc0_evt_loop terminating...
+[2024-07-27 17:42:32,425][00473] Waiting for process inference_proc0-0 to join...
+[2024-07-27 17:42:32,428][00473] Waiting for process rollout_proc0 to join...
+[2024-07-27 17:42:34,518][00473] Waiting for process rollout_proc1 to join...
+[2024-07-27 17:42:34,558][00473] Waiting for process rollout_proc2 to join...
+[2024-07-27 17:42:34,561][00473] Waiting for process rollout_proc3 to join...
+[2024-07-27 17:42:34,564][00473] Waiting for process rollout_proc4 to join...
+[2024-07-27 17:42:34,572][00473] Waiting for process rollout_proc5 to join...
+[2024-07-27 17:42:34,575][00473] Waiting for process rollout_proc6 to join...
+[2024-07-27 17:42:34,583][00473] Waiting for process rollout_proc7 to join...
+[2024-07-27 17:42:34,589][00473] Batcher 0 profile tree view:
+batching: 3.2638, releasing_batches: 0.0064
+[2024-07-27 17:42:34,593][00473] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0001
+wait_policy_total: 67.6765
+update_model: 1.3268
+weight_update: 0.0039
+one_step: 0.0049
+handle_policy_step: 84.2539
+deserialize: 2.1267, stack: 0.4427, obs_to_device_normalize: 17.0462, forward: 45.6140, send_messages: 3.9128
+prepare_outputs: 11.0594
+to_cpu: 6.4650
+[2024-07-27 17:42:34,595][00473] Learner 0 profile tree view:
+misc: 0.0007, prepare_batch: 4.2685
+train: 12.2778
+epoch_init: 0.0007, minibatch_init: 0.0009, losses_postprocess: 0.0813, kl_divergence: 0.1217, after_optimizer: 0.5455
+calculate_losses: 5.0106
+losses_init: 0.0005, forward_head: 0.5302, bptt_initial: 3.3199, tail: 0.3049, advantages_returns: 0.0295, losses: 0.5035
+bptt: 0.2665
+bptt_forward_core: 0.2500
+update: 6.4516
+clip: 0.1431
+[2024-07-27 17:42:34,597][00473] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.0341, enqueue_policy_requests: 17.7626, env_step: 117.1943, overhead: 2.2659, complete_rollouts: 1.0716
+save_policy_outputs: 2.8331
+split_output_tensors: 1.1090
+[2024-07-27 17:42:34,599][00473] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.0488, enqueue_policy_requests: 17.4188, env_step: 116.0178, overhead: 2.1658, complete_rollouts: 0.9147
+save_policy_outputs: 3.1246
+split_output_tensors: 1.1803
+[2024-07-27 17:42:34,601][00473] Loop Runner_EvtLoop terminating...
+[2024-07-27 17:42:34,603][00473] Runner profile tree view:
+main_loop: 181.9323
+[2024-07-27 17:42:34,609][00473] Collected {0: 4509696}, FPS: 2724.2
+[2024-07-27 17:42:34,644][00473] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-07-27 17:42:34,647][00473] Overriding arg 'num_workers' with value 1 passed from command line
+[2024-07-27 17:42:34,649][00473] Adding new argument 'no_render'=True that is not in the saved config file!
+[2024-07-27 17:42:34,650][00473] Adding new argument 'save_video'=True that is not in the saved config file!
+[2024-07-27 17:42:34,651][00473] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2024-07-27 17:42:34,652][00473] Adding new argument 'video_name'=None that is not in the saved config file!
+[2024-07-27 17:42:34,653][00473] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2024-07-27 17:42:34,654][00473] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2024-07-27 17:42:34,655][00473] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2024-07-27 17:42:34,656][00473] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2024-07-27 17:42:34,658][00473] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2024-07-27 17:42:34,659][00473] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2024-07-27 17:42:34,660][00473] Adding new argument 'train_script'=None that is not in the saved config file!
+[2024-07-27 17:42:34,661][00473] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2024-07-27 17:42:34,662][00473] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2024-07-27 17:42:34,712][00473] RunningMeanStd input shape: (3, 72, 128)
+[2024-07-27 17:42:34,715][00473] RunningMeanStd input shape: (1,)
+[2024-07-27 17:42:34,738][00473] ConvEncoder: input_channels=3
+[2024-07-27 17:42:34,806][00473] Conv encoder output size: 512
+[2024-07-27 17:42:34,808][00473] Policy head output size: 512
+[2024-07-27 17:42:34,838][00473] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001101_4509696.pth...
+[2024-07-27 17:42:35,509][00473] Num frames 100...
+[2024-07-27 17:42:35,712][00473] Num frames 200...
+[2024-07-27 17:42:35,902][00473] Num frames 300...
+[2024-07-27 17:42:36,103][00473] Num frames 400...
+[2024-07-27 17:42:36,292][00473] Num frames 500...
+[2024-07-27 17:42:36,481][00473] Num frames 600...
+[2024-07-27 17:42:36,678][00473] Num frames 700...
+[2024-07-27 17:42:36,821][00473] Num frames 800...
+[2024-07-27 17:42:36,957][00473] Num frames 900...
+[2024-07-27 17:42:37,095][00473] Num frames 1000...
+[2024-07-27 17:42:37,222][00473] Num frames 1100...
+[2024-07-27 17:42:37,349][00473] Num frames 1200...
+[2024-07-27 17:42:37,474][00473] Num frames 1300...
+[2024-07-27 17:42:37,606][00473] Num frames 1400...
+[2024-07-27 17:42:37,740][00473] Num frames 1500...
+[2024-07-27 17:42:37,870][00473] Num frames 1600...
+[2024-07-27 17:42:37,999][00473] Num frames 1700...
+[2024-07-27 17:42:38,141][00473] Num frames 1800...
+[2024-07-27 17:42:38,274][00473] Avg episode rewards: #0: 51.599, true rewards: #0: 18.600
+[2024-07-27 17:42:38,276][00473] Avg episode reward: 51.599, avg true_objective: 18.600
+[2024-07-27 17:42:38,330][00473] Num frames 1900...
+[2024-07-27 17:42:38,463][00473] Num frames 2000...
+[2024-07-27 17:42:38,590][00473] Num frames 2100...
+[2024-07-27 17:42:38,732][00473] Num frames 2200...
+[2024-07-27 17:42:38,863][00473] Num frames 2300...
+[2024-07-27 17:42:38,992][00473] Num frames 2400...
+[2024-07-27 17:42:39,133][00473] Num frames 2500...
+[2024-07-27 17:42:39,260][00473] Num frames 2600...
+[2024-07-27 17:42:39,387][00473] Num frames 2700...
+[2024-07-27 17:42:39,520][00473] Num frames 2800...
+[2024-07-27 17:42:39,657][00473] Num frames 2900...
+[2024-07-27 17:42:39,798][00473] Num frames 3000...
+[2024-07-27 17:42:39,933][00473] Num frames 3100...
+[2024-07-27 17:42:40,001][00473] Avg episode rewards: #0: 38.540, true rewards: #0: 15.540
+[2024-07-27 17:42:40,002][00473] Avg episode reward: 38.540, avg true_objective: 15.540
+[2024-07-27 17:42:40,136][00473] Num frames 3200...
+[2024-07-27 17:42:40,269][00473] Num frames 3300...
+[2024-07-27 17:42:40,402][00473] Num frames 3400...
+[2024-07-27 17:42:40,538][00473] Num frames 3500...
+[2024-07-27 17:42:40,669][00473] Num frames 3600...
+[2024-07-27 17:42:40,811][00473] Num frames 3700...
+[2024-07-27 17:42:40,938][00473] Num frames 3800...
+[2024-07-27 17:42:41,071][00473] Num frames 3900...
+[2024-07-27 17:42:41,215][00473] Num frames 4000...
+[2024-07-27 17:42:41,344][00473] Num frames 4100...
+[2024-07-27 17:42:41,475][00473] Num frames 4200...
+[2024-07-27 17:42:41,651][00473] Avg episode rewards: #0: 33.306, true rewards: #0: 14.307
+[2024-07-27 17:42:41,652][00473] Avg episode reward: 33.306, avg true_objective: 14.307
+[2024-07-27 17:42:41,666][00473] Num frames 4300...
+[2024-07-27 17:42:41,799][00473] Num frames 4400...
+[2024-07-27 17:42:41,931][00473] Num frames 4500...
+[2024-07-27 17:42:42,059][00473] Num frames 4600...
+[2024-07-27 17:42:42,196][00473] Num frames 4700...
+[2024-07-27 17:42:42,322][00473] Num frames 4800...
+[2024-07-27 17:42:42,448][00473] Num frames 4900...
+[2024-07-27 17:42:42,578][00473] Num frames 5000...
+[2024-07-27 17:42:42,712][00473] Num frames 5100...
+[2024-07-27 17:42:42,885][00473] Avg episode rewards: #0: 30.717, true rewards: #0: 12.967
+[2024-07-27 17:42:42,887][00473] Avg episode reward: 30.717, avg true_objective: 12.967
+[2024-07-27 17:42:42,906][00473] Num frames 5200...
+[2024-07-27 17:42:43,035][00473] Num frames 5300...
+[2024-07-27 17:42:43,172][00473] Num frames 5400...
+[2024-07-27 17:42:43,299][00473] Num frames 5500...
+[2024-07-27 17:42:43,426][00473] Num frames 5600...
+[2024-07-27 17:42:43,525][00473] Avg episode rewards: #0: 25.670, true rewards: #0: 11.270
+[2024-07-27 17:42:43,528][00473] Avg episode reward: 25.670, avg true_objective: 11.270
+[2024-07-27 17:42:43,613][00473] Num frames 5700...
+[2024-07-27 17:42:43,749][00473] Num frames 5800...
+[2024-07-27 17:42:43,879][00473] Num frames 5900...
+[2024-07-27 17:42:44,007][00473] Num frames 6000...
+[2024-07-27 17:42:44,134][00473] Num frames 6100...
+[2024-07-27 17:42:44,277][00473] Num frames 6200...
+[2024-07-27 17:42:44,406][00473] Num frames 6300...
+[2024-07-27 17:42:44,536][00473] Num frames 6400...
+[2024-07-27 17:42:44,678][00473] Num frames 6500...
+[2024-07-27 17:42:44,772][00473] Avg episode rewards: #0: 24.043, true rewards: #0: 10.877
+[2024-07-27 17:42:44,774][00473] Avg episode reward: 24.043, avg true_objective: 10.877
+[2024-07-27 17:42:44,868][00473] Num frames 6600...
+[2024-07-27 17:42:44,998][00473] Num frames 6700...
+[2024-07-27 17:42:45,132][00473] Num frames 6800...
+[2024-07-27 17:42:45,271][00473] Num frames 6900...
+[2024-07-27 17:42:45,403][00473] Num frames 7000...
+[2024-07-27 17:42:45,534][00473] Num frames 7100...
+[2024-07-27 17:42:45,664][00473] Num frames 7200...
+[2024-07-27 17:42:45,802][00473] Num frames 7300...
+[2024-07-27 17:42:45,929][00473] Num frames 7400...
+[2024-07-27 17:42:46,060][00473] Num frames 7500...
+[2024-07-27 17:42:46,198][00473] Num frames 7600...
+[2024-07-27 17:42:46,339][00473] Num frames 7700...
+[2024-07-27 17:42:46,470][00473] Num frames 7800...
+[2024-07-27 17:42:46,606][00473] Num frames 7900...
+[2024-07-27 17:42:46,763][00473] Num frames 8000...
+[2024-07-27 17:42:46,956][00473] Num frames 8100...
+[2024-07-27 17:42:47,147][00473] Num frames 8200...
+[2024-07-27 17:42:47,338][00473] Num frames 8300...
+[2024-07-27 17:42:47,525][00473] Num frames 8400...
+[2024-07-27 17:42:47,706][00473] Num frames 8500...
+[2024-07-27 17:42:47,893][00473] Num frames 8600...
+[2024-07-27 17:42:48,004][00473] Avg episode rewards: #0: 28.180, true rewards: #0: 12.323
+[2024-07-27 17:42:48,007][00473] Avg episode reward: 28.180, avg true_objective: 12.323
+[2024-07-27 17:42:48,143][00473] Num frames 8700...
+[2024-07-27 17:42:48,340][00473] Num frames 8800...
+[2024-07-27 17:42:48,529][00473] Num frames 8900...
+[2024-07-27 17:42:48,729][00473] Num frames 9000...
+[2024-07-27 17:42:48,916][00473] Num frames 9100...
+[2024-07-27 17:42:49,108][00473] Num frames 9200...
+[2024-07-27 17:42:49,293][00473] Num frames 9300...
+[2024-07-27 17:42:49,429][00473] Num frames 9400...
+[2024-07-27 17:42:49,560][00473] Num frames 9500...
+[2024-07-27 17:42:49,645][00473] Avg episode rewards: #0: 27.027, true rewards: #0: 11.902
+[2024-07-27 17:42:49,647][00473] Avg episode reward: 27.027, avg true_objective: 11.902
+[2024-07-27 17:42:49,752][00473] Num frames 9600...
+[2024-07-27 17:42:49,883][00473] Num frames 9700...
+[2024-07-27 17:42:50,013][00473] Num frames 9800...
+[2024-07-27 17:42:50,141][00473] Num frames 9900...
+[2024-07-27 17:42:50,269][00473] Num frames 10000...
+[2024-07-27 17:42:50,404][00473] Num frames 10100...
+[2024-07-27 17:42:50,581][00473] Avg episode rewards: #0: 25.215, true rewards: #0: 11.327
+[2024-07-27 17:42:50,583][00473] Avg episode reward: 25.215, avg true_objective: 11.327
+[2024-07-27 17:42:50,594][00473] Num frames 10200...
+[2024-07-27 17:42:50,729][00473] Num frames 10300...
+[2024-07-27 17:42:50,863][00473] Num frames 10400...
+[2024-07-27 17:42:50,998][00473] Num frames 10500...
+[2024-07-27 17:42:51,128][00473] Num frames 10600...
+[2024-07-27 17:42:51,259][00473] Num frames 10700...
+[2024-07-27 17:42:51,399][00473] Num frames 10800...
+[2024-07-27 17:42:51,535][00473] Num frames 10900...
+[2024-07-27 17:42:51,672][00473] Num frames 11000...
+[2024-07-27 17:42:51,815][00473] Num frames 11100...
+[2024-07-27 17:42:51,949][00473] Num frames 11200...
+[2024-07-27 17:42:52,118][00473] Avg episode rewards: #0: 25.187, true rewards: #0: 11.287
+[2024-07-27 17:42:52,120][00473] Avg episode reward: 25.187, avg true_objective: 11.287
+[2024-07-27 17:44:02,468][00473] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2024-07-27 17:45:25,025][00473] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-07-27 17:45:25,027][00473] Overriding arg 'num_workers' with value 1 passed from command line
+[2024-07-27 17:45:25,029][00473] Adding new argument 'no_render'=True that is not in the saved config file!
+[2024-07-27 17:45:25,031][00473] Adding new argument 'save_video'=True that is not in the saved config file!
+[2024-07-27 17:45:25,033][00473] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2024-07-27 17:45:25,034][00473] Adding new argument 'video_name'=None that is not in the saved config file!
+[2024-07-27 17:45:25,035][00473] Adding new argument 'max_num_frames'=1000000 that is not in the saved config file!
+[2024-07-27 17:45:25,037][00473] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2024-07-27 17:45:25,039][00473] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2024-07-27 17:45:25,041][00473] Adding new argument 'hf_repository'='rishisim/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2024-07-27 17:45:25,042][00473] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2024-07-27 17:45:25,044][00473] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2024-07-27 17:45:25,045][00473] Adding new argument 'train_script'=None that is not in the saved config file!
+[2024-07-27 17:45:25,046][00473] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2024-07-27 17:45:25,049][00473] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2024-07-27 17:45:25,077][00473] RunningMeanStd input shape: (3, 72, 128)
+[2024-07-27 17:45:25,079][00473] RunningMeanStd input shape: (1,)
+[2024-07-27 17:45:25,093][00473] ConvEncoder: input_channels=3
+[2024-07-27 17:45:25,131][00473] Conv encoder output size: 512
+[2024-07-27 17:45:25,132][00473] Policy head output size: 512
+[2024-07-27 17:45:25,151][00473] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001101_4509696.pth...
+[2024-07-27 17:45:25,589][00473] Num frames 100...
+[2024-07-27 17:45:25,727][00473] Num frames 200...
+[2024-07-27 17:45:25,876][00473] Num frames 300...
+[2024-07-27 17:45:26,006][00473] Num frames 400...
+[2024-07-27 17:45:26,135][00473] Num frames 500...
+[2024-07-27 17:45:26,272][00473] Num frames 600...
+[2024-07-27 17:45:26,429][00473] Avg episode rewards: #0: 13.730, true rewards: #0: 6.730
+[2024-07-27 17:45:26,431][00473] Avg episode reward: 13.730, avg true_objective: 6.730
+[2024-07-27 17:45:26,468][00473] Num frames 700...
+[2024-07-27 17:45:26,626][00473] Num frames 800...
+[2024-07-27 17:45:26,818][00473] Num frames 900...
+[2024-07-27 17:45:27,007][00473] Num frames 1000...
+[2024-07-27 17:45:27,193][00473] Num frames 1100...
+[2024-07-27 17:45:27,377][00473] Num frames 1200...
+[2024-07-27 17:45:27,567][00473] Num frames 1300...
+[2024-07-27 17:45:27,763][00473] Num frames 1400...
+[2024-07-27 17:45:27,976][00473] Num frames 1500...
+[2024-07-27 17:45:28,169][00473] Num frames 1600...
+[2024-07-27 17:45:28,231][00473] Avg episode rewards: #0: 15.505, true rewards: #0: 8.005
+[2024-07-27 17:45:28,234][00473] Avg episode reward: 15.505, avg true_objective: 8.005
+[2024-07-27 17:45:28,443][00473] Num frames 1700...
+[2024-07-27 17:45:28,632][00473] Num frames 1800...
+[2024-07-27 17:45:28,817][00473] Num frames 1900...
+[2024-07-27 17:45:29,018][00473] Num frames 2000...
+[2024-07-27 17:45:29,198][00473] Num frames 2100...
+[2024-07-27 17:45:29,330][00473] Num frames 2200...
+[2024-07-27 17:45:29,459][00473] Num frames 2300...
+[2024-07-27 17:45:29,603][00473] Num frames 2400...
+[2024-07-27 17:45:29,744][00473] Num frames 2500...
+[2024-07-27 17:45:29,876][00473] Num frames 2600...
+[2024-07-27 17:45:30,013][00473] Num frames 2700...
+[2024-07-27 17:45:30,073][00473] Avg episode rewards: #0: 18.003, true rewards: #0: 9.003
+[2024-07-27 17:45:30,075][00473] Avg episode reward: 18.003, avg true_objective: 9.003
+[2024-07-27 17:45:30,217][00473] Num frames 2800...
+[2024-07-27 17:45:30,350][00473] Num frames 2900...
+[2024-07-27 17:45:30,479][00473] Num frames 3000...
+[2024-07-27 17:45:30,618][00473] Num frames 3100...
+[2024-07-27 17:45:30,758][00473] Num frames 3200...
+[2024-07-27 17:45:30,900][00473] Num frames 3300...
+[2024-07-27 17:45:31,030][00473] Num frames 3400...
+[2024-07-27 17:45:31,157][00473] Num frames 3500...
+[2024-07-27 17:45:31,288][00473] Num frames 3600...
+[2024-07-27 17:45:31,420][00473] Num frames 3700...
+[2024-07-27 17:45:31,556][00473] Num frames 3800...
+[2024-07-27 17:45:31,689][00473] Num frames 3900...
+[2024-07-27 17:45:31,827][00473] Num frames 4000...
+[2024-07-27 17:45:31,958][00473] Num frames 4100...
+[2024-07-27 17:45:32,088][00473] Num frames 4200...
+[2024-07-27 17:45:32,257][00473] Num frames 4300...
+[2024-07-27 17:45:32,357][00473] Avg episode rewards: #0: 23.080, true rewards: #0: 10.830
+[2024-07-27 17:45:32,358][00473] Avg episode reward: 23.080, avg true_objective: 10.830
+[2024-07-27 17:45:32,448][00473] Num frames 4400...
+[2024-07-27 17:45:32,586][00473] Num frames 4500...
+[2024-07-27 17:45:32,719][00473] Num frames 4600...
+[2024-07-27 17:45:32,850][00473] Num frames 4700...
+[2024-07-27 17:45:32,981][00473] Num frames 4800...
+[2024-07-27 17:45:33,108][00473] Num frames 4900...
|
2676 |
+
[2024-07-27 17:45:33,237][00473] Num frames 5000...
|
2677 |
+
[2024-07-27 17:45:33,376][00473] Num frames 5100...
|
2678 |
+
[2024-07-27 17:45:33,507][00473] Num frames 5200...
|
2679 |
+
[2024-07-27 17:45:33,646][00473] Num frames 5300...
|
2680 |
+
[2024-07-27 17:45:33,785][00473] Num frames 5400...
|
2681 |
+
[2024-07-27 17:45:33,914][00473] Num frames 5500...
|
2682 |
+
[2024-07-27 17:45:34,044][00473] Num frames 5600...
|
2683 |
+
[2024-07-27 17:45:34,172][00473] Num frames 5700...
|
2684 |
+
[2024-07-27 17:45:34,307][00473] Num frames 5800...
|
2685 |
+
[2024-07-27 17:45:34,442][00473] Num frames 5900...
|
2686 |
+
[2024-07-27 17:45:34,577][00473] Num frames 6000...
|
2687 |
+
[2024-07-27 17:45:34,727][00473] Num frames 6100...
|
2688 |
+
[2024-07-27 17:45:34,858][00473] Num frames 6200...
|
2689 |
+
[2024-07-27 17:45:34,989][00473] Num frames 6300...
|
2690 |
+
[2024-07-27 17:45:35,118][00473] Num frames 6400...
|
2691 |
+
[2024-07-27 17:45:35,215][00473] Avg episode rewards: #0: 29.664, true rewards: #0: 12.864
|
2692 |
+
[2024-07-27 17:45:35,217][00473] Avg episode reward: 29.664, avg true_objective: 12.864
|
2693 |
+
[2024-07-27 17:45:35,312][00473] Num frames 6500...
|
2694 |
+
[2024-07-27 17:45:35,440][00473] Num frames 6600...
|
2695 |
+
[2024-07-27 17:45:35,570][00473] Num frames 6700...
|
2696 |
+
[2024-07-27 17:45:35,714][00473] Num frames 6800...
|
2697 |
+
[2024-07-27 17:45:35,886][00473] Avg episode rewards: #0: 25.633, true rewards: #0: 11.467
|
2698 |
+
[2024-07-27 17:45:35,888][00473] Avg episode reward: 25.633, avg true_objective: 11.467
|
2699 |
+
[2024-07-27 17:45:35,918][00473] Num frames 6900...
|
2700 |
+
[2024-07-27 17:45:36,046][00473] Num frames 7000...
|
2701 |
+
[2024-07-27 17:45:36,176][00473] Num frames 7100...
|
2702 |
+
[2024-07-27 17:45:36,307][00473] Num frames 7200...
|
2703 |
+
[2024-07-27 17:45:36,436][00473] Num frames 7300...
|
2704 |
+
[2024-07-27 17:45:36,566][00473] Num frames 7400...
|
2705 |
+
[2024-07-27 17:45:36,703][00473] Num frames 7500...
|
2706 |
+
[2024-07-27 17:45:36,790][00473] Avg episode rewards: #0: 24.028, true rewards: #0: 10.743
|
2707 |
+
[2024-07-27 17:45:36,791][00473] Avg episode reward: 24.028, avg true_objective: 10.743
|
2708 |
+
[2024-07-27 17:45:36,898][00473] Num frames 7600...
|
2709 |
+
[2024-07-27 17:45:37,026][00473] Num frames 7700...
|
2710 |
+
[2024-07-27 17:45:37,151][00473] Num frames 7800...
|
2711 |
+
[2024-07-27 17:45:37,286][00473] Num frames 7900...
|
2712 |
+
[2024-07-27 17:45:37,415][00473] Num frames 8000...
|
2713 |
+
[2024-07-27 17:45:37,543][00473] Num frames 8100...
|
2714 |
+
[2024-07-27 17:45:37,673][00473] Num frames 8200...
|
2715 |
+
[2024-07-27 17:45:37,820][00473] Num frames 8300...
|
2716 |
+
[2024-07-27 17:45:37,988][00473] Avg episode rewards: #0: 23.360, true rewards: #0: 10.485
|
2717 |
+
[2024-07-27 17:45:37,990][00473] Avg episode reward: 23.360, avg true_objective: 10.485
|
2718 |
+
[2024-07-27 17:45:38,012][00473] Num frames 8400...
|
2719 |
+
[2024-07-27 17:45:38,147][00473] Num frames 8500...
|
2720 |
+
[2024-07-27 17:45:38,278][00473] Num frames 8600...
|
2721 |
+
[2024-07-27 17:45:38,409][00473] Num frames 8700...
|
2722 |
+
[2024-07-27 17:45:38,541][00473] Num frames 8800...
|
2723 |
+
[2024-07-27 17:45:38,675][00473] Num frames 8900...
|
2724 |
+
[2024-07-27 17:45:38,819][00473] Num frames 9000...
|
2725 |
+
[2024-07-27 17:45:38,948][00473] Num frames 9100...
|
2726 |
+
[2024-07-27 17:45:39,079][00473] Num frames 9200...
|
2727 |
+
[2024-07-27 17:45:39,237][00473] Num frames 9300...
|
2728 |
+
[2024-07-27 17:45:39,429][00473] Num frames 9400...
|
2729 |
+
[2024-07-27 17:45:39,567][00473] Avg episode rewards: #0: 23.160, true rewards: #0: 10.493
|
2730 |
+
[2024-07-27 17:45:39,569][00473] Avg episode reward: 23.160, avg true_objective: 10.493
|
2731 |
+
[2024-07-27 17:45:39,673][00473] Num frames 9500...
|
2732 |
+
[2024-07-27 17:45:39,885][00473] Num frames 9600...
|
2733 |
+
[2024-07-27 17:45:40,068][00473] Num frames 9700...
|
2734 |
+
[2024-07-27 17:45:40,251][00473] Num frames 9800...
|
2735 |
+
[2024-07-27 17:45:40,438][00473] Num frames 9900...
|
2736 |
+
[2024-07-27 17:45:40,627][00473] Num frames 10000...
|
2737 |
+
[2024-07-27 17:45:40,837][00473] Num frames 10100...
|
2738 |
+
[2024-07-27 17:45:41,032][00473] Num frames 10200...
|
2739 |
+
[2024-07-27 17:45:41,228][00473] Num frames 10300...
|
2740 |
+
[2024-07-27 17:45:41,425][00473] Num frames 10400...
|
2741 |
+
[2024-07-27 17:45:41,622][00473] Num frames 10500...
|
2742 |
+
[2024-07-27 17:45:41,784][00473] Num frames 10600...
|
2743 |
+
[2024-07-27 17:45:41,937][00473] Num frames 10700...
|
2744 |
+
[2024-07-27 17:45:42,070][00473] Num frames 10800...
|
2745 |
+
[2024-07-27 17:45:42,198][00473] Num frames 10900...
|
2746 |
+
[2024-07-27 17:45:42,333][00473] Num frames 11000...
|
2747 |
+
[2024-07-27 17:45:42,463][00473] Num frames 11100...
|
2748 |
+
[2024-07-27 17:45:42,596][00473] Num frames 11200...
|
2749 |
+
[2024-07-27 17:45:42,732][00473] Num frames 11300...
|
2750 |
+
[2024-07-27 17:45:42,863][00473] Num frames 11400...
|
2751 |
+
[2024-07-27 17:45:43,003][00473] Num frames 11500...
|
2752 |
+
[2024-07-27 17:45:43,116][00473] Avg episode rewards: #0: 26.544, true rewards: #0: 11.544
|
2753 |
+
[2024-07-27 17:45:43,118][00473] Avg episode reward: 26.544, avg true_objective: 11.544
|
2754 |
+
[2024-07-27 17:46:54,499][00473] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
|
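
Note: the final evaluation run in the log above (loading `config.json`, overriding `num_workers=1`, adding `no_render`, `save_video`, `max_num_episodes=10`, `push_to_hub`, and `hf_repository`, then saving `replay.mp4`) corresponds to Sample Factory's `enjoy` entry point as used in the Deep RL Course VizDoom notebook. Below is a minimal sketch of how such a run is typically launched; the argument values mirror the "Adding new argument ..." log lines, but the helper import paths (`sf_examples.vizdoom.*`) and the `parse_vizdoom_cfg` helper are assumptions taken from the course material, not from this log, and may differ across sample-factory versions.

```python
from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.enjoy import enjoy
# Assumed import paths, following the Deep RL Course notebook:
from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults
from sf_examples.vizdoom.train_vizdoom import register_vizdoom_components


def parse_vizdoom_cfg(argv=None, evaluation=False):
    # Build the config the same way training does; evaluation=True enables
    # the enjoy-only arguments (save_video, push_to_hub, hf_repository, ...).
    parser, _ = parse_sf_args(argv=argv, evaluation=evaluation)
    add_doom_env_args(parser)
    doom_override_defaults(parser)
    return parse_full_cfg(parser, argv)


register_vizdoom_components()

# These flags mirror the overrides recorded in the log above.
cfg = parse_vizdoom_cfg(
    argv=[
        "--env=doom_health_gathering_supreme",
        "--train_dir=/content/train_dir",
        "--experiment=default_experiment",
        "--num_workers=1",
        "--no_render",
        "--save_video",
        "--max_num_episodes=10",
        "--push_to_hub",
        "--hf_repository=rishisim/rl_course_vizdoom_health_gathering_supreme",
    ],
    evaluation=True,
)

# Loads the latest/best checkpoint, rolls out 10 episodes, writes replay.mp4,
# and (with --push_to_hub) uploads the experiment folder to the Hub.
enjoy(cfg)
```

With these arguments, `enjoy` evaluates the checkpoint restored in the log (`checkpoint_000001101_4509696.pth`) and averages the true objective over the 10 episodes, which is the figure reported as the model's mean reward.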