Upload . with huggingface_hub

- .summary/0/events.out.tfevents.1677247769.1b4f54364242 +3 -0
- README.md +1 -1
- checkpoint_p0/checkpoint_000000982_4022272.pth +3 -0
- config.json +1 -1
- replay.mp4 +2 -2
- sf_log.txt +670 -0
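For context, a commit titled "Upload . with huggingface_hub" is normally produced by an upload call from the huggingface_hub client. Below is a minimal sketch of one plausible way this commit could have been created, assuming upload_folder was the entry point; the folder path and repository id are taken from the sf_log.txt entries later in this commit, everything else is illustrative.

# Hedged sketch: one plausible reconstruction of the upload, not the verified command.
# Assumes huggingface_hub's upload_folder; the paths and repo id come from sf_log.txt below.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/content/train_dir/default_experiment",  # experiment dir seen in the log
    repo_id="mnavas/rl_course_vizdoom_health_gathering_supreme",  # repo the model was pushed to
    repo_type="model",
)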
.summary/0/events.out.tfevents.1677247769.1b4f54364242
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd53f3ce2c7103fb8d0644900f6a458b127fa846865dbd489d74d01ac2387cc9
+size 2414
README.md
CHANGED
@@ -15,7 +15,7 @@ model-index:
       type: doom_health_gathering_supreme
     metrics:
     - type: mean_reward
-      value:
+      value: 8.90 +/- 5.34
       name: mean_reward
       verified: false
 ---
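The filled-in metric follows the "mean +/- std" convention typically used for mean_reward entries on the Hub. A minimal sketch of how such a string is usually computed from per-episode evaluation rewards is below; the reward values in it are placeholders, not the episodes behind 8.90 +/- 5.34.

# Hedged sketch: formatting a mean_reward entry from per-episode rewards.
# The episode_rewards values are illustrative placeholders only.
import numpy as np

episode_rewards = np.array([5.8, 11.0, 15.2, 21.8, 19.3])  # one value per evaluation episode
print(f"{episode_rewards.mean():.2f} +/- {episode_rewards.std():.2f}")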
checkpoint_p0/checkpoint_000000982_4022272.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee91fdd8597f4dfbdba8542f128481b5f81cd9e0e3407d47b93d4daa1729fc37
+size 34929220
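The three added lines are a Git LFS pointer: the repository stores only the oid and size, while the 34929220-byte checkpoint itself lives in LFS storage. A minimal sketch for fetching the actual file through huggingface_hub follows; the repo id and filename are taken from this commit, and the call is standard hf_hub_download usage rather than something shown in the log.

# Hedged sketch: downloading the LFS-backed checkpoint referenced by the pointer file.
from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="mnavas/rl_course_vizdoom_health_gathering_supreme",
    filename="checkpoint_p0/checkpoint_000000982_4022272.pth",
)
print(ckpt_path)  # local path to the resolved binary, not the pointer text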
config.json
CHANGED
@@ -65,7 +65,7 @@
     "summaries_use_frameskip": true,
     "heartbeat_interval": 20,
     "heartbeat_reporting_interval": 600,
-    "train_for_env_steps":
+    "train_for_env_steps": 2000000,
     "train_for_seconds": 10000000000,
     "save_every_sec": 120,
     "keep_checkpoints": 2,
replay.mp4
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6b55282d8cd2cab67d7d5d558bbfad544466278619d50dd5c36367a4071609c6
+size 16692227
sf_log.txt
CHANGED
@@ -1798,3 +1798,673 @@ main_loop: 35.4650
 [2023-02-24 14:07:35,852][00980] Avg episode rewards: #0: 27.851, true rewards: #0: 11.351
 [2023-02-24 14:07:35,853][00980] Avg episode reward: 27.851, avg true_objective: 11.351
 [2023-02-24 14:08:43,682][00980] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-02-24 14:08:46,837][00980] The model has been pushed to https://huggingface.co/mnavas/rl_course_vizdoom_health_gathering_supreme
+[2023-02-24 14:09:29,770][00980] Environment doom_basic already registered, overwriting...
+[2023-02-24 14:09:29,773][00980] Environment doom_two_colors_easy already registered, overwriting...
+[2023-02-24 14:09:29,775][00980] Environment doom_two_colors_hard already registered, overwriting...
+[2023-02-24 14:09:29,776][00980] Environment doom_dm already registered, overwriting...
+[2023-02-24 14:09:29,777][00980] Environment doom_dwango5 already registered, overwriting...
+[2023-02-24 14:09:29,783][00980] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2023-02-24 14:09:29,784][00980] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2023-02-24 14:09:29,785][00980] Environment doom_my_way_home already registered, overwriting...
+[2023-02-24 14:09:29,786][00980] Environment doom_deadly_corridor already registered, overwriting...
+[2023-02-24 14:09:29,787][00980] Environment doom_defend_the_center already registered, overwriting...
+[2023-02-24 14:09:29,788][00980] Environment doom_defend_the_line already registered, overwriting...
+[2023-02-24 14:09:29,789][00980] Environment doom_health_gathering already registered, overwriting...
+[2023-02-24 14:09:29,791][00980] Environment doom_health_gathering_supreme already registered, overwriting...
+[2023-02-24 14:09:29,792][00980] Environment doom_battle already registered, overwriting...
+[2023-02-24 14:09:29,793][00980] Environment doom_battle2 already registered, overwriting...
+[2023-02-24 14:09:29,794][00980] Environment doom_duel_bots already registered, overwriting...
+[2023-02-24 14:09:29,796][00980] Environment doom_deathmatch_bots already registered, overwriting...
+[2023-02-24 14:09:29,797][00980] Environment doom_duel already registered, overwriting...
+[2023-02-24 14:09:29,798][00980] Environment doom_deathmatch_full already registered, overwriting...
+[2023-02-24 14:09:29,799][00980] Environment doom_benchmark already registered, overwriting...
+[2023-02-24 14:09:29,801][00980] register_encoder_factory: <function make_vizdoom_encoder at 0x7ff7e26f99d0>
+[2023-02-24 14:09:29,829][00980] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-24 14:09:29,831][00980] Overriding arg 'train_for_env_steps' with value 2000000 passed from command line
+[2023-02-24 14:09:29,845][00980] Experiment dir /content/train_dir/default_experiment already exists!
+[2023-02-24 14:09:29,846][00980] Resuming existing experiment from /content/train_dir/default_experiment...
+[2023-02-24 14:09:29,848][00980] Weights and Biases integration disabled
+[2023-02-24 14:09:29,855][00980] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2023-02-24 14:09:32,673][00980] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=2000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2023-02-24 14:09:32,676][00980] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2023-02-24 14:09:32,680][00980] Rollout worker 0 uses device cpu
+[2023-02-24 14:09:32,683][00980] Rollout worker 1 uses device cpu
+[2023-02-24 14:09:32,684][00980] Rollout worker 2 uses device cpu
+[2023-02-24 14:09:32,685][00980] Rollout worker 3 uses device cpu
+[2023-02-24 14:09:32,686][00980] Rollout worker 4 uses device cpu
+[2023-02-24 14:09:32,688][00980] Rollout worker 5 uses device cpu
+[2023-02-24 14:09:32,689][00980] Rollout worker 6 uses device cpu
+[2023-02-24 14:09:32,691][00980] Rollout worker 7 uses device cpu
+[2023-02-24 14:09:32,810][00980] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-24 14:09:32,813][00980] InferenceWorker_p0-w0: min num requests: 2
+[2023-02-24 14:09:32,846][00980] Starting all processes...
+[2023-02-24 14:09:32,847][00980] Starting process learner_proc0
+[2023-02-24 14:09:32,984][00980] Starting all processes...
+[2023-02-24 14:09:32,994][00980] Starting process inference_proc0-0
+[2023-02-24 14:09:32,994][00980] Starting process rollout_proc0
+[2023-02-24 14:09:32,996][00980] Starting process rollout_proc1
+[2023-02-24 14:09:32,999][00980] Starting process rollout_proc2
+[2023-02-24 14:09:32,999][00980] Starting process rollout_proc3
+[2023-02-24 14:09:32,999][00980] Starting process rollout_proc4
+[2023-02-24 14:09:33,000][00980] Starting process rollout_proc5
+[2023-02-24 14:09:33,000][00980] Starting process rollout_proc6
+[2023-02-24 14:09:33,000][00980] Starting process rollout_proc7
[2023-02-24 14:09:41,559][24666] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
1985 |
+
[2023-02-24 14:09:41,559][24666] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
|
1986 |
+
[2023-02-24 14:09:41,666][24666] Num visible devices: 1
|
1987 |
+
[2023-02-24 14:09:41,726][24666] Starting seed is not provided
|
1988 |
+
[2023-02-24 14:09:41,727][24666] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
1989 |
+
[2023-02-24 14:09:41,728][24666] Initializing actor-critic model on device cuda:0
|
1990 |
+
[2023-02-24 14:09:41,729][24666] RunningMeanStd input shape: (3, 72, 128)
|
1991 |
+
[2023-02-24 14:09:41,730][24666] RunningMeanStd input shape: (1,)
|
1992 |
+
[2023-02-24 14:09:41,877][24666] ConvEncoder: input_channels=3
|
1993 |
+
[2023-02-24 14:09:43,471][24666] Conv encoder output size: 512
|
1994 |
+
[2023-02-24 14:09:43,472][24666] Policy head output size: 512
|
1995 |
+
[2023-02-24 14:09:43,665][24666] Created Actor Critic model with architecture:
|
1996 |
+
[2023-02-24 14:09:43,674][24666] ActorCriticSharedWeights(
|
1997 |
+
(obs_normalizer): ObservationNormalizer(
|
1998 |
+
(running_mean_std): RunningMeanStdDictInPlace(
|
1999 |
+
(running_mean_std): ModuleDict(
|
2000 |
+
(obs): RunningMeanStdInPlace()
|
2001 |
+
)
|
2002 |
+
)
|
2003 |
+
)
|
2004 |
+
(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
|
2005 |
+
(encoder): VizdoomEncoder(
|
2006 |
+
(basic_encoder): ConvEncoder(
|
2007 |
+
(enc): RecursiveScriptModule(
|
2008 |
+
original_name=ConvEncoderImpl
|
2009 |
+
(conv_head): RecursiveScriptModule(
|
2010 |
+
original_name=Sequential
|
2011 |
+
(0): RecursiveScriptModule(original_name=Conv2d)
|
2012 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
2013 |
+
(2): RecursiveScriptModule(original_name=Conv2d)
|
2014 |
+
(3): RecursiveScriptModule(original_name=ELU)
|
2015 |
+
(4): RecursiveScriptModule(original_name=Conv2d)
|
2016 |
+
(5): RecursiveScriptModule(original_name=ELU)
|
2017 |
+
)
|
2018 |
+
(mlp_layers): RecursiveScriptModule(
|
2019 |
+
original_name=Sequential
|
2020 |
+
(0): RecursiveScriptModule(original_name=Linear)
|
2021 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
2022 |
+
)
|
2023 |
+
)
|
2024 |
+
)
|
2025 |
+
)
|
2026 |
+
(core): ModelCoreRNN(
|
2027 |
+
(core): GRU(512, 512)
|
2028 |
+
)
|
2029 |
+
(decoder): MlpDecoder(
|
2030 |
+
(mlp): Identity()
|
2031 |
+
)
|
2032 |
+
(critic_linear): Linear(in_features=512, out_features=1, bias=True)
|
2033 |
+
(action_parameterization): ActionParameterizationDefault(
|
2034 |
+
(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
|
2035 |
+
)
|
2036 |
+
)
|
2037 |
+
[2023-02-24 14:09:43,933][24680] Worker 0 uses CPU cores [0]
|
2038 |
+
[2023-02-24 14:09:43,977][24681] Worker 1 uses CPU cores [1]
|
2039 |
+
[2023-02-24 14:09:44,335][24683] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
2040 |
+
[2023-02-24 14:09:44,340][24683] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
|
2041 |
+
[2023-02-24 14:09:44,430][24683] Num visible devices: 1
|
2042 |
+
[2023-02-24 14:09:45,037][24690] Worker 2 uses CPU cores [0]
|
2043 |
+
[2023-02-24 14:09:45,309][24688] Worker 3 uses CPU cores [1]
|
2044 |
+
[2023-02-24 14:09:45,561][24695] Worker 5 uses CPU cores [1]
|
2045 |
+
[2023-02-24 14:09:45,612][24693] Worker 4 uses CPU cores [0]
|
2046 |
+
[2023-02-24 14:09:45,799][24701] Worker 7 uses CPU cores [1]
|
2047 |
+
[2023-02-24 14:09:45,881][24703] Worker 6 uses CPU cores [0]
|
2048 |
+
[2023-02-24 14:09:49,269][24666] Using optimizer <class 'torch.optim.adam.Adam'>
|
2049 |
+
[2023-02-24 14:09:49,270][24666] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000980_4014080.pth...
|
2050 |
+
[2023-02-24 14:09:49,304][24666] Loading model from checkpoint
|
2051 |
+
[2023-02-24 14:09:49,308][24666] Loaded experiment state at self.train_step=980, self.env_steps=4014080
|
2052 |
+
[2023-02-24 14:09:49,308][24666] Initialized policy 0 weights for model version 980
|
2053 |
+
[2023-02-24 14:09:49,312][24666] LearnerWorker_p0 finished initialization!
|
2054 |
+
[2023-02-24 14:09:49,314][24666] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
2055 |
+
[2023-02-24 14:09:49,529][24683] RunningMeanStd input shape: (3, 72, 128)
|
2056 |
+
[2023-02-24 14:09:49,531][24683] RunningMeanStd input shape: (1,)
|
2057 |
+
[2023-02-24 14:09:49,543][24683] ConvEncoder: input_channels=3
|
2058 |
+
[2023-02-24 14:09:49,645][24683] Conv encoder output size: 512
|
2059 |
+
[2023-02-24 14:09:49,645][24683] Policy head output size: 512
|
2060 |
+
[2023-02-24 14:09:49,856][00980] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4014080. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
2061 |
+
[2023-02-24 14:09:51,910][00980] Inference worker 0-0 is ready!
|
2062 |
+
[2023-02-24 14:09:51,911][00980] All inference workers are ready! Signal rollout workers to start!
|
2063 |
+
[2023-02-24 14:09:52,016][24680] Doom resolution: 160x120, resize resolution: (128, 72)
|
2064 |
+
[2023-02-24 14:09:52,012][24693] Doom resolution: 160x120, resize resolution: (128, 72)
|
2065 |
+
[2023-02-24 14:09:52,014][24703] Doom resolution: 160x120, resize resolution: (128, 72)
|
2066 |
+
[2023-02-24 14:09:52,017][24690] Doom resolution: 160x120, resize resolution: (128, 72)
|
2067 |
+
[2023-02-24 14:09:52,031][24688] Doom resolution: 160x120, resize resolution: (128, 72)
|
2068 |
+
[2023-02-24 14:09:52,032][24695] Doom resolution: 160x120, resize resolution: (128, 72)
|
2069 |
+
[2023-02-24 14:09:52,028][24701] Doom resolution: 160x120, resize resolution: (128, 72)
|
2070 |
+
[2023-02-24 14:09:52,030][24681] Doom resolution: 160x120, resize resolution: (128, 72)
|
2071 |
+
[2023-02-24 14:09:52,803][00980] Heartbeat connected on Batcher_0
|
2072 |
+
[2023-02-24 14:09:52,810][00980] Heartbeat connected on LearnerWorker_p0
|
2073 |
+
[2023-02-24 14:09:52,840][24701] Decorrelating experience for 0 frames...
|
2074 |
+
[2023-02-24 14:09:52,841][24695] Decorrelating experience for 0 frames...
|
2075 |
+
[2023-02-24 14:09:52,845][00980] Heartbeat connected on InferenceWorker_p0-w0
|
2076 |
+
[2023-02-24 14:09:53,214][24693] Decorrelating experience for 0 frames...
|
2077 |
+
[2023-02-24 14:09:53,221][24690] Decorrelating experience for 0 frames...
|
2078 |
+
[2023-02-24 14:09:53,227][24703] Decorrelating experience for 0 frames...
|
2079 |
+
[2023-02-24 14:09:53,926][24688] Decorrelating experience for 0 frames...
|
2080 |
+
[2023-02-24 14:09:53,929][24695] Decorrelating experience for 32 frames...
|
2081 |
+
[2023-02-24 14:09:53,931][24701] Decorrelating experience for 32 frames...
|
2082 |
+
[2023-02-24 14:09:54,439][24703] Decorrelating experience for 32 frames...
|
2083 |
+
[2023-02-24 14:09:54,441][24693] Decorrelating experience for 32 frames...
|
2084 |
+
[2023-02-24 14:09:54,501][24680] Decorrelating experience for 0 frames...
|
2085 |
+
[2023-02-24 14:09:54,732][24688] Decorrelating experience for 32 frames...
|
2086 |
+
[2023-02-24 14:09:54,847][24695] Decorrelating experience for 64 frames...
|
2087 |
+
[2023-02-24 14:09:54,855][00980] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
2088 |
+
[2023-02-24 14:09:55,223][24690] Decorrelating experience for 32 frames...
|
2089 |
+
[2023-02-24 14:09:55,748][24680] Decorrelating experience for 32 frames...
|
2090 |
+
[2023-02-24 14:09:55,944][24688] Decorrelating experience for 64 frames...
|
2091 |
+
[2023-02-24 14:09:55,960][24701] Decorrelating experience for 64 frames...
|
2092 |
+
[2023-02-24 14:09:55,990][24703] Decorrelating experience for 64 frames...
|
2093 |
+
[2023-02-24 14:09:56,934][24681] Decorrelating experience for 0 frames...
|
2094 |
+
[2023-02-24 14:09:57,310][24690] Decorrelating experience for 64 frames...
|
2095 |
+
[2023-02-24 14:09:58,045][24695] Decorrelating experience for 96 frames...
|
2096 |
+
[2023-02-24 14:09:58,046][24693] Decorrelating experience for 64 frames...
|
2097 |
+
[2023-02-24 14:09:58,090][24680] Decorrelating experience for 64 frames...
|
2098 |
+
[2023-02-24 14:09:58,226][24688] Decorrelating experience for 96 frames...
|
2099 |
+
[2023-02-24 14:09:58,248][24701] Decorrelating experience for 96 frames...
|
2100 |
+
[2023-02-24 14:09:58,487][00980] Heartbeat connected on RolloutWorker_w5
|
2101 |
+
[2023-02-24 14:09:58,896][00980] Heartbeat connected on RolloutWorker_w3
|
2102 |
+
[2023-02-24 14:09:58,898][00980] Heartbeat connected on RolloutWorker_w7
|
2103 |
+
[2023-02-24 14:09:59,487][24703] Decorrelating experience for 96 frames...
|
2104 |
+
[2023-02-24 14:09:59,560][24681] Decorrelating experience for 32 frames...
|
2105 |
+
[2023-02-24 14:09:59,855][00980] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
2106 |
+
[2023-02-24 14:10:00,072][00980] Heartbeat connected on RolloutWorker_w6
|
2107 |
+
[2023-02-24 14:10:00,336][24690] Decorrelating experience for 96 frames...
|
2108 |
+
[2023-02-24 14:10:00,841][00980] Heartbeat connected on RolloutWorker_w2
|
2109 |
+
[2023-02-24 14:10:01,251][24693] Decorrelating experience for 96 frames...
|
2110 |
+
[2023-02-24 14:10:01,256][24680] Decorrelating experience for 96 frames...
|
2111 |
+
[2023-02-24 14:10:01,914][00980] Heartbeat connected on RolloutWorker_w0
|
2112 |
+
[2023-02-24 14:10:01,917][24681] Decorrelating experience for 64 frames...
|
2113 |
+
[2023-02-24 14:10:01,957][00980] Heartbeat connected on RolloutWorker_w4
|
2114 |
+
[2023-02-24 14:10:04,856][00980] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4014080. Throughput: 0: 116.8. Samples: 1752. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
2115 |
+
[2023-02-24 14:10:04,862][00980] Avg episode reward: [(0, '1.850')]
|
2116 |
+
[2023-02-24 14:10:04,999][24666] Signal inference workers to stop experience collection...
|
2117 |
+
[2023-02-24 14:10:05,017][24683] InferenceWorker_p0-w0: stopping experience collection
|
2118 |
+
[2023-02-24 14:10:05,324][24681] Decorrelating experience for 96 frames...
|
2119 |
+
[2023-02-24 14:10:05,455][00980] Heartbeat connected on RolloutWorker_w1
|
2120 |
+
[2023-02-24 14:10:07,790][24666] Signal inference workers to resume experience collection...
|
2121 |
+
[2023-02-24 14:10:07,790][24683] InferenceWorker_p0-w0: resuming experience collection
|
2122 |
+
[2023-02-24 14:10:07,796][24666] Stopping Batcher_0...
|
2123 |
+
[2023-02-24 14:10:07,798][24666] Loop batcher_evt_loop terminating...
|
2124 |
+
[2023-02-24 14:10:07,799][00980] Component Batcher_0 stopped!
|
2125 |
+
[2023-02-24 14:10:07,851][24703] Stopping RolloutWorker_w6...
|
2126 |
+
[2023-02-24 14:10:07,851][00980] Component RolloutWorker_w6 stopped!
|
2127 |
+
[2023-02-24 14:10:07,860][24693] Stopping RolloutWorker_w4...
|
2128 |
+
[2023-02-24 14:10:07,861][24693] Loop rollout_proc4_evt_loop terminating...
|
2129 |
+
[2023-02-24 14:10:07,854][24680] Stopping RolloutWorker_w0...
|
2130 |
+
[2023-02-24 14:10:07,863][24680] Loop rollout_proc0_evt_loop terminating...
|
2131 |
+
[2023-02-24 14:10:07,855][00980] Component RolloutWorker_w0 stopped!
|
2132 |
+
[2023-02-24 14:10:07,863][00980] Component RolloutWorker_w4 stopped!
|
2133 |
+
[2023-02-24 14:10:07,866][00980] Component RolloutWorker_w2 stopped!
|
2134 |
+
[2023-02-24 14:10:07,866][24690] Stopping RolloutWorker_w2...
|
2135 |
+
[2023-02-24 14:10:07,851][24703] Loop rollout_proc6_evt_loop terminating...
|
2136 |
+
[2023-02-24 14:10:07,870][24690] Loop rollout_proc2_evt_loop terminating...
|
2137 |
+
[2023-02-24 14:10:07,886][00980] Component RolloutWorker_w1 stopped!
|
2138 |
+
[2023-02-24 14:10:07,886][24681] Stopping RolloutWorker_w1...
|
2139 |
+
[2023-02-24 14:10:07,908][00980] Component RolloutWorker_w7 stopped!
|
2140 |
+
[2023-02-24 14:10:07,916][00980] Component RolloutWorker_w3 stopped!
|
2141 |
+
[2023-02-24 14:10:07,917][24688] Stopping RolloutWorker_w3...
|
2142 |
+
[2023-02-24 14:10:07,923][00980] Component RolloutWorker_w5 stopped!
|
2143 |
+
[2023-02-24 14:10:07,909][24701] Stopping RolloutWorker_w7...
|
2144 |
+
[2023-02-24 14:10:07,904][24681] Loop rollout_proc1_evt_loop terminating...
|
2145 |
+
[2023-02-24 14:10:07,924][24695] Stopping RolloutWorker_w5...
|
2146 |
+
[2023-02-24 14:10:07,922][24688] Loop rollout_proc3_evt_loop terminating...
|
2147 |
+
[2023-02-24 14:10:07,928][24701] Loop rollout_proc7_evt_loop terminating...
|
2148 |
+
[2023-02-24 14:10:07,930][24695] Loop rollout_proc5_evt_loop terminating...
|
2149 |
+
[2023-02-24 14:10:07,947][24683] Weights refcount: 2 0
|
2150 |
+
[2023-02-24 14:10:07,957][24683] Stopping InferenceWorker_p0-w0...
|
2151 |
+
[2023-02-24 14:10:07,957][00980] Component InferenceWorker_p0-w0 stopped!
|
2152 |
+
[2023-02-24 14:10:07,962][24683] Loop inference_proc0-0_evt_loop terminating...
|
2153 |
+
[2023-02-24 14:10:10,081][24666] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000982_4022272.pth...
|
2154 |
+
[2023-02-24 14:10:10,180][24666] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
|
2155 |
+
[2023-02-24 14:10:10,186][24666] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000982_4022272.pth...
|
2156 |
+
[2023-02-24 14:10:10,319][24666] Stopping LearnerWorker_p0...
|
2157 |
+
[2023-02-24 14:10:10,320][24666] Loop learner_proc0_evt_loop terminating...
|
2158 |
+
[2023-02-24 14:10:10,321][00980] Component LearnerWorker_p0 stopped!
|
2159 |
+
[2023-02-24 14:10:10,323][00980] Waiting for process learner_proc0 to stop...
|
2160 |
+
[2023-02-24 14:10:11,370][00980] Waiting for process inference_proc0-0 to join...
|
2161 |
+
[2023-02-24 14:10:11,373][00980] Waiting for process rollout_proc0 to join...
|
2162 |
+
[2023-02-24 14:10:11,376][00980] Waiting for process rollout_proc1 to join...
|
2163 |
+
[2023-02-24 14:10:11,379][00980] Waiting for process rollout_proc2 to join...
|
2164 |
+
[2023-02-24 14:10:11,382][00980] Waiting for process rollout_proc3 to join...
|
2165 |
+
[2023-02-24 14:10:11,385][00980] Waiting for process rollout_proc4 to join...
|
2166 |
+
[2023-02-24 14:10:11,386][00980] Waiting for process rollout_proc5 to join...
|
2167 |
+
[2023-02-24 14:10:11,388][00980] Waiting for process rollout_proc6 to join...
|
2168 |
+
[2023-02-24 14:10:11,390][00980] Waiting for process rollout_proc7 to join...
|
2169 |
+
[2023-02-24 14:10:11,391][00980] Batcher 0 profile tree view:
|
2170 |
+
batching: 0.1455, releasing_batches: 0.0007
|
2171 |
+
[2023-02-24 14:10:11,392][00980] InferenceWorker_p0-w0 profile tree view:
|
2172 |
+
wait_policy: 0.0126
|
2173 |
+
wait_policy_total: 8.5993
|
2174 |
+
update_model: 0.0278
|
2175 |
+
weight_update: 0.0017
|
2176 |
+
one_step: 0.0564
|
2177 |
+
handle_policy_step: 4.2885
|
2178 |
+
deserialize: 0.0514, stack: 0.0076, obs_to_device_normalize: 0.4279, forward: 3.3584, send_messages: 0.1076
|
2179 |
+
prepare_outputs: 0.2513
|
2180 |
+
to_cpu: 0.1288
|
2181 |
+
[2023-02-24 14:10:11,393][00980] Learner 0 profile tree view:
|
2182 |
+
misc: 0.0000, prepare_batch: 5.3320
|
2183 |
+
train: 1.3646
|
2184 |
+
epoch_init: 0.0000, minibatch_init: 0.0000, losses_postprocess: 0.0003, kl_divergence: 0.0015, after_optimizer: 0.0074
|
2185 |
+
calculate_losses: 0.2007
|
2186 |
+
losses_init: 0.0000, forward_head: 0.1107, bptt_initial: 0.0672, tail: 0.0012, advantages_returns: 0.0008, losses: 0.0182
|
2187 |
+
bptt: 0.0023
|
2188 |
+
bptt_forward_core: 0.0021
|
2189 |
+
update: 1.1537
|
2190 |
+
clip: 0.0045
|
2191 |
+
[2023-02-24 14:10:11,395][00980] RolloutWorker_w0 profile tree view:
|
2192 |
+
wait_for_trajectories: 0.0015, enqueue_policy_requests: 0.6653, env_step: 1.8552, overhead: 0.1000, complete_rollouts: 0.0290
|
2193 |
+
save_policy_outputs: 0.1110
|
2194 |
+
split_output_tensors: 0.0652
|
2195 |
+
[2023-02-24 14:10:11,400][00980] RolloutWorker_w7 profile tree view:
|
2196 |
+
wait_for_trajectories: 0.0007, enqueue_policy_requests: 0.5677, env_step: 2.9069, overhead: 0.1583, complete_rollouts: 0.0367
|
2197 |
+
save_policy_outputs: 0.1729
|
2198 |
+
split_output_tensors: 0.1089
|
2199 |
+
[2023-02-24 14:10:11,401][00980] Loop Runner_EvtLoop terminating...
|
2200 |
+
[2023-02-24 14:10:11,403][00980] Runner profile tree view:
|
2201 |
+
main_loop: 38.5571
|
2202 |
+
[2023-02-24 14:10:11,405][00980] Collected {0: 4022272}, FPS: 212.5
|
+[2023-02-24 14:10:11,454][00980] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-24 14:10:11,455][00980] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-02-24 14:10:11,457][00980] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-02-24 14:10:11,459][00980] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-02-24 14:10:11,463][00980] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-24 14:10:11,469][00980] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-02-24 14:10:11,472][00980] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-24 14:10:11,473][00980] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-02-24 14:10:11,475][00980] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2023-02-24 14:10:11,476][00980] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2023-02-24 14:10:11,478][00980] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-02-24 14:10:11,479][00980] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-02-24 14:10:11,480][00980] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-02-24 14:10:11,481][00980] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-02-24 14:10:11,483][00980] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-02-24 14:10:11,505][00980] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-24 14:10:11,512][00980] RunningMeanStd input shape: (1,)
+[2023-02-24 14:10:11,531][00980] ConvEncoder: input_channels=3
+[2023-02-24 14:10:11,573][00980] Conv encoder output size: 512
+[2023-02-24 14:10:11,575][00980] Policy head output size: 512
+[2023-02-24 14:10:11,597][00980] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000982_4022272.pth...
+[2023-02-24 14:10:12,226][00980] Num frames 100...
+[2023-02-24 14:10:12,342][00980] Num frames 200...
+[2023-02-24 14:10:12,466][00980] Num frames 300...
+[2023-02-24 14:10:12,599][00980] Num frames 400...
+[2023-02-24 14:10:12,711][00980] Num frames 500...
+[2023-02-24 14:10:12,828][00980] Num frames 600...
+[2023-02-24 14:10:12,947][00980] Num frames 700...
+[2023-02-24 14:10:13,071][00980] Num frames 800...
+[2023-02-24 14:10:13,155][00980] Avg episode rewards: #0: 22.240, true rewards: #0: 8.240
+[2023-02-24 14:10:13,157][00980] Avg episode reward: 22.240, avg true_objective: 8.240
+[2023-02-24 14:10:13,254][00980] Num frames 900...
+[2023-02-24 14:10:13,391][00980] Num frames 1000...
+[2023-02-24 14:10:13,503][00980] Num frames 1100...
+[2023-02-24 14:10:13,625][00980] Num frames 1200...
+[2023-02-24 14:10:13,742][00980] Num frames 1300...
+[2023-02-24 14:10:13,869][00980] Num frames 1400...
+[2023-02-24 14:10:13,995][00980] Num frames 1500...
+[2023-02-24 14:10:14,112][00980] Num frames 1600...
+[2023-02-24 14:10:14,228][00980] Num frames 1700...
+[2023-02-24 14:10:14,350][00980] Num frames 1800...
+[2023-02-24 14:10:14,468][00980] Num frames 1900...
+[2023-02-24 14:10:14,612][00980] Avg episode rewards: #0: 23.880, true rewards: #0: 9.880
+[2023-02-24 14:10:14,614][00980] Avg episode reward: 23.880, avg true_objective: 9.880
+[2023-02-24 14:10:14,646][00980] Num frames 2000...
+[2023-02-24 14:10:14,768][00980] Num frames 2100...
+[2023-02-24 14:10:14,893][00980] Num frames 2200...
+[2023-02-24 14:10:14,946][00980] Avg episode rewards: #0: 17.000, true rewards: #0: 7.333
+[2023-02-24 14:10:14,948][00980] Avg episode reward: 17.000, avg true_objective: 7.333
+[2023-02-24 14:10:15,119][00980] Num frames 2300...
+[2023-02-24 14:10:15,316][00980] Num frames 2400...
+[2023-02-24 14:10:15,494][00980] Num frames 2500...
+[2023-02-24 14:10:15,671][00980] Num frames 2600...
+[2023-02-24 14:10:15,840][00980] Num frames 2700...
+[2023-02-24 14:10:16,019][00980] Num frames 2800...
+[2023-02-24 14:10:16,200][00980] Num frames 2900...
+[2023-02-24 14:10:16,366][00980] Num frames 3000...
+[2023-02-24 14:10:16,531][00980] Num frames 3100...
+[2023-02-24 14:10:16,699][00980] Num frames 3200...
+[2023-02-24 14:10:16,857][00980] Num frames 3300...
+[2023-02-24 14:10:17,023][00980] Num frames 3400...
+[2023-02-24 14:10:17,190][00980] Num frames 3500...
+[2023-02-24 14:10:17,357][00980] Num frames 3600...
+[2023-02-24 14:10:17,518][00980] Num frames 3700...
+[2023-02-24 14:10:17,635][00980] Avg episode rewards: #0: 22.590, true rewards: #0: 9.340
+[2023-02-24 14:10:17,636][00980] Avg episode reward: 22.590, avg true_objective: 9.340
+[2023-02-24 14:10:17,746][00980] Num frames 3800...
+[2023-02-24 14:10:17,911][00980] Num frames 3900...
+[2023-02-24 14:10:18,074][00980] Num frames 4000...
+[2023-02-24 14:10:18,236][00980] Num frames 4100...
+[2023-02-24 14:10:18,371][00980] Avg episode rewards: #0: 19.304, true rewards: #0: 8.304
+[2023-02-24 14:10:18,373][00980] Avg episode reward: 19.304, avg true_objective: 8.304
+[2023-02-24 14:10:18,451][00980] Num frames 4200...
+[2023-02-24 14:10:18,613][00980] Num frames 4300...
+[2023-02-24 14:10:18,785][00980] Num frames 4400...
+[2023-02-24 14:10:18,963][00980] Num frames 4500...
+[2023-02-24 14:10:19,145][00980] Num frames 4600...
+[2023-02-24 14:10:19,318][00980] Num frames 4700...
+[2023-02-24 14:10:19,493][00980] Num frames 4800...
+[2023-02-24 14:10:19,629][00980] Num frames 4900...
+[2023-02-24 14:10:19,750][00980] Num frames 5000...
+[2023-02-24 14:10:19,875][00980] Num frames 5100...
+[2023-02-24 14:10:19,993][00980] Num frames 5200...
+[2023-02-24 14:10:20,122][00980] Num frames 5300...
+[2023-02-24 14:10:20,241][00980] Num frames 5400...
+[2023-02-24 14:10:20,361][00980] Num frames 5500...
+[2023-02-24 14:10:20,480][00980] Num frames 5600...
+[2023-02-24 14:10:20,592][00980] Num frames 5700...
+[2023-02-24 14:10:20,709][00980] Num frames 5800...
+[2023-02-24 14:10:20,823][00980] Num frames 5900...
+[2023-02-24 14:10:20,893][00980] Avg episode rewards: #0: 23.187, true rewards: #0: 9.853
+[2023-02-24 14:10:20,895][00980] Avg episode reward: 23.187, avg true_objective: 9.853
+[2023-02-24 14:10:20,999][00980] Num frames 6000...
+[2023-02-24 14:10:21,065][00980] Avg episode rewards: #0: 20.154, true rewards: #0: 8.583
+[2023-02-24 14:10:21,066][00980] Avg episode reward: 20.154, avg true_objective: 8.583
+[2023-02-24 14:10:21,180][00980] Num frames 6100...
+[2023-02-24 14:10:21,305][00980] Num frames 6200...
+[2023-02-24 14:10:21,421][00980] Num frames 6300...
+[2023-02-24 14:10:21,544][00980] Num frames 6400...
+[2023-02-24 14:10:21,669][00980] Num frames 6500...
+[2023-02-24 14:10:21,781][00980] Num frames 6600...
+[2023-02-24 14:10:21,903][00980] Num frames 6700...
+[2023-02-24 14:10:22,023][00980] Num frames 6800...
+[2023-02-24 14:10:22,151][00980] Num frames 6900...
+[2023-02-24 14:10:22,270][00980] Num frames 7000...
+[2023-02-24 14:10:22,391][00980] Num frames 7100...
+[2023-02-24 14:10:22,520][00980] Avg episode rewards: #0: 20.951, true rewards: #0: 8.951
+[2023-02-24 14:10:22,521][00980] Avg episode reward: 20.951, avg true_objective: 8.951
+[2023-02-24 14:10:22,572][00980] Num frames 7200...
+[2023-02-24 14:10:22,691][00980] Num frames 7300...
+[2023-02-24 14:10:22,808][00980] Num frames 7400...
+[2023-02-24 14:10:22,931][00980] Num frames 7500...
+[2023-02-24 14:10:23,056][00980] Num frames 7600...
+[2023-02-24 14:10:23,189][00980] Num frames 7700...
+[2023-02-24 14:10:23,314][00980] Num frames 7800...
+[2023-02-24 14:10:23,440][00980] Num frames 7900...
+[2023-02-24 14:10:23,556][00980] Num frames 8000...
+[2023-02-24 14:10:23,682][00980] Num frames 8100...
+[2023-02-24 14:10:23,839][00980] Avg episode rewards: #0: 21.100, true rewards: #0: 9.100
+[2023-02-24 14:10:23,841][00980] Avg episode reward: 21.100, avg true_objective: 9.100
+[2023-02-24 14:10:23,857][00980] Num frames 8200...
+[2023-02-24 14:10:23,974][00980] Num frames 8300...
+[2023-02-24 14:10:24,100][00980] Num frames 8400...
+[2023-02-24 14:10:24,219][00980] Num frames 8500...
+[2023-02-24 14:10:24,333][00980] Num frames 8600...
+[2023-02-24 14:10:24,450][00980] Num frames 8700...
+[2023-02-24 14:10:24,566][00980] Num frames 8800...
+[2023-02-24 14:10:24,690][00980] Num frames 8900...
+[2023-02-24 14:10:24,807][00980] Num frames 9000...
+[2023-02-24 14:10:24,928][00980] Num frames 9100...
+[2023-02-24 14:10:25,045][00980] Num frames 9200...
+[2023-02-24 14:10:25,176][00980] Num frames 9300...
+[2023-02-24 14:10:25,295][00980] Num frames 9400...
+[2023-02-24 14:10:25,420][00980] Num frames 9500...
+[2023-02-24 14:10:25,546][00980] Num frames 9600...
+[2023-02-24 14:10:25,620][00980] Avg episode rewards: #0: 22.314, true rewards: #0: 9.614
+[2023-02-24 14:10:25,622][00980] Avg episode reward: 22.314, avg true_objective: 9.614
+[2023-02-24 14:11:23,996][00980] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-02-24 14:11:24,026][00980] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-24 14:11:24,027][00980] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-02-24 14:11:24,028][00980] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-02-24 14:11:24,031][00980] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-02-24 14:11:24,033][00980] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-24 14:11:24,035][00980] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-02-24 14:11:24,045][00980] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2023-02-24 14:11:24,047][00980] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-02-24 14:11:24,050][00980] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2023-02-24 14:11:24,053][00980] Adding new argument 'hf_repository'='mnavas/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2023-02-24 14:11:24,055][00980] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-02-24 14:11:24,058][00980] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-02-24 14:11:24,059][00980] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-02-24 14:11:24,060][00980] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-02-24 14:11:24,062][00980] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-02-24 14:11:24,081][00980] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-24 14:11:24,083][00980] RunningMeanStd input shape: (1,)
+[2023-02-24 14:11:24,098][00980] ConvEncoder: input_channels=3
+[2023-02-24 14:11:24,135][00980] Conv encoder output size: 512
+[2023-02-24 14:11:24,139][00980] Policy head output size: 512
+[2023-02-24 14:11:24,159][00980] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000982_4022272.pth...
+[2023-02-24 14:11:24,602][00980] Num frames 100...
+[2023-02-24 14:11:24,723][00980] Num frames 200...
+[2023-02-24 14:11:24,864][00980] Avg episode rewards: #0: 5.760, true rewards: #0: 2.760
+[2023-02-24 14:11:24,866][00980] Avg episode reward: 5.760, avg true_objective: 2.760
+[2023-02-24 14:11:24,899][00980] Num frames 300...
+[2023-02-24 14:11:25,014][00980] Num frames 400...
+[2023-02-24 14:11:25,127][00980] Num frames 500...
+[2023-02-24 14:11:25,239][00980] Num frames 600...
+[2023-02-24 14:11:25,361][00980] Num frames 700...
+[2023-02-24 14:11:25,478][00980] Num frames 800...
+[2023-02-24 14:11:25,598][00980] Num frames 900...
+[2023-02-24 14:11:25,720][00980] Num frames 1000...
+[2023-02-24 14:11:25,780][00980] Avg episode rewards: #0: 11.015, true rewards: #0: 5.015
+[2023-02-24 14:11:25,781][00980] Avg episode reward: 11.015, avg true_objective: 5.015
+[2023-02-24 14:11:25,893][00980] Num frames 1100...
+[2023-02-24 14:11:26,017][00980] Num frames 1200...
+[2023-02-24 14:11:26,132][00980] Num frames 1300...
+[2023-02-24 14:11:26,254][00980] Num frames 1400...
+[2023-02-24 14:11:26,368][00980] Num frames 1500...
+[2023-02-24 14:11:26,484][00980] Num frames 1600...
+[2023-02-24 14:11:26,621][00980] Num frames 1700...
+[2023-02-24 14:11:26,803][00980] Num frames 1800...
+[2023-02-24 14:11:26,965][00980] Num frames 1900...
+[2023-02-24 14:11:27,126][00980] Num frames 2000...
+[2023-02-24 14:11:27,288][00980] Num frames 2100...
+[2023-02-24 14:11:27,438][00980] Avg episode rewards: #0: 15.183, true rewards: #0: 7.183
+[2023-02-24 14:11:27,441][00980] Avg episode reward: 15.183, avg true_objective: 7.183
+[2023-02-24 14:11:27,529][00980] Num frames 2200...
+[2023-02-24 14:11:27,688][00980] Num frames 2300...
+[2023-02-24 14:11:27,853][00980] Num frames 2400...
+[2023-02-24 14:11:28,016][00980] Num frames 2500...
+[2023-02-24 14:11:28,173][00980] Num frames 2600...
+[2023-02-24 14:11:28,336][00980] Num frames 2700...
+[2023-02-24 14:11:28,499][00980] Num frames 2800...
+[2023-02-24 14:11:28,668][00980] Num frames 2900...
+[2023-02-24 14:11:28,828][00980] Num frames 3000...
+[2023-02-24 14:11:29,003][00980] Num frames 3100...
+[2023-02-24 14:11:29,184][00980] Num frames 3200...
+[2023-02-24 14:11:29,363][00980] Num frames 3300...
+[2023-02-24 14:11:29,537][00980] Num frames 3400...
+[2023-02-24 14:11:29,710][00980] Num frames 3500...
+[2023-02-24 14:11:29,884][00980] Num frames 3600...
+[2023-02-24 14:11:30,060][00980] Num frames 3700...
+[2023-02-24 14:11:30,160][00980] Avg episode rewards: #0: 21.807, true rewards: #0: 9.307
+[2023-02-24 14:11:30,162][00980] Avg episode reward: 21.807, avg true_objective: 9.307
+[2023-02-24 14:11:30,272][00980] Num frames 3800...
+[2023-02-24 14:11:30,387][00980] Num frames 3900...
+[2023-02-24 14:11:30,504][00980] Num frames 4000...
+[2023-02-24 14:11:30,625][00980] Num frames 4100...
+[2023-02-24 14:11:30,746][00980] Num frames 4200...
+[2023-02-24 14:11:30,851][00980] Avg episode rewards: #0: 19.284, true rewards: #0: 8.484
+[2023-02-24 14:11:30,853][00980] Avg episode reward: 19.284, avg true_objective: 8.484
+[2023-02-24 14:11:30,922][00980] Num frames 4300...
+[2023-02-24 14:11:31,038][00980] Num frames 4400...
+[2023-02-24 14:11:31,164][00980] Num frames 4500...
+[2023-02-24 14:11:31,287][00980] Num frames 4600...
+[2023-02-24 14:11:31,406][00980] Num frames 4700...
+[2023-02-24 14:11:31,517][00980] Num frames 4800...
+[2023-02-24 14:11:31,633][00980] Num frames 4900...
+[2023-02-24 14:11:31,746][00980] Num frames 5000...
+[2023-02-24 14:11:31,851][00980] Avg episode rewards: #0: 19.237, true rewards: #0: 8.403
+[2023-02-24 14:11:31,853][00980] Avg episode reward: 19.237, avg true_objective: 8.403
+[2023-02-24 14:11:31,924][00980] Num frames 5100...
+[2023-02-24 14:11:32,046][00980] Num frames 5200...
+[2023-02-24 14:11:32,170][00980] Num frames 5300...
+[2023-02-24 14:11:32,286][00980] Num frames 5400...
+[2023-02-24 14:11:32,397][00980] Num frames 5500...
+[2023-02-24 14:11:32,512][00980] Avg episode rewards: #0: 17.506, true rewards: #0: 7.934
+[2023-02-24 14:11:32,513][00980] Avg episode reward: 17.506, avg true_objective: 7.934
+[2023-02-24 14:11:32,568][00980] Num frames 5600...
+[2023-02-24 14:11:32,687][00980] Num frames 5700...
+[2023-02-24 14:11:32,800][00980] Num frames 5800...
+[2023-02-24 14:11:32,920][00980] Num frames 5900...
+[2023-02-24 14:11:33,040][00980] Num frames 6000...
+[2023-02-24 14:11:33,157][00980] Num frames 6100...
+[2023-02-24 14:11:33,280][00980] Num frames 6200...
+[2023-02-24 14:11:33,441][00980] Avg episode rewards: #0: 17.363, true rewards: #0: 7.862
+[2023-02-24 14:11:33,442][00980] Avg episode reward: 17.363, avg true_objective: 7.862
+[2023-02-24 14:11:33,457][00980] Num frames 6300...
+[2023-02-24 14:11:33,571][00980] Num frames 6400...
+[2023-02-24 14:11:33,685][00980] Num frames 6500...
+[2023-02-24 14:11:33,807][00980] Num frames 6600...
+[2023-02-24 14:11:33,927][00980] Num frames 6700...
+[2023-02-24 14:11:34,099][00980] Avg episode rewards: #0: 16.440, true rewards: #0: 7.551
+[2023-02-24 14:11:34,102][00980] Avg episode reward: 16.440, avg true_objective: 7.551
+[2023-02-24 14:11:34,110][00980] Num frames 6800...
+[2023-02-24 14:11:34,224][00980] Num frames 6900...
+[2023-02-24 14:11:34,343][00980] Num frames 7000...
+[2023-02-24 14:11:34,457][00980] Num frames 7100...
+[2023-02-24 14:11:34,576][00980] Num frames 7200...
+[2023-02-24 14:11:34,691][00980] Num frames 7300...
+[2023-02-24 14:11:34,805][00980] Num frames 7400...
+[2023-02-24 14:11:34,926][00980] Num frames 7500...
+[2023-02-24 14:11:35,044][00980] Num frames 7600...
+[2023-02-24 14:11:35,165][00980] Num frames 7700...
+[2023-02-24 14:11:35,281][00980] Num frames 7800...
+[2023-02-24 14:11:35,399][00980] Num frames 7900...
+[2023-02-24 14:11:35,514][00980] Num frames 8000...
+[2023-02-24 14:11:35,643][00980] Num frames 8100...
+[2023-02-24 14:11:35,756][00980] Num frames 8200...
+[2023-02-24 14:11:35,872][00980] Num frames 8300...
+[2023-02-24 14:11:36,001][00980] Num frames 8400...
+[2023-02-24 14:11:36,126][00980] Num frames 8500...
+[2023-02-24 14:11:36,241][00980] Num frames 8600...
+[2023-02-24 14:11:36,360][00980] Num frames 8700...
+[2023-02-24 14:11:36,486][00980] Num frames 8800...
+[2023-02-24 14:11:36,650][00980] Avg episode rewards: #0: 20.196, true rewards: #0: 8.896
+[2023-02-24 14:11:36,652][00980] Avg episode reward: 20.196, avg true_objective: 8.896
+[2023-02-24 14:12:30,762][00980] Replay video saved to /content/train_dir/default_experiment/replay.mp4!