diff --git "a/sf_log.txt" "b/sf_log.txt" --- "a/sf_log.txt" +++ "b/sf_log.txt" @@ -1,17 +1,17 @@ -[2023-07-04 14:52:45,345][00220] Saving configuration to /content/train_dir/default_experiment/config.json... -[2023-07-04 14:52:45,348][00220] Rollout worker 0 uses device cpu -[2023-07-04 14:52:45,352][00220] Rollout worker 1 uses device cpu -[2023-07-04 14:52:45,355][00220] Rollout worker 2 uses device cpu -[2023-07-04 14:52:45,357][00220] Rollout worker 3 uses device cpu -[2023-07-04 14:52:45,358][00220] Rollout worker 4 uses device cpu -[2023-07-04 14:52:45,359][00220] Rollout worker 5 uses device cpu -[2023-07-04 14:52:45,363][00220] Rollout worker 6 uses device cpu -[2023-07-04 14:52:45,364][00220] Rollout worker 7 uses device cpu -[2023-07-04 14:52:45,565][00220] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:52:45,567][00220] InferenceWorker_p0-w0: min num requests: 2 -[2023-07-04 14:52:45,611][00220] Starting all processes... -[2023-07-04 14:52:45,614][00220] Starting process learner_proc0 -[2023-07-04 14:52:45,621][00220] EvtLoop [Runner_EvtLoop, process=main process 220] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() +[2023-07-04 15:18:01,007][00468] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:18:01,012][00468] Rollout worker 0 uses device cpu +[2023-07-04 15:18:01,022][00468] Rollout worker 1 uses device cpu +[2023-07-04 15:18:01,023][00468] Rollout worker 2 uses device cpu +[2023-07-04 15:18:01,025][00468] Rollout worker 3 uses device cpu +[2023-07-04 15:18:01,030][00468] Rollout worker 4 uses device cpu +[2023-07-04 15:18:01,031][00468] Rollout worker 5 uses device cpu +[2023-07-04 15:18:01,036][00468] Rollout worker 6 uses device cpu +[2023-07-04 15:18:01,037][00468] Rollout worker 7 uses device cpu +[2023-07-04 15:18:01,231][00468] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:18:01,237][00468] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:18:01,287][00468] Starting all processes... 
+[2023-07-04 15:18:01,289][00468] Starting process learner_proc0 +[2023-07-04 15:18:01,295][00468] EvtLoop [Runner_EvtLoop, process=main process 468] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal slot_callable(*args) @@ -34,8 +34,8 @@ Traceback (most recent call last): File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump ForkingPickler(file, protocol).dump(obj) TypeError: cannot pickle 'TLSBuffer' object -[2023-07-04 14:52:45,628][00220] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop -[2023-07-04 14:52:45,631][00220] Uncaught exception in Runner evt loop +[2023-07-04 15:18:01,303][00468] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop +[2023-07-04 15:18:01,304][00468] Uncaught exception in Runner evt loop Traceback (most recent call last): File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run evt_loop_status = self.event_loop.exec() @@ -68,49 +68,975 @@ Traceback (most recent call last): File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump ForkingPickler(file, protocol).dump(obj) TypeError: cannot pickle 'TLSBuffer' object -[2023-07-04 14:52:45,636][00220] Runner profile tree view: -main_loop: 0.0248 -[2023-07-04 14:52:45,637][00220] Collected {}, FPS: 0.0 -[2023-07-04 14:56:08,351][13487] Saving configuration to /content/train_dir/default_experiment/cfg.json... -[2023-07-04 14:56:08,364][13487] Rollout worker 0 uses device cpu -[2023-07-04 14:56:08,368][13487] Rollout worker 1 uses device cpu -[2023-07-04 14:56:08,371][13487] Rollout worker 2 uses device cpu -[2023-07-04 14:56:08,378][13487] Rollout worker 3 uses device cpu -[2023-07-04 14:56:08,380][13487] Rollout worker 4 uses device cpu -[2023-07-04 14:56:08,381][13487] Rollout worker 5 uses device cpu -[2023-07-04 14:56:08,383][13487] Rollout worker 6 uses device cpu -[2023-07-04 14:56:08,384][13487] Rollout worker 7 uses device cpu -[2023-07-04 14:56:08,681][13487] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:56:08,686][13487] InferenceWorker_p0-w0: min num requests: 2 -[2023-07-04 14:56:08,755][13487] Starting all processes... -[2023-07-04 14:56:08,764][13487] Starting process learner_proc0 -[2023-07-04 14:56:08,849][13487] Starting all processes... 
-[2023-07-04 14:56:08,965][13487] Starting process inference_proc0-0 -[2023-07-04 14:56:08,971][13487] Starting process rollout_proc0 -[2023-07-04 14:56:08,971][13487] Starting process rollout_proc1 -[2023-07-04 14:56:08,971][13487] Starting process rollout_proc2 -[2023-07-04 14:56:08,971][13487] Starting process rollout_proc3 -[2023-07-04 14:56:08,972][13487] Starting process rollout_proc4 -[2023-07-04 14:56:08,972][13487] Starting process rollout_proc5 -[2023-07-04 14:56:08,972][13487] Starting process rollout_proc6 -[2023-07-04 14:56:08,972][13487] Starting process rollout_proc7 -[2023-07-04 14:56:22,840][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:56:22,855][13825] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0 -[2023-07-04 14:56:22,937][13825] Num visible devices: 1 -[2023-07-04 14:56:22,984][13825] Starting seed is not provided -[2023-07-04 14:56:22,984][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:56:22,984][13825] Initializing actor-critic model on device cuda:0 -[2023-07-04 14:56:22,985][13825] RunningMeanStd input shape: (3, 72, 128) -[2023-07-04 14:56:22,988][13825] RunningMeanStd input shape: (1,) -[2023-07-04 14:56:23,216][13825] ConvEncoder: input_channels=3 -[2023-07-04 14:56:24,689][13838] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:56:24,689][13838] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0 -[2023-07-04 14:56:24,866][13838] Num visible devices: 1 -[2023-07-04 14:56:25,312][13825] Conv encoder output size: 512 -[2023-07-04 14:56:25,328][13825] Policy head output size: 512 -[2023-07-04 14:56:25,504][13846] Worker 7 uses CPU cores [1] -[2023-07-04 14:56:25,522][13825] Created Actor Critic model with architecture: -[2023-07-04 14:56:25,533][13825] ActorCriticSharedWeights( +[2023-07-04 15:18:01,307][00468] Runner profile tree view: +main_loop: 0.0203 +[2023-07-04 15:18:01,310][00468] Collected {}, FPS: 0.0 +[2023-07-04 15:18:20,039][00468] Environment doom_basic already registered, overwriting... +[2023-07-04 15:18:20,041][00468] Environment doom_two_colors_easy already registered, overwriting... +[2023-07-04 15:18:20,043][00468] Environment doom_two_colors_hard already registered, overwriting... +[2023-07-04 15:18:20,044][00468] Environment doom_dm already registered, overwriting... +[2023-07-04 15:18:20,046][00468] Environment doom_dwango5 already registered, overwriting... +[2023-07-04 15:18:20,047][00468] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-07-04 15:18:20,049][00468] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-07-04 15:18:20,050][00468] Environment doom_my_way_home already registered, overwriting... +[2023-07-04 15:18:20,051][00468] Environment doom_deadly_corridor already registered, overwriting... +[2023-07-04 15:18:20,052][00468] Environment doom_defend_the_center already registered, overwriting... +[2023-07-04 15:18:20,053][00468] Environment doom_defend_the_line already registered, overwriting... +[2023-07-04 15:18:20,054][00468] Environment doom_health_gathering already registered, overwriting... +[2023-07-04 15:18:20,056][00468] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-07-04 15:18:20,057][00468] Environment doom_battle already registered, overwriting... +[2023-07-04 15:18:20,058][00468] Environment doom_battle2 already registered, overwriting... 
+[2023-07-04 15:18:20,059][00468] Environment doom_duel_bots already registered, overwriting... +[2023-07-04 15:18:20,060][00468] Environment doom_deathmatch_bots already registered, overwriting... +[2023-07-04 15:18:20,061][00468] Environment doom_duel already registered, overwriting... +[2023-07-04 15:18:20,062][00468] Environment doom_deathmatch_full already registered, overwriting... +[2023-07-04 15:18:20,064][00468] Environment doom_benchmark already registered, overwriting... +[2023-07-04 15:18:20,065][00468] register_encoder_factory: +[2023-07-04 15:18:20,087][00468] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json +[2023-07-04 15:18:20,099][00468] Experiment dir /content/train_dir/default_experiment already exists! +[2023-07-04 15:18:20,100][00468] Resuming existing experiment from /content/train_dir/default_experiment... +[2023-07-04 15:18:20,101][00468] Weights and Biases integration disabled +[2023-07-04 15:18:20,104][00468] Environment var CUDA_VISIBLE_DEVICES is 0 + +[2023-07-04 15:18:21,502][00468] Starting experiment with the following configuration: +help=False +algo=APPO +env=doom_health_gathering_supreme +experiment=default_experiment +train_dir=/content/train_dir +restart_behavior=resume +device=gpu +seed=None +num_policies=1 +async_rl=True +serial_mode=False +batched_sampling=False +num_batches_to_accumulate=2 +worker_num_splits=2 +policy_workers_per_policy=1 +max_policy_lag=1000 +num_workers=8 +num_envs_per_worker=4 +batch_size=1024 +num_batches_per_epoch=1 +num_epochs=1 +rollout=32 +recurrence=32 +shuffle_minibatches=False +gamma=0.99 +reward_scale=1.0 +reward_clip=1000.0 +value_bootstrap=False +normalize_returns=True +exploration_loss_coeff=0.001 +value_loss_coeff=0.5 +kl_loss_coeff=0.0 +exploration_loss=symmetric_kl +gae_lambda=0.95 +ppo_clip_ratio=0.1 +ppo_clip_value=0.2 +with_vtrace=False +vtrace_rho=1.0 +vtrace_c=1.0 +optimizer=adam +adam_eps=1e-06 +adam_beta1=0.9 +adam_beta2=0.999 +max_grad_norm=4.0 +learning_rate=0.0001 +lr_schedule=constant +lr_schedule_kl_threshold=0.008 +obs_subtract_mean=0.0 +obs_scale=255.0 +normalize_input=True +normalize_input_keys=None +decorrelate_experience_max_seconds=0 +decorrelate_envs_on_one_worker=True +actor_worker_gpus=[] +set_workers_cpu_affinity=True +force_envs_single_thread=False +default_niceness=0 +log_to_file=True +experiment_summaries_interval=10 +flush_summaries_interval=30 +stats_avg=100 +summaries_use_frameskip=True +heartbeat_interval=20 +heartbeat_reporting_interval=600 +train_for_env_steps=4000000 +train_for_seconds=10000000000 +save_every_sec=120 +keep_checkpoints=2 +load_checkpoint_kind=latest +save_milestones_sec=-1 +save_best_every_sec=5 +save_best_metric=reward +save_best_after=100000 +benchmark=False +encoder_mlp_layers=[512, 512] +encoder_conv_architecture=convnet_simple +encoder_conv_mlp_layers=[512] +use_rnn=True +rnn_size=512 +rnn_type=gru +rnn_num_layers=1 +decoder_mlp_layers=[] +nonlinearity=elu +policy_initialization=orthogonal +policy_init_gain=1.0 +actor_critic_share_weights=True +adaptive_stddev=True +continuous_tanh_scale=0.0 +initial_stddev=1.0 +use_env_info_cache=False +env_gpu_actions=False +env_gpu_observations=True +env_frameskip=4 +env_framestack=1 +pixel_format=CHW +use_record_episode_statistics=False +with_wandb=False +wandb_user=None +wandb_project=sample_factory +wandb_group=None +wandb_job_type=SF +wandb_tags=[] +with_pbt=False +pbt_mix_policies_in_one_env=True +pbt_period_env_steps=5000000 +pbt_start_mutation=20000000 
+pbt_replace_fraction=0.3 +pbt_mutation_rate=0.15 +pbt_replace_reward_gap=0.1 +pbt_replace_reward_gap_absolute=1e-06 +pbt_optimize_gamma=False +pbt_target_objective=true_objective +pbt_perturb_min=1.1 +pbt_perturb_max=1.5 +num_agents=-1 +num_humans=0 +num_bots=-1 +start_bot_difficulty=None +timelimit=None +res_w=128 +res_h=72 +wide_aspect_ratio=False +eval_env_frameskip=1 +fps=35 +command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000 +cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000} +git_hash=unknown +git_repo_name=not a git repository +train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher +[2023-07-04 15:18:21,505][00468] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:18:21,507][00468] Rollout worker 0 uses device cpu +[2023-07-04 15:18:21,510][00468] Rollout worker 1 uses device cpu +[2023-07-04 15:18:21,512][00468] Rollout worker 2 uses device cpu +[2023-07-04 15:18:21,514][00468] Rollout worker 3 uses device cpu +[2023-07-04 15:18:21,515][00468] Rollout worker 4 uses device cpu +[2023-07-04 15:18:21,517][00468] Rollout worker 5 uses device cpu +[2023-07-04 15:18:21,518][00468] Rollout worker 6 uses device cpu +[2023-07-04 15:18:21,519][00468] Rollout worker 7 uses device cpu +[2023-07-04 15:18:21,647][00468] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:18:21,649][00468] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:18:21,683][00468] Starting all processes... +[2023-07-04 15:18:21,684][00468] Starting process learner_proc0 +[2023-07-04 15:18:21,690][00468] EvtLoop [Runner_EvtLoop, process=main process 468] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:21,692][00468] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop +[2023-07-04 15:18:21,696][00468] Uncaught exception in Runner evt loop +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run + evt_loop_status = 
self.event_loop.exec() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec + while self._loop_iteration(): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration + self._process_signal(s) + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:21,698][00468] Runner profile tree view: +main_loop: 0.0147 +[2023-07-04 15:18:21,699][00468] Collected {}, FPS: 0.0 +[2023-07-04 15:18:34,241][00468] Environment doom_basic already registered, overwriting... +[2023-07-04 15:18:34,244][00468] Environment doom_two_colors_easy already registered, overwriting... +[2023-07-04 15:18:34,245][00468] Environment doom_two_colors_hard already registered, overwriting... +[2023-07-04 15:18:34,251][00468] Environment doom_dm already registered, overwriting... +[2023-07-04 15:18:34,252][00468] Environment doom_dwango5 already registered, overwriting... +[2023-07-04 15:18:34,253][00468] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-07-04 15:18:34,255][00468] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-07-04 15:18:34,258][00468] Environment doom_my_way_home already registered, overwriting... +[2023-07-04 15:18:34,259][00468] Environment doom_deadly_corridor already registered, overwriting... +[2023-07-04 15:18:34,260][00468] Environment doom_defend_the_center already registered, overwriting... +[2023-07-04 15:18:34,261][00468] Environment doom_defend_the_line already registered, overwriting... +[2023-07-04 15:18:34,263][00468] Environment doom_health_gathering already registered, overwriting... +[2023-07-04 15:18:34,264][00468] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-07-04 15:18:34,265][00468] Environment doom_battle already registered, overwriting... +[2023-07-04 15:18:34,266][00468] Environment doom_battle2 already registered, overwriting... +[2023-07-04 15:18:34,267][00468] Environment doom_duel_bots already registered, overwriting... 
+[2023-07-04 15:18:34,268][00468] Environment doom_deathmatch_bots already registered, overwriting... +[2023-07-04 15:18:34,270][00468] Environment doom_duel already registered, overwriting... +[2023-07-04 15:18:34,271][00468] Environment doom_deathmatch_full already registered, overwriting... +[2023-07-04 15:18:34,272][00468] Environment doom_benchmark already registered, overwriting... +[2023-07-04 15:18:34,273][00468] register_encoder_factory: +[2023-07-04 15:18:34,300][00468] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json +[2023-07-04 15:18:34,305][00468] Experiment dir /content/train_dir/default_experiment already exists! +[2023-07-04 15:18:34,309][00468] Resuming existing experiment from /content/train_dir/default_experiment... +[2023-07-04 15:18:34,310][00468] Weights and Biases integration disabled +[2023-07-04 15:18:34,313][00468] Environment var CUDA_VISIBLE_DEVICES is 0 + +[2023-07-04 15:18:35,720][00468] Starting experiment with the following configuration: +help=False +algo=APPO +env=doom_health_gathering_supreme +experiment=default_experiment +train_dir=/content/train_dir +restart_behavior=resume +device=gpu +seed=None +num_policies=1 +async_rl=True +serial_mode=False +batched_sampling=False +num_batches_to_accumulate=2 +worker_num_splits=2 +policy_workers_per_policy=1 +max_policy_lag=1000 +num_workers=8 +num_envs_per_worker=4 +batch_size=1024 +num_batches_per_epoch=1 +num_epochs=1 +rollout=32 +recurrence=32 +shuffle_minibatches=False +gamma=0.99 +reward_scale=1.0 +reward_clip=1000.0 +value_bootstrap=False +normalize_returns=True +exploration_loss_coeff=0.001 +value_loss_coeff=0.5 +kl_loss_coeff=0.0 +exploration_loss=symmetric_kl +gae_lambda=0.95 +ppo_clip_ratio=0.1 +ppo_clip_value=0.2 +with_vtrace=False +vtrace_rho=1.0 +vtrace_c=1.0 +optimizer=adam +adam_eps=1e-06 +adam_beta1=0.9 +adam_beta2=0.999 +max_grad_norm=4.0 +learning_rate=0.0001 +lr_schedule=constant +lr_schedule_kl_threshold=0.008 +obs_subtract_mean=0.0 +obs_scale=255.0 +normalize_input=True +normalize_input_keys=None +decorrelate_experience_max_seconds=0 +decorrelate_envs_on_one_worker=True +actor_worker_gpus=[] +set_workers_cpu_affinity=True +force_envs_single_thread=False +default_niceness=0 +log_to_file=True +experiment_summaries_interval=10 +flush_summaries_interval=30 +stats_avg=100 +summaries_use_frameskip=True +heartbeat_interval=20 +heartbeat_reporting_interval=600 +train_for_env_steps=4000000 +train_for_seconds=10000000000 +save_every_sec=120 +keep_checkpoints=2 +load_checkpoint_kind=latest +save_milestones_sec=-1 +save_best_every_sec=5 +save_best_metric=reward +save_best_after=100000 +benchmark=False +encoder_mlp_layers=[512, 512] +encoder_conv_architecture=convnet_simple +encoder_conv_mlp_layers=[512] +use_rnn=True +rnn_size=512 +rnn_type=gru +rnn_num_layers=1 +decoder_mlp_layers=[] +nonlinearity=elu +policy_initialization=orthogonal +policy_init_gain=1.0 +actor_critic_share_weights=True +adaptive_stddev=True +continuous_tanh_scale=0.0 +initial_stddev=1.0 +use_env_info_cache=False +env_gpu_actions=False +env_gpu_observations=True +env_frameskip=4 +env_framestack=1 +pixel_format=CHW +use_record_episode_statistics=False +with_wandb=False +wandb_user=None +wandb_project=sample_factory +wandb_group=None +wandb_job_type=SF +wandb_tags=[] +with_pbt=False +pbt_mix_policies_in_one_env=True +pbt_period_env_steps=5000000 +pbt_start_mutation=20000000 +pbt_replace_fraction=0.3 +pbt_mutation_rate=0.15 +pbt_replace_reward_gap=0.1 +pbt_replace_reward_gap_absolute=1e-06 
+pbt_optimize_gamma=False +pbt_target_objective=true_objective +pbt_perturb_min=1.1 +pbt_perturb_max=1.5 +num_agents=-1 +num_humans=0 +num_bots=-1 +start_bot_difficulty=None +timelimit=None +res_w=128 +res_h=72 +wide_aspect_ratio=False +eval_env_frameskip=1 +fps=35 +command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000 +cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000} +git_hash=unknown +git_repo_name=not a git repository +train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher +[2023-07-04 15:18:35,721][00468] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:18:35,728][00468] Rollout worker 0 uses device cpu +[2023-07-04 15:18:35,729][00468] Rollout worker 1 uses device cpu +[2023-07-04 15:18:35,731][00468] Rollout worker 2 uses device cpu +[2023-07-04 15:18:35,732][00468] Rollout worker 3 uses device cpu +[2023-07-04 15:18:35,734][00468] Rollout worker 4 uses device cpu +[2023-07-04 15:18:35,736][00468] Rollout worker 5 uses device cpu +[2023-07-04 15:18:35,737][00468] Rollout worker 6 uses device cpu +[2023-07-04 15:18:35,739][00468] Rollout worker 7 uses device cpu +[2023-07-04 15:18:35,866][00468] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:18:35,868][00468] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:18:35,898][00468] Starting all processes... +[2023-07-04 15:18:35,901][00468] Starting process learner_proc0 +[2023-07-04 15:18:35,905][00468] EvtLoop [Runner_EvtLoop, process=main process 468] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:35,907][00468] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop +[2023-07-04 15:18:35,910][00468] Uncaught exception in Runner evt loop +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run + evt_loop_status = self.event_loop.exec() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec + 
raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec + while self._loop_iteration(): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration + self._process_signal(s) + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:35,915][00468] Runner profile tree view: +main_loop: 0.0170 +[2023-07-04 15:18:35,916][00468] Collected {}, FPS: 0.0 +[2023-07-04 15:18:39,760][00468] Environment doom_basic already registered, overwriting... +[2023-07-04 15:18:39,763][00468] Environment doom_two_colors_easy already registered, overwriting... +[2023-07-04 15:18:39,765][00468] Environment doom_two_colors_hard already registered, overwriting... +[2023-07-04 15:18:39,769][00468] Environment doom_dm already registered, overwriting... +[2023-07-04 15:18:39,770][00468] Environment doom_dwango5 already registered, overwriting... +[2023-07-04 15:18:39,772][00468] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-07-04 15:18:39,773][00468] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-07-04 15:18:39,774][00468] Environment doom_my_way_home already registered, overwriting... +[2023-07-04 15:18:39,775][00468] Environment doom_deadly_corridor already registered, overwriting... +[2023-07-04 15:18:39,776][00468] Environment doom_defend_the_center already registered, overwriting... +[2023-07-04 15:18:39,778][00468] Environment doom_defend_the_line already registered, overwriting... +[2023-07-04 15:18:39,778][00468] Environment doom_health_gathering already registered, overwriting... +[2023-07-04 15:18:39,779][00468] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-07-04 15:18:39,780][00468] Environment doom_battle already registered, overwriting... +[2023-07-04 15:18:39,781][00468] Environment doom_battle2 already registered, overwriting... +[2023-07-04 15:18:39,782][00468] Environment doom_duel_bots already registered, overwriting... +[2023-07-04 15:18:39,783][00468] Environment doom_deathmatch_bots already registered, overwriting... 
+[2023-07-04 15:18:39,785][00468] Environment doom_duel already registered, overwriting... +[2023-07-04 15:18:39,786][00468] Environment doom_deathmatch_full already registered, overwriting... +[2023-07-04 15:18:39,787][00468] Environment doom_benchmark already registered, overwriting... +[2023-07-04 15:18:39,788][00468] register_encoder_factory: +[2023-07-04 15:18:39,818][00468] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json +[2023-07-04 15:18:39,828][00468] Experiment dir /content/train_dir/default_experiment already exists! +[2023-07-04 15:18:39,835][00468] Resuming existing experiment from /content/train_dir/default_experiment... +[2023-07-04 15:18:39,837][00468] Weights and Biases integration disabled +[2023-07-04 15:18:39,841][00468] Environment var CUDA_VISIBLE_DEVICES is 0 + +[2023-07-04 15:18:41,653][00468] Starting experiment with the following configuration: +help=False +algo=APPO +env=doom_health_gathering_supreme +experiment=default_experiment +train_dir=/content/train_dir +restart_behavior=resume +device=gpu +seed=None +num_policies=1 +async_rl=True +serial_mode=False +batched_sampling=False +num_batches_to_accumulate=2 +worker_num_splits=2 +policy_workers_per_policy=1 +max_policy_lag=1000 +num_workers=8 +num_envs_per_worker=4 +batch_size=1024 +num_batches_per_epoch=1 +num_epochs=1 +rollout=32 +recurrence=32 +shuffle_minibatches=False +gamma=0.99 +reward_scale=1.0 +reward_clip=1000.0 +value_bootstrap=False +normalize_returns=True +exploration_loss_coeff=0.001 +value_loss_coeff=0.5 +kl_loss_coeff=0.0 +exploration_loss=symmetric_kl +gae_lambda=0.95 +ppo_clip_ratio=0.1 +ppo_clip_value=0.2 +with_vtrace=False +vtrace_rho=1.0 +vtrace_c=1.0 +optimizer=adam +adam_eps=1e-06 +adam_beta1=0.9 +adam_beta2=0.999 +max_grad_norm=4.0 +learning_rate=0.0001 +lr_schedule=constant +lr_schedule_kl_threshold=0.008 +obs_subtract_mean=0.0 +obs_scale=255.0 +normalize_input=True +normalize_input_keys=None +decorrelate_experience_max_seconds=0 +decorrelate_envs_on_one_worker=True +actor_worker_gpus=[] +set_workers_cpu_affinity=True +force_envs_single_thread=False +default_niceness=0 +log_to_file=True +experiment_summaries_interval=10 +flush_summaries_interval=30 +stats_avg=100 +summaries_use_frameskip=True +heartbeat_interval=20 +heartbeat_reporting_interval=600 +train_for_env_steps=4000000 +train_for_seconds=10000000000 +save_every_sec=120 +keep_checkpoints=2 +load_checkpoint_kind=latest +save_milestones_sec=-1 +save_best_every_sec=5 +save_best_metric=reward +save_best_after=100000 +benchmark=False +encoder_mlp_layers=[512, 512] +encoder_conv_architecture=convnet_simple +encoder_conv_mlp_layers=[512] +use_rnn=True +rnn_size=512 +rnn_type=gru +rnn_num_layers=1 +decoder_mlp_layers=[] +nonlinearity=elu +policy_initialization=orthogonal +policy_init_gain=1.0 +actor_critic_share_weights=True +adaptive_stddev=True +continuous_tanh_scale=0.0 +initial_stddev=1.0 +use_env_info_cache=False +env_gpu_actions=False +env_gpu_observations=True +env_frameskip=4 +env_framestack=1 +pixel_format=CHW +use_record_episode_statistics=False +with_wandb=False +wandb_user=None +wandb_project=sample_factory +wandb_group=None +wandb_job_type=SF +wandb_tags=[] +with_pbt=False +pbt_mix_policies_in_one_env=True +pbt_period_env_steps=5000000 +pbt_start_mutation=20000000 +pbt_replace_fraction=0.3 +pbt_mutation_rate=0.15 +pbt_replace_reward_gap=0.1 +pbt_replace_reward_gap_absolute=1e-06 +pbt_optimize_gamma=False +pbt_target_objective=true_objective +pbt_perturb_min=1.1 +pbt_perturb_max=1.5 
+num_agents=-1 +num_humans=0 +num_bots=-1 +start_bot_difficulty=None +timelimit=None +res_w=128 +res_h=72 +wide_aspect_ratio=False +eval_env_frameskip=1 +fps=35 +command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000 +cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000} +git_hash=unknown +git_repo_name=not a git repository +train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher +[2023-07-04 15:18:41,654][00468] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:18:41,661][00468] Rollout worker 0 uses device cpu +[2023-07-04 15:18:41,662][00468] Rollout worker 1 uses device cpu +[2023-07-04 15:18:41,664][00468] Rollout worker 2 uses device cpu +[2023-07-04 15:18:41,666][00468] Rollout worker 3 uses device cpu +[2023-07-04 15:18:41,671][00468] Rollout worker 4 uses device cpu +[2023-07-04 15:18:41,673][00468] Rollout worker 5 uses device cpu +[2023-07-04 15:18:41,675][00468] Rollout worker 6 uses device cpu +[2023-07-04 15:18:41,676][00468] Rollout worker 7 uses device cpu +[2023-07-04 15:18:41,799][00468] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:18:41,801][00468] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:18:41,830][00468] Starting all processes... +[2023-07-04 15:18:41,831][00468] Starting process learner_proc0 +[2023-07-04 15:18:41,838][00468] EvtLoop [Runner_EvtLoop, process=main process 468] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:41,840][00468] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop +[2023-07-04 15:18:41,842][00468] Uncaught exception in Runner evt loop +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run + evt_loop_status = self.event_loop.exec() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec + 
while self._loop_iteration(): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration + self._process_signal(s) + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:18:41,845][00468] Runner profile tree view: +main_loop: 0.0146 +[2023-07-04 15:18:41,847][00468] Collected {}, FPS: 0.0 +[2023-07-04 15:19:10,892][00468] Environment doom_basic already registered, overwriting... +[2023-07-04 15:19:10,905][00468] Environment doom_two_colors_easy already registered, overwriting... +[2023-07-04 15:19:10,910][00468] Environment doom_two_colors_hard already registered, overwriting... +[2023-07-04 15:19:10,917][00468] Environment doom_dm already registered, overwriting... +[2023-07-04 15:19:10,920][00468] Environment doom_dwango5 already registered, overwriting... +[2023-07-04 15:19:10,921][00468] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-07-04 15:19:10,924][00468] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-07-04 15:19:10,927][00468] Environment doom_my_way_home already registered, overwriting... +[2023-07-04 15:19:10,929][00468] Environment doom_deadly_corridor already registered, overwriting... +[2023-07-04 15:19:10,932][00468] Environment doom_defend_the_center already registered, overwriting... +[2023-07-04 15:19:10,935][00468] Environment doom_defend_the_line already registered, overwriting... +[2023-07-04 15:19:10,944][00468] Environment doom_health_gathering already registered, overwriting... +[2023-07-04 15:19:10,945][00468] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-07-04 15:19:10,949][00468] Environment doom_battle already registered, overwriting... +[2023-07-04 15:19:10,950][00468] Environment doom_battle2 already registered, overwriting... +[2023-07-04 15:19:10,957][00468] Environment doom_duel_bots already registered, overwriting... +[2023-07-04 15:19:10,958][00468] Environment doom_deathmatch_bots already registered, overwriting... +[2023-07-04 15:19:10,959][00468] Environment doom_duel already registered, overwriting... 
+[2023-07-04 15:19:10,965][00468] Environment doom_deathmatch_full already registered, overwriting... +[2023-07-04 15:19:10,966][00468] Environment doom_benchmark already registered, overwriting... +[2023-07-04 15:19:10,968][00468] register_encoder_factory: +[2023-07-04 15:19:11,035][00468] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json +[2023-07-04 15:19:11,041][00468] Experiment dir /content/train_dir/default_experiment already exists! +[2023-07-04 15:19:11,045][00468] Resuming existing experiment from /content/train_dir/default_experiment... +[2023-07-04 15:19:11,050][00468] Weights and Biases integration disabled +[2023-07-04 15:19:11,056][00468] Environment var CUDA_VISIBLE_DEVICES is 0 + +[2023-07-04 15:19:13,557][00468] Starting experiment with the following configuration: +help=False +algo=APPO +env=doom_health_gathering_supreme +experiment=default_experiment +train_dir=/content/train_dir +restart_behavior=resume +device=gpu +seed=None +num_policies=1 +async_rl=True +serial_mode=False +batched_sampling=False +num_batches_to_accumulate=2 +worker_num_splits=2 +policy_workers_per_policy=1 +max_policy_lag=1000 +num_workers=8 +num_envs_per_worker=4 +batch_size=1024 +num_batches_per_epoch=1 +num_epochs=1 +rollout=32 +recurrence=32 +shuffle_minibatches=False +gamma=0.99 +reward_scale=1.0 +reward_clip=1000.0 +value_bootstrap=False +normalize_returns=True +exploration_loss_coeff=0.001 +value_loss_coeff=0.5 +kl_loss_coeff=0.0 +exploration_loss=symmetric_kl +gae_lambda=0.95 +ppo_clip_ratio=0.1 +ppo_clip_value=0.2 +with_vtrace=False +vtrace_rho=1.0 +vtrace_c=1.0 +optimizer=adam +adam_eps=1e-06 +adam_beta1=0.9 +adam_beta2=0.999 +max_grad_norm=4.0 +learning_rate=0.0001 +lr_schedule=constant +lr_schedule_kl_threshold=0.008 +obs_subtract_mean=0.0 +obs_scale=255.0 +normalize_input=True +normalize_input_keys=None +decorrelate_experience_max_seconds=0 +decorrelate_envs_on_one_worker=True +actor_worker_gpus=[] +set_workers_cpu_affinity=True +force_envs_single_thread=False +default_niceness=0 +log_to_file=True +experiment_summaries_interval=10 +flush_summaries_interval=30 +stats_avg=100 +summaries_use_frameskip=True +heartbeat_interval=20 +heartbeat_reporting_interval=600 +train_for_env_steps=4000000 +train_for_seconds=10000000000 +save_every_sec=120 +keep_checkpoints=2 +load_checkpoint_kind=latest +save_milestones_sec=-1 +save_best_every_sec=5 +save_best_metric=reward +save_best_after=100000 +benchmark=False +encoder_mlp_layers=[512, 512] +encoder_conv_architecture=convnet_simple +encoder_conv_mlp_layers=[512] +use_rnn=True +rnn_size=512 +rnn_type=gru +rnn_num_layers=1 +decoder_mlp_layers=[] +nonlinearity=elu +policy_initialization=orthogonal +policy_init_gain=1.0 +actor_critic_share_weights=True +adaptive_stddev=True +continuous_tanh_scale=0.0 +initial_stddev=1.0 +use_env_info_cache=False +env_gpu_actions=False +env_gpu_observations=True +env_frameskip=4 +env_framestack=1 +pixel_format=CHW +use_record_episode_statistics=False +with_wandb=False +wandb_user=None +wandb_project=sample_factory +wandb_group=None +wandb_job_type=SF +wandb_tags=[] +with_pbt=False +pbt_mix_policies_in_one_env=True +pbt_period_env_steps=5000000 +pbt_start_mutation=20000000 +pbt_replace_fraction=0.3 +pbt_mutation_rate=0.15 +pbt_replace_reward_gap=0.1 +pbt_replace_reward_gap_absolute=1e-06 +pbt_optimize_gamma=False +pbt_target_objective=true_objective +pbt_perturb_min=1.1 +pbt_perturb_max=1.5 +num_agents=-1 +num_humans=0 +num_bots=-1 +start_bot_difficulty=None +timelimit=None 
+res_w=128 +res_h=72 +wide_aspect_ratio=False +eval_env_frameskip=1 +fps=35 +command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000 +cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000} +git_hash=unknown +git_repo_name=not a git repository +train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher +[2023-07-04 15:19:13,564][00468] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:19:13,569][00468] Rollout worker 0 uses device cpu +[2023-07-04 15:19:13,572][00468] Rollout worker 1 uses device cpu +[2023-07-04 15:19:13,574][00468] Rollout worker 2 uses device cpu +[2023-07-04 15:19:13,576][00468] Rollout worker 3 uses device cpu +[2023-07-04 15:19:13,581][00468] Rollout worker 4 uses device cpu +[2023-07-04 15:19:13,584][00468] Rollout worker 5 uses device cpu +[2023-07-04 15:19:13,586][00468] Rollout worker 6 uses device cpu +[2023-07-04 15:19:13,591][00468] Rollout worker 7 uses device cpu +[2023-07-04 15:19:13,873][00468] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:19:13,879][00468] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:19:13,918][00468] Starting all processes... +[2023-07-04 15:19:13,922][00468] Starting process learner_proc0 +[2023-07-04 15:19:13,929][00468] EvtLoop [Runner_EvtLoop, process=main process 468] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=() +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:19:13,933][00468] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop +[2023-07-04 15:19:13,937][00468] Uncaught exception in Runner evt loop +Traceback (most recent call last): + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run + evt_loop_status = self.event_loop.exec() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec + while self._loop_iteration(): + File 
"/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration + self._process_signal(s) + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal + raise exc + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal + slot_callable(*args) + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start + self._start_processes() + File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes + p.start() + File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start + self._process.start() + File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start + self._popen = self._Popen(self) + File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen + return Popen(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) +TypeError: cannot pickle 'TLSBuffer' object +[2023-07-04 15:19:13,942][00468] Runner profile tree view: +main_loop: 0.0244 +[2023-07-04 15:19:13,947][00468] Collected {}, FPS: 0.0 +[2023-07-04 15:19:44,624][11762] Saving configuration to /content/train_dir/default_experiment/cfg.json... +[2023-07-04 15:19:44,628][11762] Rollout worker 0 uses device cpu +[2023-07-04 15:19:44,630][11762] Rollout worker 1 uses device cpu +[2023-07-04 15:19:44,635][11762] Rollout worker 2 uses device cpu +[2023-07-04 15:19:44,636][11762] Rollout worker 3 uses device cpu +[2023-07-04 15:19:44,637][11762] Rollout worker 4 uses device cpu +[2023-07-04 15:19:44,640][11762] Rollout worker 5 uses device cpu +[2023-07-04 15:19:44,644][11762] Rollout worker 6 uses device cpu +[2023-07-04 15:19:44,645][11762] Rollout worker 7 uses device cpu +[2023-07-04 15:19:44,773][11762] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:19:44,774][11762] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-04 15:19:44,811][11762] Starting all processes... +[2023-07-04 15:19:44,814][11762] Starting process learner_proc0 +[2023-07-04 15:19:44,861][11762] Starting all processes... 
+[2023-07-04 15:19:44,868][11762] Starting process inference_proc0-0 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc0 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc1 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc2 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc3 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc4 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc5 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc6 +[2023-07-04 15:19:44,870][11762] Starting process rollout_proc7 +[2023-07-04 15:19:55,959][11911] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:19:55,960][11911] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0 +[2023-07-04 15:19:56,006][11911] Num visible devices: 1 +[2023-07-04 15:19:56,050][11911] Starting seed is not provided +[2023-07-04 15:19:56,050][11911] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:19:56,050][11911] Initializing actor-critic model on device cuda:0 +[2023-07-04 15:19:56,051][11911] RunningMeanStd input shape: (3, 72, 128) +[2023-07-04 15:19:56,052][11911] RunningMeanStd input shape: (1,) +[2023-07-04 15:19:56,189][11911] ConvEncoder: input_channels=3 +[2023-07-04 15:19:57,005][11930] Worker 5 uses CPU cores [1] +[2023-07-04 15:19:57,040][11929] Worker 4 uses CPU cores [0] +[2023-07-04 15:19:57,054][11931] Worker 6 uses CPU cores [0] +[2023-07-04 15:19:57,159][11924] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-04 15:19:57,163][11924] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0 +[2023-07-04 15:19:57,180][11927] Worker 2 uses CPU cores [0] +[2023-07-04 15:19:57,205][11928] Worker 3 uses CPU cores [1] +[2023-07-04 15:19:57,232][11924] Num visible devices: 1 +[2023-07-04 15:19:57,246][11932] Worker 7 uses CPU cores [1] +[2023-07-04 15:19:57,272][11911] Conv encoder output size: 512 +[2023-07-04 15:19:57,272][11911] Policy head output size: 512 +[2023-07-04 15:19:57,290][11926] Worker 1 uses CPU cores [1] +[2023-07-04 15:19:57,298][11911] Created Actor Critic model with architecture: +[2023-07-04 15:19:57,298][11911] ActorCriticSharedWeights( (obs_normalizer): ObservationNormalizer( (running_mean_std): RunningMeanStdDictInPlace( (running_mean_std): ModuleDict( @@ -151,178 +1077,243 @@ main_loop: 0.0248 (distribution_linear): Linear(in_features=512, out_features=5, bias=True) ) ) -[2023-07-04 14:56:25,542][13844] Worker 4 uses CPU cores [0] -[2023-07-04 14:56:25,565][13840] Worker 1 uses CPU cores [1] -[2023-07-04 14:56:25,624][13841] Worker 2 uses CPU cores [0] -[2023-07-04 14:56:25,641][13845] Worker 6 uses CPU cores [0] -[2023-07-04 14:56:25,811][13842] Worker 3 uses CPU cores [1] -[2023-07-04 14:56:25,832][13839] Worker 0 uses CPU cores [0] -[2023-07-04 14:56:25,886][13843] Worker 5 uses CPU cores [1] -[2023-07-04 14:56:28,667][13487] Heartbeat connected on Batcher_0 -[2023-07-04 14:56:28,682][13487] Heartbeat connected on InferenceWorker_p0-w0 -[2023-07-04 14:56:28,697][13487] Heartbeat connected on RolloutWorker_w0 -[2023-07-04 14:56:28,707][13487] Heartbeat connected on RolloutWorker_w1 -[2023-07-04 14:56:28,714][13487] Heartbeat connected on RolloutWorker_w2 -[2023-07-04 14:56:28,720][13487] Heartbeat connected on RolloutWorker_w3 -[2023-07-04 14:56:28,724][13487] Heartbeat connected on RolloutWorker_w4 -[2023-07-04 14:56:28,729][13487] Heartbeat connected on RolloutWorker_w5 -[2023-07-04 
14:56:28,753][13487] Heartbeat connected on RolloutWorker_w6 -[2023-07-04 14:56:28,759][13487] Heartbeat connected on RolloutWorker_w7 -[2023-07-04 14:56:32,229][13825] Using optimizer -[2023-07-04 14:56:32,230][13825] No checkpoints found -[2023-07-04 14:56:32,230][13825] Did not load from checkpoint, starting from scratch! -[2023-07-04 14:56:32,230][13825] Initialized policy 0 weights for model version 0 -[2023-07-04 14:56:32,233][13825] Using GPUs [0] for process 0 (actually maps to GPUs [0]) -[2023-07-04 14:56:32,240][13825] LearnerWorker_p0 finished initialization! -[2023-07-04 14:56:32,241][13487] Heartbeat connected on LearnerWorker_p0 -[2023-07-04 14:56:32,459][13838] RunningMeanStd input shape: (3, 72, 128) -[2023-07-04 14:56:32,460][13838] RunningMeanStd input shape: (1,) -[2023-07-04 14:56:32,473][13838] ConvEncoder: input_channels=3 -[2023-07-04 14:56:32,585][13838] Conv encoder output size: 512 -[2023-07-04 14:56:32,586][13838] Policy head output size: 512 -[2023-07-04 14:56:33,357][13487] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) -[2023-07-04 14:56:33,926][13487] Inference worker 0-0 is ready! -[2023-07-04 14:56:33,928][13487] All inference workers are ready! Signal rollout workers to start! -[2023-07-04 14:56:34,046][13840] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,065][13841] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,066][13846] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,074][13842] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,075][13843] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,079][13845] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,077][13839] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:34,081][13844] Doom resolution: 160x120, resize resolution: (128, 72) -[2023-07-04 14:56:35,868][13840] Decorrelating experience for 0 frames... -[2023-07-04 14:56:35,862][13843] Decorrelating experience for 0 frames... -[2023-07-04 14:56:35,860][13846] Decorrelating experience for 0 frames... -[2023-07-04 14:56:36,440][13844] Decorrelating experience for 0 frames... -[2023-07-04 14:56:36,449][13841] Decorrelating experience for 0 frames... -[2023-07-04 14:56:36,444][13839] Decorrelating experience for 0 frames... -[2023-07-04 14:56:36,450][13845] Decorrelating experience for 0 frames... -[2023-07-04 14:56:37,968][13842] Decorrelating experience for 0 frames... -[2023-07-04 14:56:37,979][13843] Decorrelating experience for 32 frames... -[2023-07-04 14:56:37,986][13840] Decorrelating experience for 32 frames... -[2023-07-04 14:56:38,089][13839] Decorrelating experience for 32 frames... -[2023-07-04 14:56:38,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) -[2023-07-04 14:56:38,401][13846] Decorrelating experience for 32 frames... -[2023-07-04 14:56:39,856][13842] Decorrelating experience for 32 frames... -[2023-07-04 14:56:40,243][13843] Decorrelating experience for 64 frames... -[2023-07-04 14:56:40,337][13844] Decorrelating experience for 32 frames... -[2023-07-04 14:56:40,507][13841] Decorrelating experience for 32 frames... -[2023-07-04 14:56:40,623][13846] Decorrelating experience for 64 frames... 
-[2023-07-04 14:56:40,968][13839] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:42,209][13845] Decorrelating experience for 32 frames...
-[2023-07-04 14:56:42,228][13840] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:42,476][13842] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:42,619][13844] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:42,739][13843] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:42,924][13841] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:43,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
-[2023-07-04 14:56:43,972][13842] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:44,395][13840] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:45,050][13839] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:45,170][13845] Decorrelating experience for 64 frames...
-[2023-07-04 14:56:45,431][13844] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:45,813][13846] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:46,956][13841] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:47,275][13845] Decorrelating experience for 96 frames...
-[2023-07-04 14:56:48,355][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 2.1. Samples: 32. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
-[2023-07-04 14:56:48,361][13487] Avg episode reward: [(0, '1.747')]
-[2023-07-04 14:56:49,762][13825] Signal inference workers to stop experience collection...
-[2023-07-04 14:56:49,786][13838] InferenceWorker_p0-w0: stopping experience collection
-[2023-07-04 14:56:52,138][13825] Signal inference workers to resume experience collection...
-[2023-07-04 14:56:52,138][13838] InferenceWorker_p0-w0: resuming experience collection
-[2023-07-04 14:56:53,355][13487] Fps is (10 sec: 409.6, 60 sec: 204.8, 300 sec: 204.8). Total num frames: 4096. Throughput: 0: 120.3. Samples: 2406. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
-[2023-07-04 14:56:53,365][13487] Avg episode reward: [(0, '2.567')]
-[2023-07-04 14:56:58,359][13487] Fps is (10 sec: 2048.0, 60 sec: 819.3, 300 sec: 819.3). Total num frames: 20480. Throughput: 0: 242.9. Samples: 6072. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
-[2023-07-04 14:56:58,364][13487] Avg episode reward: [(0, '3.442')]
-[2023-07-04 14:57:03,356][13487] Fps is (10 sec: 2867.2, 60 sec: 1092.3, 300 sec: 1092.3). Total num frames: 32768. Throughput: 0: 266.5. Samples: 7996. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
-[2023-07-04 14:57:03,363][13487] Avg episode reward: [(0, '3.844')]
-[2023-07-04 14:57:05,411][13838] Updated weights for policy 0, policy_version 10 (0.0013)
-[2023-07-04 14:57:08,358][13487] Fps is (10 sec: 2866.5, 60 sec: 1404.3, 300 sec: 1404.3). Total num frames: 49152. Throughput: 0: 341.9. Samples: 11968. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:57:08,362][13487] Avg episode reward: [(0, '4.352')]
-[2023-07-04 14:57:13,355][13487] Fps is (10 sec: 3276.9, 60 sec: 1638.5, 300 sec: 1638.5). Total num frames: 65536. Throughput: 0: 424.9. Samples: 16996. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
-[2023-07-04 14:57:13,364][13487] Avg episode reward: [(0, '4.580')]
-[2023-07-04 14:57:17,518][13838] Updated weights for policy 0, policy_version 20 (0.0028)
-[2023-07-04 14:57:18,355][13487] Fps is (10 sec: 3277.7, 60 sec: 1820.5, 300 sec: 1820.5). Total num frames: 81920. Throughput: 0: 438.8. Samples: 19746. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:57:18,362][13487] Avg episode reward: [(0, '4.505')]
-[2023-07-04 14:57:23,355][13487] Fps is (10 sec: 3276.8, 60 sec: 1966.2, 300 sec: 1966.2). Total num frames: 98304. Throughput: 0: 544.0. Samples: 24482. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
-[2023-07-04 14:57:23,364][13487] Avg episode reward: [(0, '4.418')]
-[2023-07-04 14:57:28,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2010.8, 300 sec: 2010.8). Total num frames: 110592. Throughput: 0: 618.8. Samples: 27846. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
-[2023-07-04 14:57:28,358][13487] Avg episode reward: [(0, '4.334')]
-[2023-07-04 14:57:28,364][13825] Saving new best policy, reward=4.334!
-[2023-07-04 14:57:33,355][13487] Fps is (10 sec: 2048.0, 60 sec: 1979.8, 300 sec: 1979.8). Total num frames: 118784. Throughput: 0: 653.3. Samples: 29430. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
-[2023-07-04 14:57:33,365][13487] Avg episode reward: [(0, '4.259')]
-[2023-07-04 14:57:33,917][13838] Updated weights for policy 0, policy_version 30 (0.0029)
-[2023-07-04 14:57:38,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2321.1, 300 sec: 2142.6). Total num frames: 139264. Throughput: 0: 705.0. Samples: 34132. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
-[2023-07-04 14:57:38,358][13487] Avg episode reward: [(0, '4.359')]
-[2023-07-04 14:57:38,361][13825] Saving new best policy, reward=4.359!
-[2023-07-04 14:57:43,355][13487] Fps is (10 sec: 3686.4, 60 sec: 2594.1, 300 sec: 2223.6). Total num frames: 155648. Throughput: 0: 743.9. Samples: 39548. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:57:43,359][13487] Avg episode reward: [(0, '4.429')]
-[2023-07-04 14:57:43,367][13825] Saving new best policy, reward=4.429!
-[2023-07-04 14:57:46,324][13838] Updated weights for policy 0, policy_version 40 (0.0014)
-[2023-07-04 14:57:48,358][13487] Fps is (10 sec: 2866.3, 60 sec: 2798.8, 300 sec: 2239.1). Total num frames: 167936. Throughput: 0: 741.1. Samples: 41348. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:57:48,370][13487] Avg episode reward: [(0, '4.436')]
-[2023-07-04 14:57:48,372][13825] Saving new best policy, reward=4.436!
-[2023-07-04 14:57:53,358][13487] Fps is (10 sec: 2456.9, 60 sec: 2935.3, 300 sec: 2252.8). Total num frames: 180224. Throughput: 0: 727.6. Samples: 44708. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
-[2023-07-04 14:57:53,364][13487] Avg episode reward: [(0, '4.338')]
-[2023-07-04 14:57:58,356][13487] Fps is (10 sec: 2458.2, 60 sec: 2867.2, 300 sec: 2264.9). Total num frames: 192512. Throughput: 0: 701.2. Samples: 48550. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
-[2023-07-04 14:57:58,358][13487] Avg episode reward: [(0, '4.355')]
-[2023-07-04 14:58:01,471][13838] Updated weights for policy 0, policy_version 50 (0.0025)
-[2023-07-04 14:58:03,355][13487] Fps is (10 sec: 2868.0, 60 sec: 2935.5, 300 sec: 2321.1). Total num frames: 208896. Throughput: 0: 701.0. Samples: 51292. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:58:03,362][13487] Avg episode reward: [(0, '4.364')]
-[2023-07-04 14:58:03,372][13825] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000051_208896.pth...
-[2023-07-04 14:58:08,355][13487] Fps is (10 sec: 3277.0, 60 sec: 2935.6, 300 sec: 2371.4). Total num frames: 225280. Throughput: 0: 711.7. Samples: 56508. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:58:08,360][13487] Avg episode reward: [(0, '4.315')]
-[2023-07-04 14:58:13,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2867.2, 300 sec: 2375.7). Total num frames: 237568. Throughput: 0: 720.0. Samples: 60244. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:13,360][13487] Avg episode reward: [(0, '4.179')]
-[2023-07-04 14:58:15,912][13838] Updated weights for policy 0, policy_version 60 (0.0016)
-[2023-07-04 14:58:18,355][13487] Fps is (10 sec: 2457.5, 60 sec: 2798.9, 300 sec: 2379.6). Total num frames: 249856. Throughput: 0: 727.3. Samples: 62160. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
-[2023-07-04 14:58:18,360][13487] Avg episode reward: [(0, '4.397')]
-[2023-07-04 14:58:23,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2798.9, 300 sec: 2420.4). Total num frames: 266240. Throughput: 0: 727.3. Samples: 66860. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:23,357][13487] Avg episode reward: [(0, '4.491')]
-[2023-07-04 14:58:23,418][13825] Saving new best policy, reward=4.491!
-[2023-07-04 14:58:27,577][13838] Updated weights for policy 0, policy_version 70 (0.0032)
-[2023-07-04 14:58:28,355][13487] Fps is (10 sec: 3686.5, 60 sec: 2935.5, 300 sec: 2493.3). Total num frames: 286720. Throughput: 0: 734.4. Samples: 72596. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:58:28,358][13487] Avg episode reward: [(0, '4.461')]
-[2023-07-04 14:58:33,355][13487] Fps is (10 sec: 3686.4, 60 sec: 3072.0, 300 sec: 2525.9). Total num frames: 303104. Throughput: 0: 752.8. Samples: 75222. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:33,358][13487] Avg episode reward: [(0, '4.361')]
-[2023-07-04 14:58:38,355][13487] Fps is (10 sec: 2867.2, 60 sec: 2935.5, 300 sec: 2523.2). Total num frames: 315392. Throughput: 0: 765.6. Samples: 79156. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:38,358][13487] Avg episode reward: [(0, '4.276')]
-[2023-07-04 14:58:41,845][13838] Updated weights for policy 0, policy_version 80 (0.0032)
-[2023-07-04 14:58:43,355][13487] Fps is (10 sec: 2457.6, 60 sec: 2867.2, 300 sec: 2520.7). Total num frames: 327680. Throughput: 0: 766.1. Samples: 83024. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:43,357][13487] Avg episode reward: [(0, '4.358')]
-[2023-07-04 14:58:48,355][13487] Fps is (10 sec: 3276.8, 60 sec: 3003.9, 300 sec: 2579.0). Total num frames: 348160. Throughput: 0: 770.8. Samples: 85976. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
-[2023-07-04 14:58:48,360][13487] Avg episode reward: [(0, '4.336')]
-[2023-07-04 14:58:52,573][13838] Updated weights for policy 0, policy_version 90 (0.0013)
-[2023-07-04 14:58:53,357][13487] Fps is (10 sec: 4095.3, 60 sec: 3140.3, 300 sec: 2633.1). Total num frames: 368640. Throughput: 0: 792.8. Samples: 92186. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
-[2023-07-04 14:58:53,360][13487] Avg episode reward: [(0, '4.373')]
-[2023-07-04 14:58:58,355][13487] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 2627.1). Total num frames: 380928. Throughput: 0: 807.2. Samples: 96570. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
-[2023-07-04 14:58:58,358][13487] Avg episode reward: [(0, '4.562')]
-[2023-07-04 14:58:58,364][13825] Saving new best policy, reward=4.562!
-[2023-07-04 14:59:03,355][13487] Fps is (10 sec: 2867.7, 60 sec: 3140.3, 300 sec: 2648.8). Total num frames: 397312. Throughput: 0: 807.0. Samples: 98474. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
-[2023-07-04 14:59:03,367][13487] Avg episode reward: [(0, '4.574')]
-[2023-07-04 14:59:03,382][13825] Saving new best policy, reward=4.574!
-[2023-07-04 14:59:07,557][13838] Updated weights for policy 0, policy_version 100 (0.0027)
-[2023-07-04 14:59:08,355][13487] Fps is (10 sec: 2867.2, 60 sec: 3072.0, 300 sec: 2642.6). Total num frames: 409600. Throughput: 0: 791.1. Samples: 102458. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
-[2023-07-04 14:59:08,363][13487] Avg episode reward: [(0, '4.634')]
-[2023-07-04 14:59:08,369][13825] Saving new best policy, reward=4.634!
-[2023-07-04 14:59:13,355][13487] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 2713.6). Total num frames: 434176. Throughput: 0: 800.0. Samples: 108596. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
-[2023-07-04 14:59:13,357][13487] Avg episode reward: [(0, '4.654')]
-[2023-07-04 14:59:13,371][13825] Saving new best policy, reward=4.654!
-[2023-07-04 14:59:17,911][13838] Updated weights for policy 0, policy_version 110 (0.0022)
-[2023-07-04 14:59:18,355][13487] Fps is (10 sec: 4096.0, 60 sec: 3345.1, 300 sec: 2730.7). Total num frames: 450560. Throughput: 0: 809.2. Samples: 111636. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
-[2023-07-04 14:59:18,360][13487] Avg episode reward: [(0, '4.615')]
-[2023-07-04 14:59:23,355][13487] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 2698.6). Total num frames: 458752. Throughput: 0: 800.2. Samples: 115164. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
-[2023-07-04 14:59:23,363][13487] Avg episode reward: [(0, '4.720')]
-[2023-07-04 14:59:23,384][13825] Saving new best policy, reward=4.720!
-[2023-07-04 14:59:24,904][13487] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 13487], exiting...
-[2023-07-04 14:59:24,912][13825] Stopping Batcher_0...
-[2023-07-04 14:59:24,914][13825] Loop batcher_evt_loop terminating...
-[2023-07-04 14:59:24,913][13825] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000112_458752.pth...
-[2023-07-04 14:59:24,911][13487] Runner profile tree view:
-main_loop: 196.1569
-[2023-07-04 14:59:24,931][13487] Collected {0: 458752}, FPS: 2338.7
-[2023-07-04 14:59:25,139][13845] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance6'), args=(0, 0)
+[2023-07-04 15:19:57,431][11925] Worker 0 uses CPU cores [0]
+[2023-07-04 15:20:02,307][11911] Using optimizer 
+[2023-07-04 15:20:02,307][11911] No checkpoints found
+[2023-07-04 15:20:02,308][11911] Did not load from checkpoint, starting from scratch!
+[2023-07-04 15:20:02,308][11911] Initialized policy 0 weights for model version 0
+[2023-07-04 15:20:02,312][11911] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:20:02,332][11911] LearnerWorker_p0 finished initialization!
+[2023-07-04 15:20:02,522][11924] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:20:02,523][11924] RunningMeanStd input shape: (1,)
+[2023-07-04 15:20:02,541][11924] ConvEncoder: input_channels=3
+[2023-07-04 15:20:02,638][11924] Conv encoder output size: 512
+[2023-07-04 15:20:02,638][11924] Policy head output size: 512
+[2023-07-04 15:20:03,841][11762] Inference worker 0-0 is ready!
+[2023-07-04 15:20:03,845][11762] All inference workers are ready! Signal rollout workers to start!
+[2023-07-04 15:20:03,979][11926] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,008][11927] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,009][11930] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,011][11932] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,015][11928] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,024][11931] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,028][11929] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,057][11925] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:20:04,766][11762] Heartbeat connected on Batcher_0
+[2023-07-04 15:20:04,772][11762] Heartbeat connected on LearnerWorker_p0
+[2023-07-04 15:20:04,808][11762] Heartbeat connected on InferenceWorker_p0-w0
+[2023-07-04 15:20:05,044][11930] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:05,041][11926] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:05,279][11925] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:05,281][11929] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:05,288][11927] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:05,602][11930] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:06,056][11929] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:06,063][11927] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:06,344][11932] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:06,782][11926] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:06,972][11930] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:07,067][11762] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:20:07,798][11930] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:08,081][11929] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:08,083][11927] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:08,246][11762] Heartbeat connected on RolloutWorker_w5
+[2023-07-04 15:20:08,392][11931] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:08,958][11925] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:09,704][11926] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:10,057][11928] Decorrelating experience for 0 frames...
+[2023-07-04 15:20:10,626][11929] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:10,631][11927] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:10,807][11931] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:11,179][11762] Heartbeat connected on RolloutWorker_w4
+[2023-07-04 15:20:11,188][11762] Heartbeat connected on RolloutWorker_w2
+[2023-07-04 15:20:12,067][11762] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 3.2. Samples: 16. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:20:12,337][11926] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:12,665][11932] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:12,675][11928] Decorrelating experience for 32 frames...
+[2023-07-04 15:20:12,751][11762] Heartbeat connected on RolloutWorker_w1
+[2023-07-04 15:20:14,110][11925] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:17,070][11762] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 158.9. Samples: 1590. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:20:17,074][11762] Avg episode reward: [(0, '2.976')]
+[2023-07-04 15:20:17,667][11932] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:17,670][11928] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:17,892][11911] Signal inference workers to stop experience collection...
+[2023-07-04 15:20:17,915][11924] InferenceWorker_p0-w0: stopping experience collection
+[2023-07-04 15:20:18,587][11931] Decorrelating experience for 64 frames...
+[2023-07-04 15:20:18,666][11925] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:18,824][11762] Heartbeat connected on RolloutWorker_w0
+[2023-07-04 15:20:19,243][11931] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:19,343][11762] Heartbeat connected on RolloutWorker_w6
+[2023-07-04 15:20:19,357][11928] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:19,371][11932] Decorrelating experience for 96 frames...
+[2023-07-04 15:20:19,485][11762] Heartbeat connected on RolloutWorker_w3
+[2023-07-04 15:20:19,503][11762] Heartbeat connected on RolloutWorker_w7
+[2023-07-04 15:20:19,609][11911] Signal inference workers to resume experience collection...
+[2023-07-04 15:20:19,610][11924] InferenceWorker_p0-w0: resuming experience collection
+[2023-07-04 15:20:22,067][11762] Fps is (10 sec: 1228.8, 60 sec: 819.2, 300 sec: 819.2). Total num frames: 12288. Throughput: 0: 256.1. Samples: 3842. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2023-07-04 15:20:22,074][11762] Avg episode reward: [(0, '3.005')]
+[2023-07-04 15:20:27,067][11762] Fps is (10 sec: 3278.0, 60 sec: 1638.4, 300 sec: 1638.4). Total num frames: 32768. Throughput: 0: 357.4. Samples: 7148. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2023-07-04 15:20:27,069][11762] Avg episode reward: [(0, '3.857')]
+[2023-07-04 15:20:28,255][11924] Updated weights for policy 0, policy_version 10 (0.0024)
+[2023-07-04 15:20:32,067][11762] Fps is (10 sec: 3686.4, 60 sec: 1966.1, 300 sec: 1966.1). Total num frames: 49152. Throughput: 0: 520.9. Samples: 13022. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:20:32,069][11762] Avg episode reward: [(0, '4.392')]
+[2023-07-04 15:20:37,067][11762] Fps is (10 sec: 3276.8, 60 sec: 2184.5, 300 sec: 2184.5). Total num frames: 65536. Throughput: 0: 578.5. Samples: 17354. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-07-04 15:20:37,073][11762] Avg episode reward: [(0, '4.397')]
+[2023-07-04 15:20:41,647][11924] Updated weights for policy 0, policy_version 20 (0.0035)
+[2023-07-04 15:20:42,069][11762] Fps is (10 sec: 3275.9, 60 sec: 2340.4, 300 sec: 2340.4). Total num frames: 81920. Throughput: 0: 556.9. Samples: 19492. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:20:42,075][11762] Avg episode reward: [(0, '4.379')]
+[2023-07-04 15:20:47,067][11762] Fps is (10 sec: 3686.4, 60 sec: 2560.0, 300 sec: 2560.0). Total num frames: 102400. Throughput: 0: 639.8. Samples: 25592. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:20:47,073][11762] Avg episode reward: [(0, '4.485')]
+[2023-07-04 15:20:47,078][11911] Saving new best policy, reward=4.485!
+[2023-07-04 15:20:50,974][11924] Updated weights for policy 0, policy_version 30 (0.0015)
+[2023-07-04 15:20:52,067][11762] Fps is (10 sec: 4097.1, 60 sec: 2730.7, 300 sec: 2730.7). Total num frames: 122880. Throughput: 0: 707.1. Samples: 31818. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-07-04 15:20:52,069][11762] Avg episode reward: [(0, '4.684')]
+[2023-07-04 15:20:52,081][11911] Saving new best policy, reward=4.684!
+[2023-07-04 15:20:57,067][11762] Fps is (10 sec: 3276.8, 60 sec: 2703.4, 300 sec: 2703.4). Total num frames: 135168. Throughput: 0: 743.4. Samples: 33470. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:20:57,072][11762] Avg episode reward: [(0, '4.698')]
+[2023-07-04 15:20:57,082][11911] Saving new best policy, reward=4.698!
+[2023-07-04 15:21:02,067][11762] Fps is (10 sec: 2867.2, 60 sec: 2755.5, 300 sec: 2755.5). Total num frames: 151552. Throughput: 0: 802.8. Samples: 37712. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-07-04 15:21:02,072][11762] Avg episode reward: [(0, '4.795')]
+[2023-07-04 15:21:02,085][11911] Saving new best policy, reward=4.795!
+[2023-07-04 15:21:05,105][11924] Updated weights for policy 0, policy_version 40 (0.0018)
+[2023-07-04 15:21:07,067][11762] Fps is (10 sec: 3686.4, 60 sec: 2867.2, 300 sec: 2867.2). Total num frames: 172032. Throughput: 0: 880.7. Samples: 43474. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-07-04 15:21:07,069][11762] Avg episode reward: [(0, '4.614')]
+[2023-07-04 15:21:12,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3208.5, 300 sec: 2961.7). Total num frames: 192512. Throughput: 0: 881.9. Samples: 46832. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:21:12,068][11762] Avg episode reward: [(0, '4.623')]
+[2023-07-04 15:21:14,527][11924] Updated weights for policy 0, policy_version 50 (0.0017)
+[2023-07-04 15:21:17,068][11762] Fps is (10 sec: 3685.8, 60 sec: 3481.7, 300 sec: 2984.2). Total num frames: 208896. Throughput: 0: 882.9. Samples: 52752. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:21:17,071][11762] Avg episode reward: [(0, '4.688')]
+[2023-07-04 15:21:22,067][11762] Fps is (10 sec: 3276.7, 60 sec: 3549.9, 300 sec: 3003.7). Total num frames: 225280. Throughput: 0: 879.9. Samples: 56952. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2023-07-04 15:21:22,071][11762] Avg episode reward: [(0, '4.777')]
+[2023-07-04 15:21:27,071][11762] Fps is (10 sec: 3275.9, 60 sec: 3481.3, 300 sec: 3020.6). Total num frames: 241664. Throughput: 0: 878.5. Samples: 59024. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-07-04 15:21:27,079][11762] Avg episode reward: [(0, '4.760')]
+[2023-07-04 15:21:27,866][11924] Updated weights for policy 0, policy_version 60 (0.0018)
+[2023-07-04 15:21:32,067][11762] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3084.0). Total num frames: 262144. Throughput: 0: 886.1. Samples: 65466. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-07-04 15:21:32,073][11762] Avg episode reward: [(0, '5.215')]
+[2023-07-04 15:21:32,083][11911] Saving new best policy, reward=5.215!
+[2023-07-04 15:21:37,067][11762] Fps is (10 sec: 4097.8, 60 sec: 3618.1, 300 sec: 3140.3). Total num frames: 282624. Throughput: 0: 885.1. Samples: 71648. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:21:37,075][11762] Avg episode reward: [(0, '5.326')]
+[2023-07-04 15:21:37,079][11911] Saving new best policy, reward=5.326!
+[2023-07-04 15:21:37,783][11924] Updated weights for policy 0, policy_version 70 (0.0016)
+[2023-07-04 15:21:42,070][11762] Fps is (10 sec: 3275.8, 60 sec: 3549.8, 300 sec: 3104.2). Total num frames: 294912. Throughput: 0: 892.0. Samples: 73612. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:21:42,072][11762] Avg episode reward: [(0, '4.979')]
+[2023-07-04 15:21:42,081][11911] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000072_294912.pth...
+[2023-07-04 15:21:47,067][11762] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3113.0). Total num frames: 311296. Throughput: 0: 895.1. Samples: 77992. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-07-04 15:21:47,070][11762] Avg episode reward: [(0, '4.955')]
+[2023-07-04 15:21:50,537][11924] Updated weights for policy 0, policy_version 80 (0.0020)
+[2023-07-04 15:21:52,067][11762] Fps is (10 sec: 3687.5, 60 sec: 3481.6, 300 sec: 3159.8). Total num frames: 331776. Throughput: 0: 900.9. Samples: 84016. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:21:52,075][11762] Avg episode reward: [(0, '4.940')]
+[2023-07-04 15:21:57,067][11762] Fps is (10 sec: 4505.7, 60 sec: 3686.4, 300 sec: 3239.6). Total num frames: 356352. Throughput: 0: 902.0. Samples: 87422. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-07-04 15:21:57,074][11762] Avg episode reward: [(0, '4.783')]
+[2023-07-04 15:22:00,514][11924] Updated weights for policy 0, policy_version 90 (0.0013)
+[2023-07-04 15:22:02,067][11762] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3205.6). Total num frames: 368640. Throughput: 0: 893.8. Samples: 92970. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:22:02,072][11762] Avg episode reward: [(0, '4.892')]
+[2023-07-04 15:22:07,067][11762] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3208.5). Total num frames: 385024. Throughput: 0: 896.2. Samples: 97280. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:22:07,072][11762] Avg episode reward: [(0, '5.158')]
+[2023-07-04 15:22:12,067][11762] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3211.3). Total num frames: 401408. Throughput: 0: 899.7. Samples: 99506. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:22:12,069][11762] Avg episode reward: [(0, '5.560')]
+[2023-07-04 15:22:12,152][11911] Saving new best policy, reward=5.560!
+[2023-07-04 15:22:13,078][11924] Updated weights for policy 0, policy_version 100 (0.0018)
+[2023-07-04 15:22:17,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3276.8). Total num frames: 425984. Throughput: 0: 898.8. Samples: 105912. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:22:17,076][11762] Avg episode reward: [(0, '5.886')]
+[2023-07-04 15:22:17,081][11911] Saving new best policy, reward=5.886!
+[2023-07-04 15:22:22,067][11762] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3307.1). Total num frames: 446464. Throughput: 0: 898.5. Samples: 112082. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:22:22,073][11762] Avg episode reward: [(0, '5.793')]
+[2023-07-04 15:22:23,609][11924] Updated weights for policy 0, policy_version 110 (0.0014)
+[2023-07-04 15:22:27,070][11762] Fps is (10 sec: 3275.7, 60 sec: 3618.2, 300 sec: 3276.7). Total num frames: 458752. Throughput: 0: 901.9. Samples: 114198. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-07-04 15:22:27,079][11762] Avg episode reward: [(0, '5.643')]
+[2023-07-04 15:22:32,067][11762] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3276.8). Total num frames: 475136. Throughput: 0: 900.9. Samples: 118532. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:22:32,074][11762] Avg episode reward: [(0, '5.685')]
+[2023-07-04 15:22:35,764][11924] Updated weights for policy 0, policy_version 120 (0.0017)
+[2023-07-04 15:22:37,067][11762] Fps is (10 sec: 3687.7, 60 sec: 3549.9, 300 sec: 3304.1). Total num frames: 495616. Throughput: 0: 903.1. Samples: 124656. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:22:37,068][11762] Avg episode reward: [(0, '5.803')]
+[2023-07-04 15:22:42,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3686.6, 300 sec: 3329.7). Total num frames: 516096. Throughput: 0: 903.3. Samples: 128070. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:22:42,069][11762] Avg episode reward: [(0, '6.133')]
+[2023-07-04 15:22:42,127][11911] Saving new best policy, reward=6.133!
+[2023-07-04 15:22:46,005][11924] Updated weights for policy 0, policy_version 130 (0.0013)
+[2023-07-04 15:22:47,068][11762] Fps is (10 sec: 3685.8, 60 sec: 3686.3, 300 sec: 3328.0). Total num frames: 532480. Throughput: 0: 903.2. Samples: 133614. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:22:47,070][11762] Avg episode reward: [(0, '6.713')]
+[2023-07-04 15:22:47,080][11911] Saving new best policy, reward=6.713!
+[2023-07-04 15:22:52,067][11762] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3326.4). Total num frames: 548864. Throughput: 0: 901.5. Samples: 137848. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-07-04 15:22:52,077][11762] Avg episode reward: [(0, '7.141')]
+[2023-07-04 15:22:52,089][11911] Saving new best policy, reward=7.141!
+[2023-07-04 15:22:57,067][11762] Fps is (10 sec: 3277.3, 60 sec: 3481.6, 300 sec: 3325.0). Total num frames: 565248. Throughput: 0: 901.6. Samples: 140076. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-07-04 15:22:57,074][11762] Avg episode reward: [(0, '7.291')]
+[2023-07-04 15:22:57,079][11911] Saving new best policy, reward=7.291!
+[2023-07-04 15:22:58,304][11924] Updated weights for policy 0, policy_version 140 (0.0018)
+[2023-07-04 15:23:02,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3370.4). Total num frames: 589824. Throughput: 0: 910.4. Samples: 146882. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:23:02,073][11762] Avg episode reward: [(0, '7.595')]
+[2023-07-04 15:23:02,083][11911] Saving new best policy, reward=7.595!
+[2023-07-04 15:23:07,067][11762] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3390.6). Total num frames: 610304. Throughput: 0: 910.0. Samples: 153030. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-07-04 15:23:07,073][11762] Avg episode reward: [(0, '7.373')]
+[2023-07-04 15:23:08,406][11924] Updated weights for policy 0, policy_version 150 (0.0019)
+[2023-07-04 15:23:12,068][11762] Fps is (10 sec: 3276.3, 60 sec: 3686.3, 300 sec: 3365.3). Total num frames: 622592. Throughput: 0: 909.6. Samples: 155130. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:23:12,078][11762] Avg episode reward: [(0, '7.173')]
+[2023-07-04 15:23:17,067][11762] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3363.0). Total num frames: 638976. Throughput: 0: 911.6. Samples: 159552. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:23:17,075][11762] Avg episode reward: [(0, '6.405')]
+[2023-07-04 15:23:20,851][11924] Updated weights for policy 0, policy_version 160 (0.0015)
+[2023-07-04 15:23:22,067][11762] Fps is (10 sec: 3687.0, 60 sec: 3549.9, 300 sec: 3381.8). Total num frames: 659456. Throughput: 0: 909.0. Samples: 165562. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:23:22,072][11762] Avg episode reward: [(0, '6.512')]
+[2023-07-04 15:23:27,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3686.6, 300 sec: 3399.7). Total num frames: 679936. Throughput: 0: 907.8. Samples: 168920. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:23:27,072][11762] Avg episode reward: [(0, '7.027')]
+[2023-07-04 15:23:31,154][11924] Updated weights for policy 0, policy_version 170 (0.0022)
+[2023-07-04 15:23:32,072][11762] Fps is (10 sec: 3684.6, 60 sec: 3686.1, 300 sec: 3396.6). Total num frames: 696320. Throughput: 0: 906.5. Samples: 174410. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:23:32,089][11762] Avg episode reward: [(0, '7.720')]
+[2023-07-04 15:23:32,100][11911] Saving new best policy, reward=7.720!
+[2023-07-04 15:23:37,067][11762] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3393.8). Total num frames: 712704. Throughput: 0: 907.9. Samples: 178704. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:23:37,074][11762] Avg episode reward: [(0, '7.966')]
+[2023-07-04 15:23:37,076][11911] Saving new best policy, reward=7.966!
+[2023-07-04 15:23:42,067][11762] Fps is (10 sec: 3278.4, 60 sec: 3549.9, 300 sec: 3391.1). Total num frames: 729088. Throughput: 0: 905.2. Samples: 180810. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:23:42,071][11762] Avg episode reward: [(0, '8.377')]
+[2023-07-04 15:23:42,083][11911] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000178_729088.pth...
+[2023-07-04 15:23:42,197][11911] Saving new best policy, reward=8.377!
+[2023-07-04 15:23:43,776][11924] Updated weights for policy 0, policy_version 180 (0.0014)
+[2023-07-04 15:23:47,067][11762] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3407.1). Total num frames: 749568. Throughput: 0: 897.4. Samples: 187266. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:23:47,069][11762] Avg episode reward: [(0, '8.528')]
+[2023-07-04 15:23:47,074][11911] Saving new best policy, reward=8.528!
+[2023-07-04 15:23:52,067][11762] Fps is (10 sec: 4095.7, 60 sec: 3686.4, 300 sec: 3422.4). Total num frames: 770048. Throughput: 0: 898.7. Samples: 193470. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:23:52,070][11762] Avg episode reward: [(0, '8.678')]
+[2023-07-04 15:23:52,076][11911] Saving new best policy, reward=8.678!
+[2023-07-04 15:23:54,221][11924] Updated weights for policy 0, policy_version 190 (0.0012)
+[2023-07-04 15:23:57,067][11762] Fps is (10 sec: 3686.2, 60 sec: 3686.4, 300 sec: 3419.3). Total num frames: 786432. Throughput: 0: 900.2. Samples: 195636. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:23:57,070][11762] Avg episode reward: [(0, '8.520')]
+[2023-07-04 15:24:02,067][11762] Fps is (10 sec: 2867.4, 60 sec: 3481.6, 300 sec: 3398.8). Total num frames: 798720. Throughput: 0: 901.2. Samples: 200106. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:24:02,069][11762] Avg episode reward: [(0, '8.505')]
+[2023-07-04 15:24:06,175][11924] Updated weights for policy 0, policy_version 200 (0.0027)
+[2023-07-04 15:24:07,067][11762] Fps is (10 sec: 3277.0, 60 sec: 3481.6, 300 sec: 3413.3). Total num frames: 819200. Throughput: 0: 900.4. Samples: 206082. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:24:07,072][11762] Avg episode reward: [(0, '8.831')]
+[2023-07-04 15:24:07,110][11911] Saving new best policy, reward=8.831!
+[2023-07-04 15:24:12,067][11762] Fps is (10 sec: 4505.6, 60 sec: 3686.5, 300 sec: 3444.0). Total num frames: 843776. Throughput: 0: 899.1. Samples: 209378. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:24:12,069][11762] Avg episode reward: [(0, '9.321')]
+[2023-07-04 15:24:12,079][11911] Saving new best policy, reward=9.321!
+[2023-07-04 15:24:16,563][11924] Updated weights for policy 0, policy_version 210 (0.0018)
+[2023-07-04 15:24:17,067][11762] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3440.6). Total num frames: 860160. Throughput: 0: 901.1. Samples: 214954. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:24:17,072][11762] Avg episode reward: [(0, '9.657')]
+[2023-07-04 15:24:17,076][11911] Saving new best policy, reward=9.657!
+[2023-07-04 15:24:22,067][11762] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3421.4). Total num frames: 872448. Throughput: 0: 900.2. Samples: 219214. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:24:22,073][11762] Avg episode reward: [(0, '9.752')]
+[2023-07-04 15:24:22,089][11911] Saving new best policy, reward=9.752!
+[2023-07-04 15:24:27,067][11762] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3418.6). Total num frames: 888832. Throughput: 0: 893.6. Samples: 221022. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-07-04 15:24:27,072][11762] Avg episode reward: [(0, '10.147')]
+[2023-07-04 15:24:27,075][11911] Saving new best policy, reward=10.147!
+[2023-07-04 15:24:30,142][11924] Updated weights for policy 0, policy_version 220 (0.0024)
+[2023-07-04 15:24:32,067][11762] Fps is (10 sec: 3686.4, 60 sec: 3550.2, 300 sec: 3431.4). Total num frames: 909312. Throughput: 0: 876.0. Samples: 226684. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:24:32,068][11762] Avg episode reward: [(0, '9.975')]
+[2023-07-04 15:24:37,067][11762] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3428.5). Total num frames: 925696. Throughput: 0: 875.3. Samples: 232860. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:24:37,069][11762] Avg episode reward: [(0, '11.119')]
+[2023-07-04 15:24:37,161][11911] Saving new best policy, reward=11.119!
+[2023-07-04 15:24:41,643][11924] Updated weights for policy 0, policy_version 230 (0.0024)
+[2023-07-04 15:24:42,067][11762] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3425.7). Total num frames: 942080. Throughput: 0: 871.5. Samples: 234854. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:24:42,073][11762] Avg episode reward: [(0, '11.602')]
+[2023-07-04 15:24:42,081][11911] Saving new best policy, reward=11.602!
+[2023-07-04 15:24:42,461][11762] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 11762], exiting...
+[2023-07-04 15:24:42,468][11911] Stopping Batcher_0...
+[2023-07-04 15:24:42,470][11911] Loop batcher_evt_loop terminating...
+[2023-07-04 15:24:42,467][11762] Runner profile tree view:
+main_loop: 297.6563
+[2023-07-04 15:24:42,471][11911] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000230_942080.pth...
+[2023-07-04 15:24:42,470][11762] Collected {0: 942080}, FPS: 3165.0
+[2023-07-04 15:24:42,542][11931] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance6'), args=(0, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -349,8 +1340,8 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,197][13845] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc6_evt_loop
-[2023-07-04 14:59:25,069][13841] EvtLoop [rollout_proc2_evt_loop, process=rollout_proc2] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance2'), args=(1, 0)
+[2023-07-04 15:24:42,562][11931] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc6_evt_loop
+[2023-07-04 15:24:42,538][11932] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance7'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -377,11 +1368,7 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,225][13838] Weights refcount: 2 0
-[2023-07-04 14:59:25,335][13841] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc2_evt_loop
-[2023-07-04 14:59:25,343][13838] Stopping InferenceWorker_p0-w0...
-[2023-07-04 14:59:25,344][13838] Loop inference_proc0-0_evt_loop terminating...
-[2023-07-04 14:59:25,319][13839] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance0'), args=(0, 0)
+[2023-07-04 15:24:42,499][11930] EvtLoop [rollout_proc5_evt_loop, process=rollout_proc5] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance5'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -408,7 +1395,8 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,204][13840] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance1'), args=(0, 0)
+[2023-07-04 15:24:42,610][11924] Weights refcount: 2 0
+[2023-07-04 15:24:42,504][11926] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance1'), args=(0, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -435,8 +1423,12 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,386][13840] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc1_evt_loop
-[2023-07-04 14:59:25,167][13843] EvtLoop [rollout_proc5_evt_loop, process=rollout_proc5] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance5'), args=(1, 0)
+[2023-07-04 15:24:42,619][11926] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc1_evt_loop
+[2023-07-04 15:24:42,565][11932] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc7_evt_loop
+[2023-07-04 15:24:42,645][11924] Stopping InferenceWorker_p0-w0...
+[2023-07-04 15:24:42,645][11924] Loop inference_proc0-0_evt_loop terminating...
+[2023-07-04 15:24:42,591][11930] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc5_evt_loop
+[2023-07-04 15:24:42,557][11928] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance3'), args=(0, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -463,8 +1455,8 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,387][13843] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc5_evt_loop
-[2023-07-04 14:59:25,188][13846] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance7'), args=(0, 0)
+[2023-07-04 15:24:42,653][11928] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc3_evt_loop
+[2023-07-04 15:24:42,603][11927] EvtLoop [rollout_proc2_evt_loop, process=rollout_proc2] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance2'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -491,8 +1483,8 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,388][13846] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc7_evt_loop
-[2023-07-04 14:59:25,235][13844] EvtLoop [rollout_proc4_evt_loop, process=rollout_proc4] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance4'), args=(1, 0)
+[2023-07-04 15:24:42,659][11927] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc2_evt_loop
+[2023-07-04 15:24:42,650][11929] EvtLoop [rollout_proc4_evt_loop, process=rollout_proc4] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance4'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -519,8 +1511,7 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,408][13844] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc4_evt_loop
-[2023-07-04 14:59:25,248][13842] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance3'), args=(1, 0)
+[2023-07-04 15:24:42,684][11925] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance0'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
@@ -547,39 +1538,289 @@
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
    reward = self.game.make_action(actions_flattened, self.skip_frames)
vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
-[2023-07-04 14:59:25,417][13842] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc3_evt_loop
-[2023-07-04 14:59:25,365][13839] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc0_evt_loop
-[2023-07-04 14:59:25,570][13825] Stopping LearnerWorker_p0...
-[2023-07-04 14:59:25,571][13825] Loop learner_proc0_evt_loop terminating...
-[2023-07-04 14:59:27,388][13487] Environment doom_basic already registered, overwriting...
-[2023-07-04 14:59:27,391][13487] Environment doom_two_colors_easy already registered, overwriting...
-[2023-07-04 14:59:27,396][13487] Environment doom_two_colors_hard already registered, overwriting...
-[2023-07-04 14:59:27,403][13487] Environment doom_dm already registered, overwriting...
-[2023-07-04 14:59:27,409][13487] Environment doom_dwango5 already registered, overwriting...
-[2023-07-04 14:59:27,416][13487] Environment doom_my_way_home_flat_actions already registered, overwriting...
-[2023-07-04 14:59:27,428][13487] Environment doom_defend_the_center_flat_actions already registered, overwriting...
-[2023-07-04 14:59:27,431][13487] Environment doom_my_way_home already registered, overwriting...
-[2023-07-04 14:59:27,432][13487] Environment doom_deadly_corridor already registered, overwriting...
-[2023-07-04 14:59:27,436][13487] Environment doom_defend_the_center already registered, overwriting...
-[2023-07-04 14:59:27,437][13487] Environment doom_defend_the_line already registered, overwriting...
-[2023-07-04 14:59:27,441][13487] Environment doom_health_gathering already registered, overwriting...
-[2023-07-04 14:59:27,445][13487] Environment doom_health_gathering_supreme already registered, overwriting...
-[2023-07-04 14:59:27,447][13487] Environment doom_battle already registered, overwriting...
-[2023-07-04 14:59:27,453][13487] Environment doom_battle2 already registered, overwriting...
-[2023-07-04 14:59:27,454][13487] Environment doom_duel_bots already registered, overwriting...
-[2023-07-04 14:59:27,458][13487] Environment doom_deathmatch_bots already registered, overwriting...
-[2023-07-04 14:59:27,459][13487] Environment doom_duel already registered, overwriting...
-[2023-07-04 14:59:27,462][13487] Environment doom_deathmatch_full already registered, overwriting...
-[2023-07-04 14:59:27,463][13487] Environment doom_benchmark already registered, overwriting...
-[2023-07-04 14:59:27,468][13487] register_encoder_factory: 
-[2023-07-04 14:59:27,519][13487] Loading existing experiment configuration from /content/train_dir/default_experiment/cfg.json
-[2023-07-04 14:59:27,536][13487] Overriding arg 'train_for_env_steps' with value 25000 passed from command line
-[2023-07-04 14:59:27,547][13487] Experiment dir /content/train_dir/default_experiment already exists!
-[2023-07-04 14:59:27,559][13487] Resuming existing experiment from /content/train_dir/default_experiment...
-[2023-07-04 14:59:27,562][13487] Weights and Biases integration disabled
-[2023-07-04 14:59:27,575][13487] Environment var CUDA_VISIBLE_DEVICES is 0
+[2023-07-04 15:24:42,796][11925] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc0_evt_loop
+[2023-07-04 15:24:42,748][11929] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc4_evt_loop
+[2023-07-04 15:24:42,992][11911] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000072_294912.pth
+[2023-07-04 15:24:43,027][11911] Stopping LearnerWorker_p0...
+[2023-07-04 15:24:43,032][11911] Loop learner_proc0_evt_loop terminating...
+[2023-07-04 15:25:30,346][17091] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2023-07-04 15:25:30,349][17091] Rollout worker 0 uses device cpu
+[2023-07-04 15:25:30,350][17091] Rollout worker 1 uses device cpu
+[2023-07-04 15:25:30,351][17091] Rollout worker 2 uses device cpu
+[2023-07-04 15:25:30,352][17091] Rollout worker 3 uses device cpu
+[2023-07-04 15:25:30,355][17091] Rollout worker 4 uses device cpu
+[2023-07-04 15:25:30,356][17091] Rollout worker 5 uses device cpu
+[2023-07-04 15:25:30,357][17091] Rollout worker 6 uses device cpu
+[2023-07-04 15:25:30,358][17091] Rollout worker 7 uses device cpu
+[2023-07-04 15:25:30,541][17091] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:25:30,546][17091] InferenceWorker_p0-w0: min num requests: 2
+[2023-07-04 15:25:30,587][17091] Starting all processes...
+[2023-07-04 15:25:30,591][17091] Starting process learner_proc0
+[2023-07-04 15:25:30,659][17091] Starting all processes...
+[2023-07-04 15:25:30,675][17091] Starting process inference_proc0-0
+[2023-07-04 15:25:30,676][17091] Starting process rollout_proc0
+[2023-07-04 15:25:30,680][17091] Starting process rollout_proc1
+[2023-07-04 15:25:30,680][17091] Starting process rollout_proc2
+[2023-07-04 15:25:30,689][17091] Starting process rollout_proc3
+[2023-07-04 15:25:30,689][17091] Starting process rollout_proc4
+[2023-07-04 15:25:30,689][17091] Starting process rollout_proc5
+[2023-07-04 15:25:30,689][17091] Starting process rollout_proc6
+[2023-07-04 15:25:30,689][17091] Starting process rollout_proc7
+[2023-07-04 15:25:41,907][17310] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:25:41,908][17310] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-07-04 15:25:41,966][17310] Num visible devices: 1
+[2023-07-04 15:25:41,985][17326] Worker 2 uses CPU cores [0]
+[2023-07-04 15:25:42,001][17310] Starting seed is not provided
+[2023-07-04 15:25:42,002][17310] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:25:42,003][17310] Initializing actor-critic model on device cuda:0
+[2023-07-04 15:25:42,004][17310] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:25:42,005][17310] RunningMeanStd input shape: (1,)
+[2023-07-04 15:25:42,038][17331] Worker 7 uses CPU cores [1]
+[2023-07-04 15:25:42,094][17310] ConvEncoder: input_channels=3
+[2023-07-04 15:25:42,254][17324] Worker 0 uses CPU cores [0]
+[2023-07-04 15:25:42,344][17327] Worker 3 uses CPU cores [1]
+[2023-07-04 15:25:42,373][17325] Worker 1 uses CPU cores [1]
+[2023-07-04 15:25:42,379][17330] Worker 6 uses CPU cores [0]
+[2023-07-04 15:25:42,400][17323] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:25:42,401][17323] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-07-04 15:25:42,411][17329] Worker 5 uses CPU cores [1]
+[2023-07-04 15:25:42,431][17323] Num visible devices: 1
+[2023-07-04 15:25:42,448][17328] Worker 4 uses CPU cores [0]
+[2023-07-04 15:25:42,499][17310] Conv encoder output size: 512
+[2023-07-04 15:25:42,499][17310] Policy head output size: 512
+[2023-07-04 15:25:42,513][17310] Created Actor Critic model with architecture:
+[2023-07-04 15:25:42,513][17310] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-07-04 15:25:44,870][17310] Using optimizer 
+[2023-07-04 15:25:44,871][17310] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000230_942080.pth...
+[2023-07-04 15:25:44,903][17310] Loading model from checkpoint
+[2023-07-04 15:25:44,907][17310] Loaded experiment state at self.train_step=230, self.env_steps=942080
+[2023-07-04 15:25:44,908][17310] Initialized policy 0 weights for model version 230
+[2023-07-04 15:25:44,912][17310] LearnerWorker_p0 finished initialization!
+[2023-07-04 15:25:44,912][17310] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:25:45,101][17323] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:25:45,102][17323] RunningMeanStd input shape: (1,)
+[2023-07-04 15:25:45,114][17323] ConvEncoder: input_channels=3
+[2023-07-04 15:25:45,215][17323] Conv encoder output size: 512
+[2023-07-04 15:25:45,215][17323] Policy head output size: 512
+[2023-07-04 15:25:46,687][17091] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 17091], exiting...
+[2023-07-04 15:25:46,690][17328] Stopping RolloutWorker_w4...
+[2023-07-04 15:25:46,690][17328] Loop rollout_proc4_evt_loop terminating...
+[2023-07-04 15:25:46,690][17310] Stopping Batcher_0...
+[2023-07-04 15:25:46,691][17310] Loop batcher_evt_loop terminating...
+[2023-07-04 15:25:46,692][17310] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000230_942080.pth...
+[2023-07-04 15:25:46,692][17324] Stopping RolloutWorker_w0...
+[2023-07-04 15:25:46,692][17324] Loop rollout_proc0_evt_loop terminating...
+[2023-07-04 15:25:46,693][17330] Stopping RolloutWorker_w6...
+[2023-07-04 15:25:46,694][17330] Loop rollout_proc6_evt_loop terminating...
+[2023-07-04 15:25:46,695][17326] Stopping RolloutWorker_w2...
+[2023-07-04 15:25:46,696][17326] Loop rollout_proc2_evt_loop terminating...
+[2023-07-04 15:25:46,690][17091] Runner profile tree view:
+main_loop: 16.1026
+[2023-07-04 15:25:46,704][17091] Collected {0: 942080}, FPS: 0.0
+[2023-07-04 15:25:46,712][17329] Stopping RolloutWorker_w5...
+[2023-07-04 15:25:46,704][17325] Stopping RolloutWorker_w1...
+[2023-07-04 15:25:46,706][17327] Stopping RolloutWorker_w3...
+[2023-07-04 15:25:46,718][17331] Stopping RolloutWorker_w7...
+[2023-07-04 15:25:46,720][17325] Loop rollout_proc1_evt_loop terminating...
+[2023-07-04 15:25:46,736][17329] Loop rollout_proc5_evt_loop terminating...
+[2023-07-04 15:25:46,734][17327] Loop rollout_proc3_evt_loop terminating...
+[2023-07-04 15:25:46,736][17331] Loop rollout_proc7_evt_loop terminating...
+[2023-07-04 15:25:46,778][17323] Weights refcount: 2 0
+[2023-07-04 15:25:46,775][17091] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-07-04 15:25:46,781][17091] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-07-04 15:25:46,784][17091] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-07-04 15:25:46,786][17091] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-07-04 15:25:46,788][17091] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-07-04 15:25:46,789][17091] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-07-04 15:25:46,790][17323] Stopping InferenceWorker_p0-w0...
+[2023-07-04 15:25:46,793][17323] Loop inference_proc0-0_evt_loop terminating...
+[2023-07-04 15:25:46,791][17091] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2023-07-04 15:25:46,795][17091] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-07-04 15:25:46,797][17091] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2023-07-04 15:25:46,799][17091] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2023-07-04 15:25:46,804][17091] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-07-04 15:25:46,805][17091] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-07-04 15:25:46,810][17091] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-07-04 15:25:46,812][17091] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-07-04 15:25:46,813][17091] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-07-04 15:25:46,875][17091] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:25:46,883][17091] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:25:46,892][17091] RunningMeanStd input shape: (1,)
+[2023-07-04 15:25:46,973][17091] ConvEncoder: input_channels=3
+[2023-07-04 15:25:46,991][17310] Stopping LearnerWorker_p0...
+[2023-07-04 15:25:46,999][17310] Loop learner_proc0_evt_loop terminating...
+[2023-07-04 15:25:47,450][17091] Conv encoder output size: 512
+[2023-07-04 15:25:47,458][17091] Policy head output size: 512
+[2023-07-04 15:25:53,319][17091] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000230_942080.pth...
+[2023-07-04 15:25:54,502][17091] Num frames 100...
+[2023-07-04 15:25:54,622][17091] Num frames 200...
+[2023-07-04 15:25:54,747][17091] Num frames 300...
+[2023-07-04 15:25:54,891][17091] Num frames 400...
+[2023-07-04 15:25:55,016][17091] Num frames 500...
+[2023-07-04 15:25:55,138][17091] Num frames 600...
+[2023-07-04 15:25:55,205][17091] Avg episode rewards: #0: 9.080, true rewards: #0: 6.080
+[2023-07-04 15:25:55,208][17091] Avg episode reward: 9.080, avg true_objective: 6.080
+[2023-07-04 15:25:55,327][17091] Num frames 700...
+[2023-07-04 15:25:55,456][17091] Num frames 800...
+[2023-07-04 15:25:55,578][17091] Num frames 900...
+[2023-07-04 15:25:55,712][17091] Num frames 1000...
+[2023-07-04 15:25:55,837][17091] Num frames 1100...
+[2023-07-04 15:25:55,972][17091] Num frames 1200...
+[2023-07-04 15:25:56,106][17091] Num frames 1300...
+[2023-07-04 15:25:56,242][17091] Num frames 1400...
+[2023-07-04 15:25:56,357][17091] Avg episode rewards: #0: 12.710, true rewards: #0: 7.210
+[2023-07-04 15:25:56,359][17091] Avg episode reward: 12.710, avg true_objective: 7.210
+[2023-07-04 15:25:56,447][17091] Num frames 1500...
+[2023-07-04 15:25:56,584][17091] Num frames 1600...
+[2023-07-04 15:25:56,708][17091] Num frames 1700...
+[2023-07-04 15:25:56,839][17091] Num frames 1800...
+[2023-07-04 15:25:56,970][17091] Num frames 1900...
+[2023-07-04 15:25:57,103][17091] Num frames 2000...
+[2023-07-04 15:25:57,236][17091] Num frames 2100...
+[2023-07-04 15:25:57,357][17091] Num frames 2200...
+[2023-07-04 15:25:57,483][17091] Num frames 2300...
+[2023-07-04 15:25:57,610][17091] Num frames 2400...
+[2023-07-04 15:25:57,730][17091] Num frames 2500...
+[2023-07-04 15:25:57,902][17091] Avg episode rewards: #0: 16.647, true rewards: #0: 8.647
+[2023-07-04 15:25:57,905][17091] Avg episode reward: 16.647, avg true_objective: 8.647
+[2023-07-04 15:25:57,917][17091] Num frames 2600...
+[2023-07-04 15:25:58,039][17091] Num frames 2700...
+[2023-07-04 15:25:58,162][17091] Num frames 2800...
+[2023-07-04 15:25:58,300][17091] Num frames 2900...
+[2023-07-04 15:25:58,423][17091] Num frames 3000...
+[2023-07-04 15:25:58,550][17091] Num frames 3100...
+[2023-07-04 15:25:58,670][17091] Num frames 3200...
+[2023-07-04 15:25:58,798][17091] Num frames 3300...
+[2023-07-04 15:25:58,930][17091] Num frames 3400...
+[2023-07-04 15:25:59,048][17091] Num frames 3500...
+[2023-07-04 15:25:59,173][17091] Num frames 3600...
+[2023-07-04 15:25:59,295][17091] Num frames 3700...
+[2023-07-04 15:25:59,417][17091] Num frames 3800...
+[2023-07-04 15:25:59,535][17091] Num frames 3900...
+[2023-07-04 15:25:59,600][17091] Avg episode rewards: #0: 19.765, true rewards: #0: 9.765
+[2023-07-04 15:25:59,601][17091] Avg episode reward: 19.765, avg true_objective: 9.765
+[2023-07-04 15:25:59,714][17091] Num frames 4000...
+[2023-07-04 15:25:59,841][17091] Num frames 4100...
+[2023-07-04 15:25:59,964][17091] Num frames 4200...
+[2023-07-04 15:26:00,135][17091] Avg episode rewards: #0: 16.580, true rewards: #0: 8.580
+[2023-07-04 15:26:00,137][17091] Avg episode reward: 16.580, avg true_objective: 8.580
+[2023-07-04 15:26:00,152][17091] Num frames 4300...
+[2023-07-04 15:26:00,280][17091] Num frames 4400...
+[2023-07-04 15:26:00,407][17091] Num frames 4500...
+[2023-07-04 15:26:00,534][17091] Num frames 4600...
+[2023-07-04 15:26:00,655][17091] Num frames 4700...
+[2023-07-04 15:26:00,756][17091] Avg episode rewards: #0: 14.730, true rewards: #0: 7.897
+[2023-07-04 15:26:00,758][17091] Avg episode reward: 14.730, avg true_objective: 7.897
+[2023-07-04 15:26:00,834][17091] Num frames 4800...
+[2023-07-04 15:26:00,964][17091] Num frames 4900...
+[2023-07-04 15:26:01,087][17091] Num frames 5000...
+[2023-07-04 15:26:01,213][17091] Num frames 5100...
+[2023-07-04 15:26:01,336][17091] Num frames 5200...
+[2023-07-04 15:26:01,464][17091] Num frames 5300...
+[2023-07-04 15:26:01,583][17091] Num frames 5400...
+[2023-07-04 15:26:01,705][17091] Num frames 5500...
+[2023-07-04 15:26:01,833][17091] Num frames 5600...
+[2023-07-04 15:26:01,932][17091] Avg episode rewards: #0: 15.334, true rewards: #0: 8.049
+[2023-07-04 15:26:01,934][17091] Avg episode reward: 15.334, avg true_objective: 8.049
+[2023-07-04 15:26:02,019][17091] Num frames 5700...
+[2023-07-04 15:26:02,142][17091] Num frames 5800...
+[2023-07-04 15:26:02,264][17091] Num frames 5900...
+[2023-07-04 15:26:02,394][17091] Num frames 6000...
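The two reward lines printed after each episode are cumulative means over all finished episodes so far, for the shaped reward (#0) and the true objective. A quick sketch that reproduces the first two values in this run:

    rewards = []

    def report(episode_reward):
        rewards.append(episode_reward)
        return sum(rewards) / len(rewards)

    print(report(9.080))   # 9.080, matches episode 1
    print(report(16.340))  # 12.710, since 2 * 12.710 - 9.080 = 16.340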
+[2023-07-04 15:26:02,512][17091] Num frames 6100...
+[2023-07-04 15:26:02,642][17091] Num frames 6200...
+[2023-07-04 15:26:02,768][17091] Num frames 6300...
+[2023-07-04 15:26:02,909][17091] Num frames 6400...
+[2023-07-04 15:26:02,970][17091] Avg episode rewards: #0: 15.253, true rewards: #0: 8.002
+[2023-07-04 15:26:02,972][17091] Avg episode reward: 15.253, avg true_objective: 8.002
+[2023-07-04 15:26:03,113][17091] Num frames 6500...
+[2023-07-04 15:26:03,236][17091] Num frames 6600...
+[2023-07-04 15:26:03,360][17091] Num frames 6700...
+[2023-07-04 15:26:03,462][17091] Avg episode rewards: #0: 14.153, true rewards: #0: 7.487
+[2023-07-04 15:26:03,463][17091] Avg episode reward: 14.153, avg true_objective: 7.487
+[2023-07-04 15:26:03,541][17091] Num frames 6800...
+[2023-07-04 15:26:03,717][17091] Num frames 6900...
+[2023-07-04 15:26:03,899][17091] Num frames 7000...
+[2023-07-04 15:26:04,077][17091] Num frames 7100...
+[2023-07-04 15:26:04,250][17091] Num frames 7200...
+[2023-07-04 15:26:04,422][17091] Num frames 7300...
+[2023-07-04 15:26:04,595][17091] Num frames 7400...
+[2023-07-04 15:26:04,771][17091] Num frames 7500...
+[2023-07-04 15:26:04,949][17091] Num frames 7600...
+[2023-07-04 15:26:05,135][17091] Num frames 7700...
+[2023-07-04 15:26:05,319][17091] Num frames 7800...
+[2023-07-04 15:26:05,491][17091] Num frames 7900...
+[2023-07-04 15:26:05,667][17091] Num frames 8000...
+[2023-07-04 15:26:05,757][17091] Avg episode rewards: #0: 15.318, true rewards: #0: 8.018
+[2023-07-04 15:26:05,760][17091] Avg episode reward: 15.318, avg true_objective: 8.018
+[2023-07-04 15:26:55,950][17091] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-07-04 15:26:55,980][17091] Environment doom_basic already registered, overwriting...
+[2023-07-04 15:26:55,986][17091] Environment doom_two_colors_easy already registered, overwriting...
+[2023-07-04 15:26:55,988][17091] Environment doom_two_colors_hard already registered, overwriting...
+[2023-07-04 15:26:55,991][17091] Environment doom_dm already registered, overwriting...
+[2023-07-04 15:26:55,992][17091] Environment doom_dwango5 already registered, overwriting...
+[2023-07-04 15:26:55,993][17091] Environment doom_my_way_home_flat_actions already registered, overwriting...
+[2023-07-04 15:26:55,997][17091] Environment doom_defend_the_center_flat_actions already registered, overwriting...
+[2023-07-04 15:26:55,998][17091] Environment doom_my_way_home already registered, overwriting...
+[2023-07-04 15:26:55,999][17091] Environment doom_deadly_corridor already registered, overwriting...
+[2023-07-04 15:26:56,000][17091] Environment doom_defend_the_center already registered, overwriting...
+[2023-07-04 15:26:56,001][17091] Environment doom_defend_the_line already registered, overwriting...
+[2023-07-04 15:26:56,003][17091] Environment doom_health_gathering already registered, overwriting...
+[2023-07-04 15:26:56,004][17091] Environment doom_health_gathering_supreme already registered, overwriting...
+[2023-07-04 15:26:56,006][17091] Environment doom_battle already registered, overwriting...
+[2023-07-04 15:26:56,008][17091] Environment doom_battle2 already registered, overwriting...
+[2023-07-04 15:26:56,009][17091] Environment doom_duel_bots already registered, overwriting...
+[2023-07-04 15:26:56,011][17091] Environment doom_deathmatch_bots already registered, overwriting...
+[2023-07-04 15:26:56,012][17091] Environment doom_duel already registered, overwriting...
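Re-running training utilities in the same process re-registers every Doom scenario, hence the benign "already registered, overwriting" warnings. The pattern is an idempotent, dict-backed registry; a hypothetical sketch:

    import logging

    ENV_REGISTRY = {}

    def register_env(name, make_env_fn):
        # Overwriting is deliberate so repeated registration is harmless.
        if name in ENV_REGISTRY:
            logging.warning("Environment %s already registered, overwriting...", name)
        ENV_REGISTRY[name] = make_env_fn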
+[2023-07-04 15:26:56,013][17091] Environment doom_deathmatch_full already registered, overwriting...
+[2023-07-04 15:26:56,015][17091] Environment doom_benchmark already registered, overwriting...
+[2023-07-04 15:26:56,016][17091] register_encoder_factory:
+[2023-07-04 15:26:56,040][17091] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-07-04 15:26:56,053][17091] Experiment dir /content/train_dir/default_experiment already exists!
+[2023-07-04 15:26:56,055][17091] Resuming existing experiment from /content/train_dir/default_experiment...
+[2023-07-04 15:26:56,056][17091] Weights and Biases integration disabled
+[2023-07-04 15:26:56,061][17091] Environment var CUDA_VISIBLE_DEVICES is 0
-[2023-07-04 14:59:32,208][13487] Starting experiment with the following configuration:
+[2023-07-04 15:26:57,668][17091] Starting experiment with the following configuration:
help=False
algo=APPO
env=doom_health_gathering_supreme
@@ -627,6 +1868,8 @@ max_grad_norm=4.0
learning_rate=0.0001
lr_schedule=constant
lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
obs_subtract_mean=0.0
obs_scale=255.0
normalize_input=True
@@ -644,7 +1887,7 @@ stats_avg=100
summaries_use_frameskip=True
heartbeat_interval=20
heartbeat_reporting_interval=600
-train_for_env_steps=25000
+train_for_env_steps=4000000
train_for_seconds=10000000000
save_every_sec=120
keep_checkpoints=2
@@ -708,54 +1951,53 @@ command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_
cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
git_hash=unknown
git_repo_name=not a git repository
-train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher
-[2023-07-04 14:59:32,213][13487] Saving configuration to /content/train_dir/default_experiment/cfg.json...
-[2023-07-04 14:59:32,216][13487] Rollout worker 0 uses device cpu
-[2023-07-04 14:59:32,219][13487] Rollout worker 1 uses device cpu
-[2023-07-04 14:59:32,222][13487] Rollout worker 2 uses device cpu
-[2023-07-04 14:59:32,224][13487] Rollout worker 3 uses device cpu
-[2023-07-04 14:59:32,225][13487] Rollout worker 4 uses device cpu
-[2023-07-04 14:59:32,227][13487] Rollout worker 5 uses device cpu
-[2023-07-04 14:59:32,229][13487] Rollout worker 6 uses device cpu
-[2023-07-04 14:59:32,236][13487] Rollout worker 7 uses device cpu
-[2023-07-04 14:59:32,377][13487] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 14:59:32,379][13487] InferenceWorker_p0-w0: min num requests: 2
-[2023-07-04 14:59:32,419][13487] Starting all processes...
-[2023-07-04 14:59:32,421][13487] Starting process learner_proc0
-[2023-07-04 14:59:32,479][13487] Starting all processes...
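Resuming reuses the existing experiment directory and continues from the newest checkpoint, while the raised train_for_env_steps=4000000 extends the run. A hypothetical sketch of locating the newest checkpoint (the real resume logic may differ; the zero-padded step numbers in the filenames make lexicographic order work):

    from pathlib import Path

    def latest_checkpoint(ckpt_dir="/content/train_dir/default_experiment/checkpoint_p0"):
        # checkpoint_000000230_942080.pth sorts after checkpoint_000000113_462848.pth
        candidates = sorted(Path(ckpt_dir).glob("checkpoint_*.pth"))
        return candidates[-1] if candidates else None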
-[2023-07-04 14:59:32,488][13487] Starting process inference_proc0-0
-[2023-07-04 14:59:32,488][13487] Starting process rollout_proc0
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc1
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc2
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc3
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc4
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc5
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc6
-[2023-07-04 14:59:32,489][13487] Starting process rollout_proc7
-[2023-07-04 14:59:44,353][19035] Worker 6 uses CPU cores [0]
-[2023-07-04 14:59:44,366][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 14:59:44,371][19017] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
-[2023-07-04 14:59:44,475][19017] Num visible devices: 1
-[2023-07-04 14:59:44,533][19017] Starting seed is not provided
-[2023-07-04 14:59:44,534][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 14:59:44,535][19017] Initializing actor-critic model on device cuda:0
-[2023-07-04 14:59:44,538][19017] RunningMeanStd input shape: (3, 72, 128)
-[2023-07-04 14:59:44,539][19017] RunningMeanStd input shape: (1,)
-[2023-07-04 14:59:44,575][19038] Worker 7 uses CPU cores [1]
-[2023-07-04 14:59:44,606][19034] Worker 3 uses CPU cores [1]
-[2023-07-04 14:59:44,636][19030] Worker 0 uses CPU cores [0]
-[2023-07-04 14:59:44,687][19017] ConvEncoder: input_channels=3
-[2023-07-04 14:59:44,717][19032] Worker 1 uses CPU cores [1]
-[2023-07-04 14:59:44,738][19033] Worker 2 uses CPU cores [0]
-[2023-07-04 14:59:44,748][19037] Worker 5 uses CPU cores [1]
-[2023-07-04 14:59:44,792][19036] Worker 4 uses CPU cores [0]
-[2023-07-04 14:59:44,916][19031] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 14:59:44,917][19031] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
-[2023-07-04 14:59:44,942][19031] Num visible devices: 1
-[2023-07-04 14:59:45,039][19017] Conv encoder output size: 512
-[2023-07-04 14:59:45,040][19017] Policy head output size: 512
-[2023-07-04 14:59:45,066][19017] Created Actor Critic model with architecture:
-[2023-07-04 14:59:45,067][19017] ActorCriticSharedWeights(
+[2023-07-04 15:26:57,883][17091] Starting process inference_proc0-0
+[2023-07-04 15:26:57,884][17091] Starting process rollout_proc0
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc1
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc2
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc3
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc4
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc5
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc6
+[2023-07-04 15:26:57,885][17091] Starting process rollout_proc7
+[2023-07-04 15:27:07,723][17772] Worker 7 uses CPU cores [1]
+[2023-07-04 15:27:07,994][17752] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:27:07,995][17752] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-07-04 15:27:08,047][17769] Worker 1 uses CPU cores [1]
+[2023-07-04 15:27:08,077][17752] Num visible devices: 1
+[2023-07-04 15:27:08,099][17752] Starting seed is not provided
+[2023-07-04 15:27:08,100][17752] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:27:08,103][17752] Initializing actor-critic model on device cuda:0
+[2023-07-04 15:27:08,103][17752] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:27:08,104][17752] RunningMeanStd input shape: (1,)
+[2023-07-04 15:27:08,245][17752] ConvEncoder: input_channels=3
+[2023-07-04 15:27:08,494][17765] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:27:08,496][17771] Worker 6 uses CPU cores [0]
+[2023-07-04 15:27:08,497][17765] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-07-04 15:27:08,553][17765] Num visible devices: 1
+[2023-07-04 15:27:08,605][17768] Worker 3 uses CPU cores [1]
+[2023-07-04 15:27:08,639][17766] Worker 0 uses CPU cores [0]
+[2023-07-04 15:27:08,641][17770] Worker 4 uses CPU cores [0]
+[2023-07-04 15:27:08,647][17767] Worker 2 uses CPU cores [0]
+[2023-07-04 15:27:08,690][17773] Worker 5 uses CPU cores [1]
+[2023-07-04 15:27:08,762][17752] Conv encoder output size: 512
+[2023-07-04 15:27:08,763][17752] Policy head output size: 512
+[2023-07-04 15:27:08,787][17752] Created Actor Critic model with architecture:
+[2023-07-04 15:27:08,788][17752] ActorCriticSharedWeights(
  (obs_normalizer): ObservationNormalizer(
    (running_mean_std): RunningMeanStdDictInPlace(
      (running_mean_std): ModuleDict(
@@ -796,388 +2038,338 @@ train_script=.usr.local.lib.python3.10.dist-packages.ipykernel_launcher
    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
  )
)
-[2023-07-04 14:59:48,353][19017] Using optimizer
-[2023-07-04 14:59:48,354][19017] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000112_458752.pth...
-[2023-07-04 14:59:48,388][19017] Loading model from checkpoint
-[2023-07-04 14:59:48,392][19017] Loaded experiment state at self.train_step=112, self.env_steps=458752
-[2023-07-04 14:59:48,393][19017] Initialized policy 0 weights for model version 112
-[2023-07-04 14:59:48,397][19017] LearnerWorker_p0 finished initialization!
-[2023-07-04 14:59:48,398][19017] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 14:59:48,601][19031] RunningMeanStd input shape: (3, 72, 128)
-[2023-07-04 14:59:48,603][19031] RunningMeanStd input shape: (1,)
-[2023-07-04 14:59:48,615][19031] ConvEncoder: input_channels=3
-[2023-07-04 14:59:48,721][19031] Conv encoder output size: 512
-[2023-07-04 14:59:48,721][19031] Policy head output size: 512
-[2023-07-04 14:59:49,975][13487] Inference worker 0-0 is ready!
-[2023-07-04 14:59:49,979][13487] All inference workers are ready! Signal rollout workers to start!
-[2023-07-04 14:59:50,078][19035] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,080][19033] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,081][19030] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,076][19036] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,086][19034] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,080][19038] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,083][19037] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:50,085][19032] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 14:59:51,479][19033] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,483][19036] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,484][19035] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,907][19032] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,911][19034] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,913][19037] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:51,916][19038] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:52,369][13487] Heartbeat connected on Batcher_0
-[2023-07-04 14:59:52,376][13487] Heartbeat connected on LearnerWorker_p0
-[2023-07-04 14:59:52,429][13487] Heartbeat connected on InferenceWorker_p0-w0
-[2023-07-04 14:59:52,575][13487] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 458752. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
-[2023-07-04 14:59:52,797][19035] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:52,800][19036] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:52,889][19030] Decorrelating experience for 0 frames...
-[2023-07-04 14:59:53,212][19037] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:53,221][19034] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:53,242][19038] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:54,084][19036] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:54,093][19035] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:54,305][19032] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:54,508][19034] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:54,545][19033] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:55,417][19037] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:55,562][19036] Decorrelating experience for 96 frames...
-[2023-07-04 14:59:55,672][19035] Decorrelating experience for 96 frames...
-[2023-07-04 14:59:55,815][13487] Heartbeat connected on RolloutWorker_w4
-[2023-07-04 14:59:55,996][13487] Heartbeat connected on RolloutWorker_w6
-[2023-07-04 14:59:56,018][19032] Decorrelating experience for 64 frames...
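"Decorrelating experience" staggers the rollout workers (0, 32, 64, 96 frames) so their episodes do not advance in lockstep and the first training batches are less correlated. A hypothetical sketch of the idea, using the gymnasium five-tuple step API that appears in the tracebacks later in this log:

    def decorrelate(env, num_frames):
        # num_frames differs per worker/env, e.g. 0, 32, 64 or 96 as logged above.
        env.reset()
        for _ in range(num_frames):
            obs, rew, terminated, truncated, info = env.step(env.action_space.sample())
            if terminated or truncated:
                env.reset()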
-[2023-07-04 14:59:56,031][19030] Decorrelating experience for 32 frames...
-[2023-07-04 14:59:56,215][19034] Decorrelating experience for 96 frames...
-[2023-07-04 14:59:56,483][13487] Heartbeat connected on RolloutWorker_w3
-[2023-07-04 14:59:57,431][19030] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:57,575][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 458752. Throughput: 0: 3.2. Samples: 16. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
-[2023-07-04 14:59:57,581][13487] Avg episode reward: [(0, '2.010')]
-[2023-07-04 14:59:57,876][19038] Decorrelating experience for 64 frames...
-[2023-07-04 14:59:58,089][19037] Decorrelating experience for 96 frames...
-[2023-07-04 14:59:58,653][13487] Heartbeat connected on RolloutWorker_w5
-[2023-07-04 15:00:02,577][13487] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 458752. Throughput: 0: 177.0. Samples: 1770. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
-[2023-07-04 15:00:02,580][13487] Avg episode reward: [(0, '3.143')]
-[2023-07-04 15:00:03,425][19017] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
-[2023-07-04 15:00:03,437][13487] Component Batcher_0 stopped!
-[2023-07-04 15:00:03,425][19017] Stopping Batcher_0...
-[2023-07-04 15:00:03,445][19017] Loop batcher_evt_loop terminating...
-[2023-07-04 15:00:03,506][19031] Weights refcount: 2 0
-[2023-07-04 15:00:03,540][19031] Stopping InferenceWorker_p0-w0...
-[2023-07-04 15:00:03,541][19031] Loop inference_proc0-0_evt_loop terminating...
-[2023-07-04 15:00:03,542][13487] Component InferenceWorker_p0-w0 stopped!
-[2023-07-04 15:00:03,574][19034] Stopping RolloutWorker_w3...
-[2023-07-04 15:00:03,575][19034] Loop rollout_proc3_evt_loop terminating...
-[2023-07-04 15:00:03,574][13487] Component RolloutWorker_w3 stopped!
-[2023-07-04 15:00:03,591][13487] Component RolloutWorker_w6 stopped!
-[2023-07-04 15:00:03,597][19035] Stopping RolloutWorker_w6...
-[2023-07-04 15:00:03,598][19035] Loop rollout_proc6_evt_loop terminating...
-[2023-07-04 15:00:03,601][19037] Stopping RolloutWorker_w5...
-[2023-07-04 15:00:03,602][19037] Loop rollout_proc5_evt_loop terminating...
-[2023-07-04 15:00:03,602][13487] Component RolloutWorker_w5 stopped!
-[2023-07-04 15:00:03,564][19032] Decorrelating experience for 96 frames...
-[2023-07-04 15:00:03,633][13487] Component RolloutWorker_w4 stopped!
-[2023-07-04 15:00:03,636][19036] Stopping RolloutWorker_w4...
-[2023-07-04 15:00:03,630][19017] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000051_208896.pth
-[2023-07-04 15:00:03,637][19036] Loop rollout_proc4_evt_loop terminating...
-[2023-07-04 15:00:03,665][19017] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
-[2023-07-04 15:00:03,868][13487] Component LearnerWorker_p0 stopped!
-[2023-07-04 15:00:03,871][19017] Stopping LearnerWorker_p0...
-[2023-07-04 15:00:03,874][19017] Loop learner_proc0_evt_loop terminating...
-[2023-07-04 15:00:03,941][19030] Decorrelating experience for 96 frames...
-[2023-07-04 15:00:04,551][19033] Decorrelating experience for 64 frames...
-[2023-07-04 15:00:04,784][13487] Component RolloutWorker_w1 stopped!
-[2023-07-04 15:00:04,785][19032] Stopping RolloutWorker_w1...
-[2023-07-04 15:00:04,798][19032] Loop rollout_proc1_evt_loop terminating...
-[2023-07-04 15:00:05,354][13487] Component RolloutWorker_w0 stopped!
-[2023-07-04 15:00:05,360][19030] Stopping RolloutWorker_w0...
-[2023-07-04 15:00:05,372][19030] Loop rollout_proc0_evt_loop terminating...
-[2023-07-04 15:00:07,815][19038] Decorrelating experience for 96 frames...
-[2023-07-04 15:00:08,061][19038] Stopping RolloutWorker_w7...
-[2023-07-04 15:00:08,062][19038] Loop rollout_proc7_evt_loop terminating...
-[2023-07-04 15:00:08,061][13487] Component RolloutWorker_w7 stopped!
-[2023-07-04 15:00:08,310][19033] Decorrelating experience for 96 frames...
-[2023-07-04 15:00:08,535][13487] Component RolloutWorker_w2 stopped!
-[2023-07-04 15:00:08,542][13487] Waiting for process learner_proc0 to stop...
-[2023-07-04 15:00:08,535][19033] Stopping RolloutWorker_w2...
-[2023-07-04 15:00:08,546][19033] Loop rollout_proc2_evt_loop terminating...
-[2023-07-04 15:00:08,545][13487] Waiting for process inference_proc0-0 to join...
-[2023-07-04 15:00:08,551][13487] Waiting for process rollout_proc0 to join...
-[2023-07-04 15:00:08,606][13487] Waiting for process rollout_proc1 to join...
-[2023-07-04 15:00:08,612][13487] Waiting for process rollout_proc2 to join...
-[2023-07-04 15:00:09,048][13487] Waiting for process rollout_proc3 to join...
-[2023-07-04 15:00:09,053][13487] Waiting for process rollout_proc4 to join...
-[2023-07-04 15:00:09,056][13487] Waiting for process rollout_proc5 to join...
-[2023-07-04 15:00:09,060][13487] Waiting for process rollout_proc6 to join...
-[2023-07-04 15:00:09,063][13487] Waiting for process rollout_proc7 to join...
-[2023-07-04 15:00:09,065][13487] Batcher 0 profile tree view:
-batching: 0.0380, releasing_batches: 0.0000
-[2023-07-04 15:00:09,068][13487] InferenceWorker_p0-w0 profile tree view:
-wait_policy: 0.0015
-  wait_policy_total: 10.3235
-update_model: 0.0217
-  weight_update: 0.0012
-one_step: 0.0061
-  handle_policy_step: 2.8497
-    deserialize: 0.0506, stack: 0.0128, obs_to_device_normalize: 0.4272, forward: 1.8411, send_messages: 0.0826
-    prepare_outputs: 0.3033
-      to_cpu: 0.1717
-[2023-07-04 15:00:09,070][13487] Learner 0 profile tree view:
-misc: 0.0000, prepare_batch: 3.6049
-train: 0.9956
-  epoch_init: 0.0000, minibatch_init: 0.0000, losses_postprocess: 0.0002, kl_divergence: 0.0003, after_optimizer: 0.0048
-  calculate_losses: 0.1297
-    losses_init: 0.0000, forward_head: 0.1186, bptt_initial: 0.0057, tail: 0.0008, advantages_returns: 0.0009, losses: 0.0021
-    bptt: 0.0014
-      bptt_forward_core: 0.0014
-  update: 0.8563
-    clip: 0.0018
-[2023-07-04 15:00:09,071][13487] RolloutWorker_w0 profile tree view:
-wait_for_trajectories: 0.0299, enqueue_policy_requests: 0.0007
-[2023-07-04 15:00:09,074][13487] RolloutWorker_w7 profile tree view:
-wait_for_trajectories: 0.0003, enqueue_policy_requests: 0.0026
-[2023-07-04 15:00:09,075][13487] Loop Runner_EvtLoop terminating...
-[2023-07-04 15:00:09,078][13487] Runner profile tree view:
-main_loop: 36.6591
-[2023-07-04 15:00:09,080][13487] Collected {0: 462848}, FPS: 111.7
-[2023-07-04 15:02:33,856][19799] Saving configuration to /content/train_dir/default_experiment/config.json...
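The final FPS figure is simply the frames collected during this run divided by the main_loop wall time, which checks out against the numbers above:

    # This run resumed at env_steps=458752 and ended at 462848:
    print((462848 - 458752) / 36.6591)   # ~111.7, matching "FPS: 111.7"
    # The later 15:28 run checks out the same way:
    # (1101824 - 942080) / 76.2025 ~= 2096.3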
-[2023-07-04 15:02:33,868][19799] Rollout worker 0 uses device cpu
-[2023-07-04 15:02:33,872][19799] Rollout worker 1 uses device cpu
-[2023-07-04 15:02:33,874][19799] Rollout worker 2 uses device cpu
-[2023-07-04 15:02:33,883][19799] Rollout worker 3 uses device cpu
-[2023-07-04 15:02:33,889][19799] Rollout worker 4 uses device cpu
-[2023-07-04 15:02:33,897][19799] Rollout worker 5 uses device cpu
-[2023-07-04 15:02:33,898][19799] Rollout worker 6 uses device cpu
-[2023-07-04 15:02:33,899][19799] Rollout worker 7 uses device cpu
-[2023-07-04 15:02:34,113][19799] Using GPUs [0] for process 0 (actually maps to GPUs [0])
-[2023-07-04 15:02:34,121][19799] InferenceWorker_p0-w0: min num requests: 2
-[2023-07-04 15:02:34,169][19799] Starting all processes...
-[2023-07-04 15:02:34,177][19799] Starting process learner_proc0
-[2023-07-04 15:02:34,188][19799] EvtLoop [Runner_EvtLoop, process=main process 19799] unhandled exception in slot='_on_start' connected to emitter=Emitter(object_id='Runner_EvtLoop', signal_name='start'), args=()
+[2023-07-04 15:27:10,523][17752] Using optimizer
+[2023-07-04 15:27:10,525][17752] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000230_942080.pth...
+[2023-07-04 15:27:10,566][17752] Loading model from checkpoint
+[2023-07-04 15:27:10,572][17752] Loaded experiment state at self.train_step=230, self.env_steps=942080
+[2023-07-04 15:27:10,573][17752] Initialized policy 0 weights for model version 230
+[2023-07-04 15:27:10,583][17752] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-04 15:27:10,590][17752] LearnerWorker_p0 finished initialization!
+[2023-07-04 15:27:10,791][17765] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-04 15:27:10,792][17765] RunningMeanStd input shape: (1,)
+[2023-07-04 15:27:10,824][17765] ConvEncoder: input_channels=3
+[2023-07-04 15:27:10,989][17765] Conv encoder output size: 512
+[2023-07-04 15:27:10,990][17765] Policy head output size: 512
+[2023-07-04 15:27:11,061][17091] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 942080. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:27:12,899][17091] Inference worker 0-0 is ready!
+[2023-07-04 15:27:12,902][17091] All inference workers are ready! Signal rollout workers to start!
+[2023-07-04 15:27:13,041][17768] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,039][17773] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,051][17769] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,054][17772] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,107][17767] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,096][17766] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,116][17770] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:13,121][17771] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-07-04 15:27:14,009][17767] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,012][17766] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,426][17770] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,724][17773] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,748][17772] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,754][17768] Decorrelating experience for 0 frames...
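RunningMeanStd tracks running statistics for the (3, 72, 128) image observations and the (1,) scalar returns so inputs can be normalized as (x - mean) / sqrt(var + eps). A minimal NumPy sketch using the standard parallel-moments update; Sample Factory's in-place implementation differs in detail:

    import numpy as np

    class RunningMeanStd:
        def __init__(self, shape, eps=1e-8):
            self.mean = np.zeros(shape)
            self.var = np.ones(shape)
            self.count = eps

        def update(self, batch):
            # Chan et al. parallel combination of (mean, var, count) moments.
            b_mean, b_var, b_n = batch.mean(0), batch.var(0), batch.shape[0]
            delta = b_mean - self.mean
            total = self.count + b_n
            self.mean = self.mean + delta * b_n / total
            self.var = (self.var * self.count + b_var * b_n
                        + delta ** 2 * self.count * b_n / total) / total
            self.count = total

        def normalize(self, x, eps=1e-8):
            return (x - self.mean) / np.sqrt(self.var + eps)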
+[2023-07-04 15:27:14,751][17769] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:14,926][17770] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,047][17771] Decorrelating experience for 0 frames...
+[2023-07-04 15:27:16,061][17091] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 942080. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:27:16,087][17767] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,085][17766] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,290][17769] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,299][17768] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,302][17772] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:16,609][17773] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:17,269][17771] Decorrelating experience for 32 frames...
+[2023-07-04 15:27:17,477][17772] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:17,481][17768] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:17,490][17767] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:17,503][17766] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:17,785][17091] Heartbeat connected on Batcher_0
+[2023-07-04 15:27:17,790][17091] Heartbeat connected on LearnerWorker_p0
+[2023-07-04 15:27:17,838][17091] Heartbeat connected on InferenceWorker_p0-w0
+[2023-07-04 15:27:18,404][17771] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:18,423][17773] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:18,428][17769] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:18,458][17767] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:18,669][17091] Heartbeat connected on RolloutWorker_w2
+[2023-07-04 15:27:19,771][17772] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:19,776][17768] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:19,988][17769] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:20,168][17091] Heartbeat connected on RolloutWorker_w7
+[2023-07-04 15:27:20,206][17091] Heartbeat connected on RolloutWorker_w3
+[2023-07-04 15:27:20,580][17091] Heartbeat connected on RolloutWorker_w1
+[2023-07-04 15:27:21,061][17091] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 942080. Throughput: 0: 1.2. Samples: 12. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-07-04 15:27:21,063][17091] Avg episode reward: [(0, '0.640')]
+[2023-07-04 15:27:21,111][17770] Decorrelating experience for 64 frames...
+[2023-07-04 15:27:21,245][17771] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:21,585][17091] Heartbeat connected on RolloutWorker_w6
+[2023-07-04 15:27:22,697][17773] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:23,487][17091] Heartbeat connected on RolloutWorker_w5
+[2023-07-04 15:27:24,202][17752] Signal inference workers to stop experience collection...
+[2023-07-04 15:27:24,224][17765] InferenceWorker_p0-w0: stopping experience collection
+[2023-07-04 15:27:24,569][17770] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:24,775][17091] Heartbeat connected on RolloutWorker_w4
+[2023-07-04 15:27:24,918][17766] Decorrelating experience for 96 frames...
+[2023-07-04 15:27:25,019][17752] Signal inference workers to resume experience collection...
+[2023-07-04 15:27:25,020][17765] InferenceWorker_p0-w0: resuming experience collection
+[2023-07-04 15:27:25,290][17091] Heartbeat connected on RolloutWorker_w0
+[2023-07-04 15:27:26,061][17091] Fps is (10 sec: 409.6, 60 sec: 273.1, 300 sec: 273.1). Total num frames: 946176. Throughput: 0: 147.1. Samples: 2206. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2023-07-04 15:27:26,066][17091] Avg episode reward: [(0, '3.416')]
+[2023-07-04 15:27:31,061][17091] Fps is (10 sec: 2048.0, 60 sec: 1024.0, 300 sec: 1024.0). Total num frames: 962560. Throughput: 0: 273.1. Samples: 5462. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:27:31,068][17091] Avg episode reward: [(0, '6.261')]
+[2023-07-04 15:27:36,061][17091] Fps is (10 sec: 2867.2, 60 sec: 1310.7, 300 sec: 1310.7). Total num frames: 974848. Throughput: 0: 303.0. Samples: 7576. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:27:36,071][17091] Avg episode reward: [(0, '10.258')]
+[2023-07-04 15:27:37,196][17765] Updated weights for policy 0, policy_version 240 (0.0036)
+[2023-07-04 15:27:41,061][17091] Fps is (10 sec: 3686.4, 60 sec: 1911.5, 300 sec: 1911.5). Total num frames: 999424. Throughput: 0: 460.5. Samples: 13816. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:27:41,069][17091] Avg episode reward: [(0, '10.577')]
+[2023-07-04 15:27:46,061][17091] Fps is (10 sec: 4096.0, 60 sec: 2106.5, 300 sec: 2106.5). Total num frames: 1015808. Throughput: 0: 558.4. Samples: 19544. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-07-04 15:27:46,063][17091] Avg episode reward: [(0, '11.373')]
+[2023-07-04 15:27:47,738][17765] Updated weights for policy 0, policy_version 250 (0.0015)
+[2023-07-04 15:27:51,061][17091] Fps is (10 sec: 3276.8, 60 sec: 2252.8, 300 sec: 2252.8). Total num frames: 1032192. Throughput: 0: 541.8. Samples: 21670. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-07-04 15:27:51,063][17091] Avg episode reward: [(0, '12.366')]
+[2023-07-04 15:27:51,071][17752] Saving new best policy, reward=12.366!
+[2023-07-04 15:27:56,061][17091] Fps is (10 sec: 2867.2, 60 sec: 2275.6, 300 sec: 2275.6). Total num frames: 1044480. Throughput: 0: 577.3. Samples: 25978. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-07-04 15:27:56,068][17091] Avg episode reward: [(0, '11.984')]
+[2023-07-04 15:28:01,063][17091] Fps is (10 sec: 2866.5, 60 sec: 2375.6, 300 sec: 2375.6). Total num frames: 1060864. Throughput: 0: 687.3. Samples: 30928. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-07-04 15:28:01,070][17091] Avg episode reward: [(0, '12.085')]
+[2023-07-04 15:28:01,385][17765] Updated weights for policy 0, policy_version 260 (0.0013)
+[2023-07-04 15:28:06,061][17091] Fps is (10 sec: 3686.4, 60 sec: 2532.1, 300 sec: 2532.1). Total num frames: 1081344. Throughput: 0: 758.7. Samples: 34154. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:28:06,063][17091] Avg episode reward: [(0, '12.578')]
+[2023-07-04 15:28:06,071][17752] Saving new best policy, reward=12.578!
+[2023-07-04 15:28:11,061][17091] Fps is (10 sec: 3277.5, 60 sec: 2525.9, 300 sec: 2525.9). Total num frames: 1093632. Throughput: 0: 790.4. Samples: 37776. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-07-04 15:28:11,063][17091] Avg episode reward: [(0, '12.210')]
+[2023-07-04 15:28:14,024][17091] Keyboard interrupt detected in the event loop EvtLoop [Runner_EvtLoop, process=main process 17091], exiting...
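"Saving new best policy, reward=12.366!" is high-watermark logic: a separate best-policy checkpoint is written whenever the average reward exceeds the previous best. A hypothetical sketch of the pattern:

    best_reward = float("-inf")

    def maybe_save_best(avg_reward, save_checkpoint):
        global best_reward
        if avg_reward > best_reward:
            best_reward = avg_reward
            save_checkpoint()  # e.g. write a best_*.pth next to the regular checkpoints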
+[2023-07-04 15:28:14,026][17091] Runner profile tree view:
+main_loop: 76.2025
+[2023-07-04 15:28:14,028][17752] Stopping Batcher_0...
+[2023-07-04 15:28:14,030][17752] Loop batcher_evt_loop terminating...
+[2023-07-04 15:28:14,028][17091] Collected {0: 1101824}, FPS: 2096.3
+[2023-07-04 15:28:14,030][17752] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000269_1101824.pth...
+[2023-07-04 15:28:14,048][17768] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance3'), args=(1, 0)
Traceback (most recent call last):
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
-  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
-    self._start_processes()
-  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
-    p.start()
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
-    self._process.start()
-  File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
-    self._popen = self._Popen(self)
-  File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
-    return Popen(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
-    super().__init__(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
-    self._launch(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
-    reduction.dump(process_obj, fp)
-  File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
-    ForkingPickler(file, protocol).dump(obj)
-TypeError: cannot pickle 'TLSBuffer' object
-[2023-07-04 15:02:34,196][19799] Unhandled exception cannot pickle 'TLSBuffer' object in evt loop Runner_EvtLoop
-[2023-07-04 15:02:34,203][19799] Uncaught exception in Runner evt loop
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 117, in step
+    obs, info["reset_info"] = self.env.reset()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 462, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 414, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 346, in reset
+    self.game.new_episode()
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,144][17768] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc3_evt_loop
+[2023-07-04 15:28:14,156][17765] Weights refcount: 2 0
+[2023-07-04 15:28:14,086][17773] EvtLoop [rollout_proc5_evt_loop, process=rollout_proc5] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance5'), args=(0, 0)
Traceback (most recent call last):
-  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner.py", line 770, in run
-    evt_loop_status = self.event_loop.exec()
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 403, in exec
-    raise exc
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 399, in exec
-    while self._loop_iteration():
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 383, in _loop_iteration
-    self._process_signal(s)
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 358, in _process_signal
-    raise exc
  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
    slot_callable(*args)
-  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 49, in _on_start
-    self._start_processes()
-  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/runners/runner_parallel.py", line 56, in _start_processes
-    p.start()
-  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 515, in start
-    self._process.start()
-  File "/usr/lib/python3.10/multiprocessing/process.py", line 121, in start
-    self._popen = self._Popen(self)
-  File "/usr/lib/python3.10/multiprocessing/context.py", line 288, in _Popen
-    return Popen(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 32, in __init__
-    super().__init__(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_fork.py", line 19, in __init__
-    self._launch(process_obj)
-  File "/usr/lib/python3.10/multiprocessing/popen_spawn_posix.py", line 47, in _launch
-    reduction.dump(process_obj, fp)
-  File "/usr/lib/python3.10/multiprocessing/reduction.py", line 60, in dump
-    ForkingPickler(file, protocol).dump(obj)
-TypeError: cannot pickle 'TLSBuffer' object
-[2023-07-04 15:02:34,210][19799] Runner profile tree view:
-main_loop: 0.0419
-[2023-07-04 15:02:34,212][19799] Collected {}, FPS: 0.0
-[2023-07-04 15:02:34,410][19799] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
-[2023-07-04 15:02:34,413][19799] Overriding arg 'num_workers' with value 1 passed from command line
-[2023-07-04 15:02:34,423][19799] Adding new argument 'no_render'=True that is not in the saved config file!
-[2023-07-04 15:02:34,428][19799] Adding new argument 'save_video'=True that is not in the saved config file!
-[2023-07-04 15:02:34,436][19799] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
-[2023-07-04 15:02:34,442][19799] Adding new argument 'video_name'=None that is not in the saved config file!
-[2023-07-04 15:02:34,447][19799] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
-[2023-07-04 15:02:34,457][19799] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
-[2023-07-04 15:02:34,459][19799] Adding new argument 'push_to_hub'=False that is not in the saved config file!
-[2023-07-04 15:02:34,460][19799] Adding new argument 'hf_repository'=None that is not in the saved config file!
-[2023-07-04 15:02:34,461][19799] Adding new argument 'policy_index'=0 that is not in the saved config file!
-[2023-07-04 15:02:34,462][19799] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
-[2023-07-04 15:02:34,463][19799] Adding new argument 'enjoy_script'=None that is not in the saved config file!
-[2023-07-04 15:02:34,465][19799] Using frameskip 1 and render_action_repeat=4 for evaluation
-[2023-07-04 15:02:34,528][19799] Doom resolution: 160x120, resize resolution: (128, 72)
-[2023-07-04 15:02:34,536][19799] RunningMeanStd input shape: (3, 72, 128)
-[2023-07-04 15:02:34,539][19799] RunningMeanStd input shape: (1,)
-[2023-07-04 15:02:34,576][19799] ConvEncoder: input_channels=3
-[2023-07-04 15:02:34,894][19799] Conv encoder output size: 512
-[2023-07-04 15:02:34,897][19799] Policy head output size: 512
-[2023-07-04 15:02:42,149][19799] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
-[2023-07-04 15:02:44,118][19799] Num frames 100...
-[2023-07-04 15:02:44,316][19799] Num frames 200...
-[2023-07-04 15:02:44,533][19799] Num frames 300...
-[2023-07-04 15:02:44,758][19799] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
-[2023-07-04 15:02:44,761][19799] Avg episode reward: 3.840, avg true_objective: 3.840
-[2023-07-04 15:02:44,802][19799] Num frames 400...
-[2023-07-04 15:02:44,936][19799] Num frames 500...
-[2023-07-04 15:02:45,089][19799] Num frames 600...
-[2023-07-04 15:02:45,228][19799] Num frames 700...
-[2023-07-04 15:02:45,372][19799] Num frames 800...
-[2023-07-04 15:02:45,425][19799] Avg episode rewards: #0: 4.500, true rewards: #0: 4.000
-[2023-07-04 15:02:45,427][19799] Avg episode reward: 4.500, avg true_objective: 4.000
-[2023-07-04 15:02:45,569][19799] Num frames 900...
-[2023-07-04 15:02:45,711][19799] Num frames 1000...
-[2023-07-04 15:02:45,843][19799] Num frames 1100...
-[2023-07-04 15:02:45,997][19799] Num frames 1200...
-[2023-07-04 15:02:46,175][19799] Avg episode rewards: #0: 5.267, true rewards: #0: 4.267
-[2023-07-04 15:02:46,177][19799] Avg episode reward: 5.267, avg true_objective: 4.267
-[2023-07-04 15:02:46,213][19799] Num frames 1300...
-[2023-07-04 15:02:46,350][19799] Num frames 1400...
-[2023-07-04 15:02:46,484][19799] Num frames 1500...
-[2023-07-04 15:02:46,631][19799] Num frames 1600...
-[2023-07-04 15:02:46,781][19799] Num frames 1700...
-[2023-07-04 15:02:46,874][19799] Avg episode rewards: #0: 5.320, true rewards: #0: 4.320
-[2023-07-04 15:02:46,876][19799] Avg episode reward: 5.320, avg true_objective: 4.320
-[2023-07-04 15:02:46,985][19799] Num frames 1800...
-[2023-07-04 15:02:47,125][19799] Num frames 1900...
-[2023-07-04 15:02:47,265][19799] Num frames 2000...
-[2023-07-04 15:02:47,411][19799] Num frames 2100...
-[2023-07-04 15:02:47,566][19799] Avg episode rewards: #0: 5.352, true rewards: #0: 4.352
-[2023-07-04 15:02:47,569][19799] Avg episode reward: 5.352, avg true_objective: 4.352
-[2023-07-04 15:02:47,610][19799] Num frames 2200...
-[2023-07-04 15:02:47,744][19799] Num frames 2300...
-[2023-07-04 15:02:47,891][19799] Num frames 2400...
-[2023-07-04 15:02:48,040][19799] Num frames 2500...
-[2023-07-04 15:02:48,182][19799] Avg episode rewards: #0: 5.100, true rewards: #0: 4.267
-[2023-07-04 15:02:48,184][19799] Avg episode reward: 5.100, avg true_objective: 4.267
-[2023-07-04 15:02:48,256][19799] Num frames 2600...
-[2023-07-04 15:02:48,392][19799] Num frames 2700...
-[2023-07-04 15:02:48,531][19799] Num frames 2800...
-[2023-07-04 15:02:48,667][19799] Num frames 2900...
-[2023-07-04 15:02:48,811][19799] Num frames 3000...
-[2023-07-04 15:02:48,881][19799] Avg episode rewards: #0: 5.154, true rewards: #0: 4.297
-[2023-07-04 15:02:48,883][19799] Avg episode reward: 5.154, avg true_objective: 4.297
-[2023-07-04 15:02:49,008][19799] Num frames 3100...
-[2023-07-04 15:02:49,141][19799] Num frames 3200...
-[2023-07-04 15:02:49,277][19799] Num frames 3300...
-[2023-07-04 15:02:49,472][19799] Avg episode rewards: #0: 4.990, true rewards: #0: 4.240
-[2023-07-04 15:02:49,475][19799] Avg episode reward: 4.990, avg true_objective: 4.240
-[2023-07-04 15:02:49,493][19799] Num frames 3400...
-[2023-07-04 15:02:49,629][19799] Num frames 3500...
-[2023-07-04 15:02:49,757][19799] Num frames 3600...
-[2023-07-04 15:02:49,890][19799] Num frames 3700...
-[2023-07-04 15:02:50,052][19799] Avg episode rewards: #0: 4.862, true rewards: #0: 4.196
-[2023-07-04 15:02:50,054][19799] Avg episode reward: 4.862, avg true_objective: 4.196
-[2023-07-04 15:02:50,097][19799] Num frames 3800...
-[2023-07-04 15:02:50,234][19799] Num frames 3900...
-[2023-07-04 15:02:50,385][19799] Num frames 4000...
-[2023-07-04 15:02:50,510][19799] Num frames 4100...
-[2023-07-04 15:02:50,696][19799] Avg episode rewards: #0: 4.792, true rewards: #0: 4.192
-[2023-07-04 15:02:50,698][19799] Avg episode reward: 4.792, avg true_objective: 4.192
-[2023-07-04 15:03:17,366][19799] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
-[2023-07-04 15:05:23,598][19799] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
-[2023-07-04 15:05:23,604][19799] Overriding arg 'num_workers' with value 1 passed from command line
-[2023-07-04 15:05:23,611][19799] Adding new argument 'no_render'=True that is not in the saved config file!
-[2023-07-04 15:05:23,613][19799] Adding new argument 'save_video'=True that is not in the saved config file!
-[2023-07-04 15:05:23,620][19799] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
-[2023-07-04 15:05:23,627][19799] Adding new argument 'video_name'=None that is not in the saved config file!
-[2023-07-04 15:05:23,629][19799] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
-[2023-07-04 15:05:23,630][19799] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
-[2023-07-04 15:05:23,634][19799] Adding new argument 'push_to_hub'=True that is not in the saved config file!
-[2023-07-04 15:05:23,635][19799] Adding new argument 'hf_repository'='HilbertS/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
-[2023-07-04 15:05:23,640][19799] Adding new argument 'policy_index'=0 that is not in the saved config file!
-[2023-07-04 15:05:23,641][19799] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
-[2023-07-04 15:05:23,643][19799] Adding new argument 'enjoy_script'=None that is not in the saved config file!
-[2023-07-04 15:05:23,648][19799] Using frameskip 1 and render_action_repeat=4 for evaluation
-[2023-07-04 15:05:23,691][19799] RunningMeanStd input shape: (3, 72, 128)
-[2023-07-04 15:05:23,697][19799] RunningMeanStd input shape: (1,)
-[2023-07-04 15:05:23,728][19799] ConvEncoder: input_channels=3
-[2023-07-04 15:05:23,875][19799] Conv encoder output size: 512
-[2023-07-04 15:05:23,894][19799] Policy head output size: 512
-[2023-07-04 15:05:23,958][19799] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000113_462848.pth...
-[2023-07-04 15:05:25,102][19799] Num frames 100...
-[2023-07-04 15:05:25,316][19799] Num frames 200...
-[2023-07-04 15:05:25,517][19799] Num frames 300...
-[2023-07-04 15:05:25,721][19799] Num frames 400...
-[2023-07-04 15:05:25,882][19799] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
-[2023-07-04 15:05:25,888][19799] Avg episode reward: 5.480, avg true_objective: 4.480
-[2023-07-04 15:05:26,109][19799] Num frames 500...
-[2023-07-04 15:05:26,368][19799] Num frames 600...
-[2023-07-04 15:05:26,570][19799] Num frames 700...
-[2023-07-04 15:05:26,811][19799] Num frames 800...
-[2023-07-04 15:05:26,961][19799] Avg episode rewards: #0: 4.660, true rewards: #0: 4.160
-[2023-07-04 15:05:26,968][19799] Avg episode reward: 4.660, avg true_objective: 4.160
-[2023-07-04 15:05:27,085][19799] Num frames 900...
-[2023-07-04 15:05:27,210][19799] Num frames 1000...
-[2023-07-04 15:05:27,342][19799] Num frames 1100...
-[2023-07-04 15:05:27,468][19799] Num frames 1200...
-[2023-07-04 15:05:27,605][19799] Num frames 1300...
-[2023-07-04 15:05:27,749][19799] Num frames 1400...
-[2023-07-04 15:05:27,836][19799] Avg episode rewards: #0: 6.070, true rewards: #0: 4.737
-[2023-07-04 15:05:27,838][19799] Avg episode reward: 6.070, avg true_objective: 4.737
-[2023-07-04 15:05:27,940][19799] Num frames 1500...
-[2023-07-04 15:05:28,080][19799] Num frames 1600...
-[2023-07-04 15:05:28,212][19799] Num frames 1700...
-[2023-07-04 15:05:28,354][19799] Num frames 1800...
-[2023-07-04 15:05:28,420][19799] Avg episode rewards: #0: 5.513, true rewards: #0: 4.512
-[2023-07-04 15:05:28,422][19799] Avg episode reward: 5.513, avg true_objective: 4.512
-[2023-07-04 15:05:28,553][19799] Num frames 1900...
-[2023-07-04 15:05:28,681][19799] Num frames 2000...
-[2023-07-04 15:05:28,815][19799] Num frames 2100...
-[2023-07-04 15:05:28,982][19799] Avg episode rewards: #0: 5.178, true rewards: #0: 4.378
-[2023-07-04 15:05:28,985][19799] Avg episode reward: 5.178, avg true_objective: 4.378
-[2023-07-04 15:05:29,001][19799] Num frames 2200...
-[2023-07-04 15:05:29,130][19799] Num frames 2300...
-[2023-07-04 15:05:29,274][19799] Num frames 2400...
-[2023-07-04 15:05:29,415][19799] Num frames 2500...
-[2023-07-04 15:05:29,554][19799] Num frames 2600...
-[2023-07-04 15:05:29,694][19799] Num frames 2700...
-[2023-07-04 15:05:29,793][19799] Avg episode rewards: #0: 5.555, true rewards: #0: 4.555
-[2023-07-04 15:05:29,795][19799] Avg episode reward: 5.555, avg true_objective: 4.555
-[2023-07-04 15:05:29,900][19799] Num frames 2800...
-[2023-07-04 15:05:30,041][19799] Num frames 2900...
-[2023-07-04 15:05:30,169][19799] Num frames 3000...
-[2023-07-04 15:05:30,302][19799] Num frames 3100...
-[2023-07-04 15:05:30,471][19799] Avg episode rewards: #0: 5.544, true rewards: #0: 4.544
-[2023-07-04 15:05:30,473][19799] Avg episode reward: 5.544, avg true_objective: 4.544
-[2023-07-04 15:05:30,501][19799] Num frames 3200...
-[2023-07-04 15:05:30,636][19799] Num frames 3300...
-[2023-07-04 15:05:30,768][19799] Num frames 3400...
-[2023-07-04 15:05:30,916][19799] Num frames 3500...
-[2023-07-04 15:05:31,058][19799] Num frames 3600...
-[2023-07-04 15:05:31,154][19799] Avg episode rewards: #0: 5.536, true rewards: #0: 4.536
-[2023-07-04 15:05:31,156][19799] Avg episode reward: 5.536, avg true_objective: 4.536
-[2023-07-04 15:05:31,255][19799] Num frames 3700...
-[2023-07-04 15:05:31,392][19799] Num frames 3800...
-[2023-07-04 15:05:31,527][19799] Num frames 3900...
-[2023-07-04 15:05:31,669][19799] Num frames 4000...
-[2023-07-04 15:05:31,744][19799] Avg episode rewards: #0: 5.348, true rewards: #0: 4.459
-[2023-07-04 15:05:31,746][19799] Avg episode reward: 5.348, avg true_objective: 4.459
-[2023-07-04 15:05:31,863][19799] Num frames 4100...
-[2023-07-04 15:05:31,996][19799] Num frames 4200...
-[2023-07-04 15:05:32,140][19799] Num frames 4300...
-[2023-07-04 15:05:32,267][19799] Num frames 4400...
-[2023-07-04 15:05:32,403][19799] Num frames 4500...
-[2023-07-04 15:05:32,530][19799] Avg episode rewards: #0: 5.557, true rewards: #0: 4.557
-[2023-07-04 15:05:32,531][19799] Avg episode reward: 5.557, avg true_objective: 4.557
-[2023-07-04 15:06:01,713][19799] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
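In the 15:05 run above, push_to_hub=True and the hf_repository argument correspond to uploading the experiment directory (checkpoints, config.json, replay.mp4) to the Hugging Face Hub. A sketch of an equivalent upload done directly with huggingface_hub; the enjoy script wraps similar functionality:

    from huggingface_hub import HfApi

    HfApi().upload_folder(
        folder_path="/content/train_dir/default_experiment",
        repo_id="HilbertS/rl_course_vizdoom_health_gathering_supreme",
        repo_type="model",
    )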
+[2023-07-04 15:28:14,176][17765] Stopping InferenceWorker_p0-w0...
+[2023-07-04 15:28:14,178][17765] Loop inference_proc0-0_evt_loop terminating...
+[2023-07-04 15:28:14,179][17767] EvtLoop [rollout_proc2_evt_loop, process=rollout_proc2] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance2'), args=(1, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,164][17773] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc5_evt_loop
+[2023-07-04 15:28:14,107][17769] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance1'), args=(1, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,214][17769] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc1_evt_loop
+[2023-07-04 15:28:14,203][17770] EvtLoop [rollout_proc4_evt_loop, process=rollout_proc4] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance4'), args=(1, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,215][17770] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc4_evt_loop
+[2023-07-04 15:28:14,146][17772] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance7'), args=(0, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,220][17772] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc7_evt_loop
+[2023-07-04 15:28:14,208][17766] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance0'), args=(0, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,226][17766] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc0_evt_loop
+[2023-07-04 15:28:14,181][17767] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc2_evt_loop
+[2023-07-04 15:28:14,352][17771] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='advance_rollouts' connected to emitter=Emitter(object_id='InferenceWorker_p0-w0', signal_name='advance6'), args=(1, 0)
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 241, in advance_rollouts
+    complete_rollouts, episodic_stats = runner.advance_rollouts(policy_id, self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 634, in advance_rollouts
+    new_obs, rewards, terminated, truncated, infos = e.step(actions)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 129, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 115, in step
+    obs, rew, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 33, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 469, in step
+    observation, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 86, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 408, in step
+    return self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 54, in step
+    obs, reward, terminated, truncated, info = self.env.step(action)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 452, in step
+    reward = self.game.make_action(actions_flattened, self.skip_frames)
+vizdoom.vizdoom.SignalException: Signal SIGINT received. ViZDoom instance has been closed.
+[2023-07-04 15:28:14,354][17771] Unhandled exception Signal SIGINT received. ViZDoom instance has been closed. in evt loop rollout_proc6_evt_loop
+[2023-07-04 15:28:14,472][17752] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000178_729088.pth
+[2023-07-04 15:28:14,514][17752] Stopping LearnerWorker_p0...
+[2023-07-04 15:28:14,523][17752] Loop learner_proc0_evt_loop terminating...