diff --git "a/sf_log.txt" "b/sf_log.txt" --- "a/sf_log.txt" +++ "b/sf_log.txt" @@ -1115,3 +1115,1092 @@ main_loop: 1122.0795 [2023-02-25 14:31:20,054][00869] Avg episode rewards: #0: 20.329, true rewards: #0: 9.229 [2023-02-25 14:31:20,056][00869] Avg episode reward: 20.329, avg true_objective: 9.229 [2023-02-25 14:32:13,809][00869] Replay video saved to /content/train_dir/default_experiment/replay.mp4! +[2023-02-25 14:32:17,582][00869] The model has been pushed to https://huggingface.co/chist/rl_course_vizdoom_health_gathering_supreme +[2023-02-25 14:34:35,570][00869] Environment doom_basic already registered, overwriting... +[2023-02-25 14:34:35,573][00869] Environment doom_two_colors_easy already registered, overwriting... +[2023-02-25 14:34:35,575][00869] Environment doom_two_colors_hard already registered, overwriting... +[2023-02-25 14:34:35,577][00869] Environment doom_dm already registered, overwriting... +[2023-02-25 14:34:35,578][00869] Environment doom_dwango5 already registered, overwriting... +[2023-02-25 14:34:35,579][00869] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-02-25 14:34:35,581][00869] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-02-25 14:34:35,582][00869] Environment doom_my_way_home already registered, overwriting... +[2023-02-25 14:34:35,584][00869] Environment doom_deadly_corridor already registered, overwriting... +[2023-02-25 14:34:35,586][00869] Environment doom_defend_the_center already registered, overwriting... +[2023-02-25 14:34:35,587][00869] Environment doom_defend_the_line already registered, overwriting... +[2023-02-25 14:34:35,589][00869] Environment doom_health_gathering already registered, overwriting... +[2023-02-25 14:34:35,590][00869] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-02-25 14:34:35,592][00869] Environment doom_battle already registered, overwriting... +[2023-02-25 14:34:35,593][00869] Environment doom_battle2 already registered, overwriting... +[2023-02-25 14:34:35,595][00869] Environment doom_duel_bots already registered, overwriting... +[2023-02-25 14:34:35,597][00869] Environment doom_deathmatch_bots already registered, overwriting... +[2023-02-25 14:34:35,598][00869] Environment doom_duel already registered, overwriting... +[2023-02-25 14:34:35,600][00869] Environment doom_deathmatch_full already registered, overwriting... +[2023-02-25 14:34:35,601][00869] Environment doom_benchmark already registered, overwriting... +[2023-02-25 14:34:35,603][00869] register_encoder_factory: +[2023-02-25 14:34:35,637][00869] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-02-25 14:34:35,638][00869] Overriding arg 'train_for_env_steps' with value 8000000 passed from command line +[2023-02-25 14:34:35,652][00869] Experiment dir /content/train_dir/default_experiment already exists! +[2023-02-25 14:34:35,653][00869] Resuming existing experiment from /content/train_dir/default_experiment... 
+[2023-02-25 14:34:35,654][00869] Weights and Biases integration disabled
+[2023-02-25 14:34:35,658][00869] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2023-02-25 14:34:37,836][00869] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=8000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2023-02-25 14:34:37,839][00869] Saving configuration to /content/train_dir/default_experiment/config.json...
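Editor's note: the configuration above fixes the run's sampling geometry, and a little arithmetic (values copied from the config) explains the counters seen later in the log: one pass over all envs yields exactly one training batch, and with summaries_use_frameskip=True the "Total num frames" counter advances by env_frameskip skipped frames per sampled transition.

```python
# Back-of-the-envelope check using values from the config above.
num_workers = 8
num_envs_per_worker = 4
rollout = 32
batch_size = 1024
env_frameskip = 4

# One rollout from every env is exactly one training batch:
transitions_per_pass = num_workers * num_envs_per_worker * rollout
assert transitions_per_pass == batch_size  # 8 * 4 * 32 == 1024

# Each policy version (one batch) therefore accounts for about
# batch_size * env_frameskip environment frames:
frames_per_version = batch_size * env_frameskip  # 4096
# Matches the checkpoint names below, e.g. checkpoint_000000978_4005888:
assert 978 * frames_per_version == 4005888
```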
+[2023-02-25 14:34:37,842][00869] Rollout worker 0 uses device cpu
+[2023-02-25 14:34:37,844][00869] Rollout worker 1 uses device cpu
+[2023-02-25 14:34:37,845][00869] Rollout worker 2 uses device cpu
+[2023-02-25 14:34:37,848][00869] Rollout worker 3 uses device cpu
+[2023-02-25 14:34:37,850][00869] Rollout worker 4 uses device cpu
+[2023-02-25 14:34:37,851][00869] Rollout worker 5 uses device cpu
+[2023-02-25 14:34:37,853][00869] Rollout worker 6 uses device cpu
+[2023-02-25 14:34:37,854][00869] Rollout worker 7 uses device cpu
+[2023-02-25 14:34:38,005][00869] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-25 14:34:38,008][00869] InferenceWorker_p0-w0: min num requests: 2
+[2023-02-25 14:34:38,051][00869] Starting all processes...
+[2023-02-25 14:34:38,053][00869] Starting process learner_proc0
+[2023-02-25 14:34:38,245][00869] Starting all processes...
+[2023-02-25 14:34:38,259][00869] Starting process inference_proc0-0
+[2023-02-25 14:34:38,262][00869] Starting process rollout_proc0
+[2023-02-25 14:34:38,262][00869] Starting process rollout_proc1
+[2023-02-25 14:34:38,262][00869] Starting process rollout_proc2
+[2023-02-25 14:34:38,262][00869] Starting process rollout_proc3
+[2023-02-25 14:34:38,363][00869] Starting process rollout_proc4
+[2023-02-25 14:34:38,373][00869] Starting process rollout_proc5
+[2023-02-25 14:34:38,373][00869] Starting process rollout_proc6
+[2023-02-25 14:34:38,377][00869] Starting process rollout_proc7
+[2023-02-25 14:34:49,144][20465] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-25 14:34:49,151][20465] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-02-25 14:34:49,201][20465] Num visible devices: 1
+[2023-02-25 14:34:49,237][20465] Starting seed is not provided
+[2023-02-25 14:34:49,238][20465] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-25 14:34:49,238][20465] Initializing actor-critic model on device cuda:0
+[2023-02-25 14:34:49,239][20465] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-25 14:34:49,240][20465] RunningMeanStd input shape: (1,)
+[2023-02-25 14:34:49,325][20465] ConvEncoder: input_channels=3
+[2023-02-25 14:34:50,028][20479] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-25 14:34:50,035][20479] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-02-25 14:34:50,115][20479] Num visible devices: 1
+[2023-02-25 14:34:50,121][20480] Worker 0 uses CPU cores [0]
+[2023-02-25 14:34:50,128][20481] Worker 1 uses CPU cores [1]
+[2023-02-25 14:34:50,254][20465] Conv encoder output size: 512
+[2023-02-25 14:34:50,254][20465] Policy head output size: 512
+[2023-02-25 14:34:50,348][20465] Created Actor Critic model with architecture:
+[2023-02-25 14:34:50,349][20465] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-02-25 14:34:50,638][20490] Worker 3 uses CPU cores [1]
+[2023-02-25 14:34:50,898][20489] Worker 2 uses CPU cores [0]
+[2023-02-25 14:34:50,933][20492] Worker 5 uses CPU cores [1]
+[2023-02-25 14:34:51,075][20506] Worker 7 uses CPU cores [1]
+[2023-02-25 14:34:51,151][20498] Worker 6 uses CPU cores [0]
+[2023-02-25 14:34:51,190][20500] Worker 4 uses CPU cores [0]
+[2023-02-25 14:34:54,317][20465] Using optimizer
+[2023-02-25 14:34:54,318][20465] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+[2023-02-25 14:34:54,362][20465] Loading model from checkpoint
+[2023-02-25 14:34:54,370][20465] Loaded experiment state at self.train_step=978, self.env_steps=4005888
+[2023-02-25 14:34:54,371][20465] Initialized policy 0 weights for model version 978
+[2023-02-25 14:34:54,375][20465] LearnerWorker_p0 finished initialization!
+[2023-02-25 14:34:54,377][20465] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-25 14:34:54,688][20479] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-25 14:34:54,690][20479] RunningMeanStd input shape: (1,)
+[2023-02-25 14:34:54,708][20479] ConvEncoder: input_channels=3
+[2023-02-25 14:34:54,867][20479] Conv encoder output size: 512
+[2023-02-25 14:34:54,868][20479] Policy head output size: 512
+[2023-02-25 14:34:55,658][00869] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4005888. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-02-25 14:34:57,308][00869] Inference worker 0-0 is ready!
+[2023-02-25 14:34:57,310][00869] All inference workers are ready! Signal rollout workers to start!
+[2023-02-25 14:34:57,434][20489] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,458][20500] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,468][20481] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,467][20480] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,475][20492] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,477][20506] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,479][20490] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,481][20498] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-25 14:34:57,995][00869] Heartbeat connected on Batcher_0
+[2023-02-25 14:34:58,001][00869] Heartbeat connected on LearnerWorker_p0
+[2023-02-25 14:34:58,034][00869] Heartbeat connected on InferenceWorker_p0-w0
+[2023-02-25 14:34:58,981][20481] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:58,985][20506] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:58,988][20492] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:58,993][20490] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:58,991][20489] Decorrelating experience for 0 frames...
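Editor's note: the ActorCriticSharedWeights summary printed above (conv encoder -> 512-d MLP -> GRU(512, 512) -> value head and 5 action logits) can be approximated in plain PyTorch. This is an illustrative reconstruction, not Sample Factory's actual classes; the conv filter sizes are an assumption based on the convnet_simple architecture named in the config (32@8x8 stride 4, 64@4x4 stride 2, 128@3x3 stride 2), while the log itself only confirms the layer types and the 512-d sizes.

```python
# Illustrative sketch of the printed model; filter sizes are assumptions.
import torch
import torch.nn as nn

class ActorCriticSketch(nn.Module):
    def __init__(self, num_actions: int = 5, rnn_size: int = 512):
        super().__init__()
        self.conv_head = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=8, stride=4), nn.ELU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ELU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2), nn.ELU(),
        )
        with torch.no_grad():  # infer the flattened size for (3, 72, 128) inputs
            n_flat = self.conv_head(torch.zeros(1, 3, 72, 128)).numel()
        self.mlp = nn.Sequential(nn.Flatten(), nn.Linear(n_flat, rnn_size), nn.ELU())
        self.core = nn.GRU(rnn_size, rnn_size)                        # (core): GRU(512, 512)
        self.critic_linear = nn.Linear(rnn_size, 1)                   # value head
        self.distribution_linear = nn.Linear(rnn_size, num_actions)   # 5 action logits

    def forward(self, obs, rnn_state=None):
        x = self.mlp(self.conv_head(obs / 255.0))  # obs_scale=255.0 in the config
        x, rnn_state = self.core(x.unsqueeze(0), rnn_state)
        x = x.squeeze(0)
        return self.distribution_linear(x), self.critic_linear(x), rnn_state

logits, value, state = ActorCriticSketch()(torch.zeros(4, 3, 72, 128))
```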
+[2023-02-25 14:34:59,028][20480] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:59,037][20500] Decorrelating experience for 0 frames...
+[2023-02-25 14:34:59,054][20498] Decorrelating experience for 0 frames...
+[2023-02-25 14:35:00,045][20489] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,067][20480] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,085][20500] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,211][20490] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,447][20506] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,462][20492] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,549][20481] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:00,658][00869] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-02-25 14:35:01,395][20492] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:01,459][20481] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:01,627][20489] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:01,687][20480] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:01,778][20500] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:01,919][20498] Decorrelating experience for 32 frames...
+[2023-02-25 14:35:02,308][20481] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:02,501][00869] Heartbeat connected on RolloutWorker_w1
+[2023-02-25 14:35:02,883][20490] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:03,200][20500] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:03,250][20492] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:03,402][20480] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:03,568][00869] Heartbeat connected on RolloutWorker_w4
+[2023-02-25 14:35:03,581][00869] Heartbeat connected on RolloutWorker_w5
+[2023-02-25 14:35:03,685][00869] Heartbeat connected on RolloutWorker_w0
+[2023-02-25 14:35:03,729][20498] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:04,477][20490] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:04,542][20489] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:04,792][00869] Heartbeat connected on RolloutWorker_w2
+[2023-02-25 14:35:04,894][20506] Decorrelating experience for 64 frames...
+[2023-02-25 14:35:04,891][00869] Heartbeat connected on RolloutWorker_w3
+[2023-02-25 14:35:05,661][00869] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 2.0. Samples: 20. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-02-25 14:35:05,667][00869] Avg episode reward: [(0, '1.320')]
+[2023-02-25 14:35:05,690][20498] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:06,236][00869] Heartbeat connected on RolloutWorker_w6
+[2023-02-25 14:35:08,777][20506] Decorrelating experience for 96 frames...
+[2023-02-25 14:35:09,841][20465] Signal inference workers to stop experience collection...
+[2023-02-25 14:35:09,865][20479] InferenceWorker_p0-w0: stopping experience collection
+[2023-02-25 14:35:09,984][00869] Heartbeat connected on RolloutWorker_w7
+[2023-02-25 14:35:10,659][00869] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 158.8. Samples: 2382. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-02-25 14:35:10,663][00869] Avg episode reward: [(0, '3.216')]
+[2023-02-25 14:35:12,762][20465] Signal inference workers to resume experience collection...
+[2023-02-25 14:35:12,763][20479] InferenceWorker_p0-w0: resuming experience collection
+[2023-02-25 14:35:15,658][00869] Fps is (10 sec: 1229.1, 60 sec: 614.4, 300 sec: 614.4). Total num frames: 4018176. Throughput: 0: 185.4. Samples: 3708. Policy #0 lag: (min: 1.0, avg: 1.4, max: 2.0)
+[2023-02-25 14:35:15,661][00869] Avg episode reward: [(0, '4.400')]
+[2023-02-25 14:35:20,658][00869] Fps is (10 sec: 3686.7, 60 sec: 1474.6, 300 sec: 1474.6). Total num frames: 4042752. Throughput: 0: 286.4. Samples: 7160. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:35:20,661][00869] Avg episode reward: [(0, '11.052')]
+[2023-02-25 14:35:21,477][20479] Updated weights for policy 0, policy_version 988 (0.0013)
+[2023-02-25 14:35:25,658][00869] Fps is (10 sec: 4505.6, 60 sec: 1911.5, 300 sec: 1911.5). Total num frames: 4063232. Throughput: 0: 475.9. Samples: 14278. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:35:25,661][00869] Avg episode reward: [(0, '14.947')]
+[2023-02-25 14:35:30,659][00869] Fps is (10 sec: 3686.1, 60 sec: 2106.5, 300 sec: 2106.5). Total num frames: 4079616. Throughput: 0: 541.2. Samples: 18944. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:35:30,664][00869] Avg episode reward: [(0, '16.247')]
+[2023-02-25 14:35:33,461][20479] Updated weights for policy 0, policy_version 998 (0.0033)
+[2023-02-25 14:35:35,658][00869] Fps is (10 sec: 2867.2, 60 sec: 2150.4, 300 sec: 2150.4). Total num frames: 4091904. Throughput: 0: 526.4. Samples: 21054. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:35:35,660][00869] Avg episode reward: [(0, '17.673')]
+[2023-02-25 14:35:40,658][00869] Fps is (10 sec: 3686.7, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 4116480. Throughput: 0: 601.0. Samples: 27046. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:35:40,667][00869] Avg episode reward: [(0, '20.318')]
+[2023-02-25 14:35:42,892][20479] Updated weights for policy 0, policy_version 1008 (0.0013)
+[2023-02-25 14:35:45,660][00869] Fps is (10 sec: 4504.7, 60 sec: 2621.3, 300 sec: 2621.3). Total num frames: 4136960. Throughput: 0: 755.4. Samples: 33994. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:35:45,667][00869] Avg episode reward: [(0, '22.429')]
+[2023-02-25 14:35:50,658][00869] Fps is (10 sec: 3686.4, 60 sec: 2681.0, 300 sec: 2681.0). Total num frames: 4153344. Throughput: 0: 802.4. Samples: 36128. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:35:50,663][00869] Avg episode reward: [(0, '22.712')]
+[2023-02-25 14:35:55,619][20479] Updated weights for policy 0, policy_version 1018 (0.0014)
+[2023-02-25 14:35:55,658][00869] Fps is (10 sec: 3277.4, 60 sec: 2730.7, 300 sec: 2730.7). Total num frames: 4169728. Throughput: 0: 843.7. Samples: 40346. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:35:55,661][00869] Avg episode reward: [(0, '22.447')]
+[2023-02-25 14:36:00,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3072.0, 300 sec: 2835.7). Total num frames: 4190208. Throughput: 0: 956.2. Samples: 46738. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
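Editor's note: each "Fps is (10 sec: X, 60 sec: Y, 300 sec: Z)" line above reports frame throughput over three sliding time windows. A generic sketch of that computation follows; it is an illustrative reimplementation with hypothetical names, not Sample Factory's code.

```python
# Sliding-window FPS sketch; FpsTracker is hypothetical.
import time
from collections import deque

class FpsTracker:
    def __init__(self, windows=(10, 60, 300)):
        self.windows = windows
        self.history = deque()  # (timestamp, total_frames) pairs, oldest first

    def record(self, total_frames: float):
        now = time.time()
        self.history.append((now, total_frames))
        # keep a little more than the largest window
        while self.history and now - self.history[0][0] > max(self.windows) + 5:
            self.history.popleft()

    def fps(self) -> dict:
        now, latest = self.history[-1]
        out = {}
        for w in self.windows:
            # oldest recorded sample still inside this window
            t0, f0 = next(((t, f) for t, f in self.history if now - t <= w), (now, latest))
            dt = now - t0
            out[w] = (latest - f0) / dt if dt > 0 else float("nan")
        return out
```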
+[2023-02-25 14:36:00,666][00869] Avg episode reward: [(0, '21.958')]
+[2023-02-25 14:36:04,884][20479] Updated weights for policy 0, policy_version 1028 (0.0015)
+[2023-02-25 14:36:05,659][00869] Fps is (10 sec: 4095.9, 60 sec: 3413.5, 300 sec: 2925.7). Total num frames: 4210688. Throughput: 0: 954.6. Samples: 50118. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:36:05,670][00869] Avg episode reward: [(0, '21.638')]
+[2023-02-25 14:36:10,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 2949.1). Total num frames: 4227072. Throughput: 0: 904.9. Samples: 54998. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:36:10,662][00869] Avg episode reward: [(0, '21.587')]
+[2023-02-25 14:36:15,658][00869] Fps is (10 sec: 3276.9, 60 sec: 3754.7, 300 sec: 2969.6). Total num frames: 4243456. Throughput: 0: 903.5. Samples: 59602. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:36:15,667][00869] Avg episode reward: [(0, '20.822')]
+[2023-02-25 14:36:17,350][20479] Updated weights for policy 0, policy_version 1038 (0.0012)
+[2023-02-25 14:36:20,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3035.9). Total num frames: 4263936. Throughput: 0: 935.0. Samples: 63130. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
+[2023-02-25 14:36:20,661][00869] Avg episode reward: [(0, '21.735')]
+[2023-02-25 14:36:25,661][00869] Fps is (10 sec: 4504.6, 60 sec: 3754.5, 300 sec: 3140.2). Total num frames: 4288512. Throughput: 0: 958.7. Samples: 70190. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:36:25,669][00869] Avg episode reward: [(0, '21.998')]
+[2023-02-25 14:36:26,463][20479] Updated weights for policy 0, policy_version 1048 (0.0020)
+[2023-02-25 14:36:30,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3104.3). Total num frames: 4300800. Throughput: 0: 908.6. Samples: 74878. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:36:30,670][00869] Avg episode reward: [(0, '21.704')]
+[2023-02-25 14:36:35,658][00869] Fps is (10 sec: 2867.8, 60 sec: 3754.7, 300 sec: 3113.0). Total num frames: 4317184. Throughput: 0: 910.6. Samples: 77106. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:36:35,661][00869] Avg episode reward: [(0, '22.063')]
+[2023-02-25 14:36:35,673][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001055_4321280.pth...
+[2023-02-25 14:36:35,855][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000939_3846144.pth
+[2023-02-25 14:36:38,636][20479] Updated weights for policy 0, policy_version 1058 (0.0030)
+[2023-02-25 14:36:40,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3198.8). Total num frames: 4341760. Throughput: 0: 958.5. Samples: 83478. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:36:40,666][00869] Avg episode reward: [(0, '23.105')]
+[2023-02-25 14:36:45,659][00869] Fps is (10 sec: 4505.5, 60 sec: 3754.8, 300 sec: 3239.6). Total num frames: 4362240. Throughput: 0: 962.2. Samples: 90038. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:36:45,665][00869] Avg episode reward: [(0, '24.815')]
+[2023-02-25 14:36:45,677][20465] Saving new best policy, reward=24.815!
+[2023-02-25 14:36:48,642][20479] Updated weights for policy 0, policy_version 1068 (0.0038)
+[2023-02-25 14:36:50,661][00869] Fps is (10 sec: 3685.4, 60 sec: 3754.5, 300 sec: 3241.1). Total num frames: 4378624. Throughput: 0: 935.9. Samples: 92238. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0)
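Editor's note: "Saving new best policy, reward=24.815!" above reflects the best-checkpoint settings from the config (save_best_metric=reward, save_best_every_sec=5, save_best_after=100000). A hedged sketch of that bookkeeping, with a hypothetical tracker class, is below.

```python
# Best-policy bookkeeping sketch; BestPolicyTracker is hypothetical.
import math

class BestPolicyTracker:
    def __init__(self, save_best_after: int = 100_000):
        self.best_reward = -math.inf
        self.save_best_after = save_best_after

    def maybe_save(self, env_steps: int, avg_reward: float) -> bool:
        """Return True when a new best checkpoint should be written."""
        if env_steps < self.save_best_after:
            return False  # too early for the running average to be meaningful
        if avg_reward > self.best_reward:
            self.best_reward = avg_reward
            print(f"Saving new best policy, reward={avg_reward:.3f}!")
            return True
        return False

tracker = BestPolicyTracker()
tracker.maybe_save(4362240, 24.815)  # mirrors the log entry above
```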
+[2023-02-25 14:36:50,671][00869] Avg episode reward: [(0, '23.759')]
+[2023-02-25 14:36:55,658][00869] Fps is (10 sec: 3276.9, 60 sec: 3754.7, 300 sec: 3242.7). Total num frames: 4395008. Throughput: 0: 930.5. Samples: 96872. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:36:55,662][00869] Avg episode reward: [(0, '25.545')]
+[2023-02-25 14:36:55,671][20465] Saving new best policy, reward=25.545!
+[2023-02-25 14:36:59,737][20479] Updated weights for policy 0, policy_version 1078 (0.0015)
+[2023-02-25 14:37:00,658][00869] Fps is (10 sec: 4097.1, 60 sec: 3822.9, 300 sec: 3309.6). Total num frames: 4419584. Throughput: 0: 983.3. Samples: 103852. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:37:00,661][00869] Avg episode reward: [(0, '24.991')]
+[2023-02-25 14:37:05,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3339.8). Total num frames: 4440064. Throughput: 0: 980.6. Samples: 107256. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:37:05,664][00869] Avg episode reward: [(0, '23.275')]
+[2023-02-25 14:37:10,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3307.1). Total num frames: 4452352. Throughput: 0: 934.8. Samples: 112256. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:37:10,663][00869] Avg episode reward: [(0, '20.026')]
+[2023-02-25 14:37:10,829][20479] Updated weights for policy 0, policy_version 1088 (0.0024)
+[2023-02-25 14:37:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3335.3). Total num frames: 4472832. Throughput: 0: 941.4. Samples: 117242. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:37:15,661][00869] Avg episode reward: [(0, '18.925')]
+[2023-02-25 14:37:20,495][20479] Updated weights for policy 0, policy_version 1098 (0.0014)
+[2023-02-25 14:37:20,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3389.8). Total num frames: 4497408. Throughput: 0: 968.8. Samples: 120704. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:37:20,665][00869] Avg episode reward: [(0, '18.770')]
+[2023-02-25 14:37:25,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3823.1, 300 sec: 3413.3). Total num frames: 4517888. Throughput: 0: 990.0. Samples: 128030. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:37:25,662][00869] Avg episode reward: [(0, '18.431')]
+[2023-02-25 14:37:30,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3408.9). Total num frames: 4534272. Throughput: 0: 944.1. Samples: 132520. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:37:30,661][00869] Avg episode reward: [(0, '19.272')]
+[2023-02-25 14:37:32,019][20479] Updated weights for policy 0, policy_version 1108 (0.0015)
+[2023-02-25 14:37:35,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3404.8). Total num frames: 4550656. Throughput: 0: 945.3. Samples: 134774. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:37:35,664][00869] Avg episode reward: [(0, '21.006')]
+[2023-02-25 14:37:40,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3425.7). Total num frames: 4571136. Throughput: 0: 987.3. Samples: 141300. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:37:40,661][00869] Avg episode reward: [(0, '22.728')]
+[2023-02-25 14:37:41,674][20479] Updated weights for policy 0, policy_version 1118 (0.0017)
+[2023-02-25 14:37:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3823.0, 300 sec: 3445.5). Total num frames: 4591616. Throughput: 0: 972.7. Samples: 147624. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:37:45,665][00869] Avg episode reward: [(0, '22.781')]
+[2023-02-25 14:37:50,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3823.1, 300 sec: 3440.6). Total num frames: 4608000. Throughput: 0: 943.3. Samples: 149706. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:37:50,661][00869] Avg episode reward: [(0, '23.428')]
+[2023-02-25 14:37:53,956][20479] Updated weights for policy 0, policy_version 1128 (0.0026)
+[2023-02-25 14:37:55,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3436.1). Total num frames: 4624384. Throughput: 0: 935.4. Samples: 154348. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:37:55,660][00869] Avg episode reward: [(0, '25.012')]
+[2023-02-25 14:38:00,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3476.1). Total num frames: 4648960. Throughput: 0: 978.0. Samples: 161254. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-25 14:38:00,665][00869] Avg episode reward: [(0, '24.371')]
+[2023-02-25 14:38:02,828][20479] Updated weights for policy 0, policy_version 1138 (0.0012)
+[2023-02-25 14:38:05,661][00869] Fps is (10 sec: 4504.6, 60 sec: 3822.8, 300 sec: 3492.3). Total num frames: 4669440. Throughput: 0: 977.8. Samples: 164708. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:38:05,667][00869] Avg episode reward: [(0, '24.073')]
+[2023-02-25 14:38:10,661][00869] Fps is (10 sec: 3276.0, 60 sec: 3822.8, 300 sec: 3465.8). Total num frames: 4681728. Throughput: 0: 913.1. Samples: 169120. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:38:10,664][00869] Avg episode reward: [(0, '23.797')]
+[2023-02-25 14:38:15,652][20479] Updated weights for policy 0, policy_version 1148 (0.0025)
+[2023-02-25 14:38:15,658][00869] Fps is (10 sec: 3277.5, 60 sec: 3822.9, 300 sec: 3481.6). Total num frames: 4702208. Throughput: 0: 926.0. Samples: 174192. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:38:15,661][00869] Avg episode reward: [(0, '23.827')]
+[2023-02-25 14:38:20,658][00869] Fps is (10 sec: 4096.9, 60 sec: 3754.7, 300 sec: 3496.6). Total num frames: 4722688. Throughput: 0: 951.9. Samples: 177608. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:38:20,667][00869] Avg episode reward: [(0, '22.376')]
+[2023-02-25 14:38:24,776][20479] Updated weights for policy 0, policy_version 1158 (0.0012)
+[2023-02-25 14:38:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3510.9). Total num frames: 4743168. Throughput: 0: 955.5. Samples: 184298. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:38:25,665][00869] Avg episode reward: [(0, '23.436')]
+[2023-02-25 14:38:30,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3486.4). Total num frames: 4755456. Throughput: 0: 904.7. Samples: 188334. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:38:30,660][00869] Avg episode reward: [(0, '22.569')]
+[2023-02-25 14:38:35,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3481.6). Total num frames: 4771840. Throughput: 0: 904.5. Samples: 190408. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:38:35,661][00869] Avg episode reward: [(0, '22.236')]
+[2023-02-25 14:38:35,677][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001165_4771840.pth...
+[2023-02-25 14:38:35,824][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth
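Editor's note: the save/remove pair above shows keep_checkpoints=2 rotation, and the file names encode train_step and env_steps (1165 * 4096 = 4771840, consistent with the arithmetic earlier). The naming convention below matches the log; the pruning logic itself is an illustrative reimplementation, not Sample Factory's code, and the /tmp path is hypothetical.

```python
# Checkpoint rotation sketch; save_and_prune is hypothetical.
from pathlib import Path

def save_and_prune(ckpt_dir: Path, train_step: int, env_steps: int,
                   blob: bytes, keep: int = 2) -> Path:
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    path = ckpt_dir / f"checkpoint_{train_step:09d}_{env_steps}.pth"
    path.write_bytes(blob)  # stand-in for torch.save(...)
    # zero-padded train_step makes lexicographic order match numeric order
    existing = sorted(ckpt_dir.glob("checkpoint_*.pth"))
    for old in existing[:-keep]:
        print(f"Removing {old}")
        old.unlink()
    return path

save_and_prune(Path("/tmp/checkpoint_p0"), 1165, 1165 * 4096, b"")
```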
+[2023-02-25 14:38:37,846][20479] Updated weights for policy 0, policy_version 1168 (0.0015)
+[2023-02-25 14:38:40,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3495.3). Total num frames: 4792320. Throughput: 0: 941.0. Samples: 196694. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:38:40,665][00869] Avg episode reward: [(0, '23.212')]
+[2023-02-25 14:38:45,669][00869] Fps is (10 sec: 4091.5, 60 sec: 3685.7, 300 sec: 3508.1). Total num frames: 4812800. Throughput: 0: 925.7. Samples: 202920. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:38:45,680][00869] Avg episode reward: [(0, '22.605')]
+[2023-02-25 14:38:48,743][20479] Updated weights for policy 0, policy_version 1178 (0.0017)
+[2023-02-25 14:38:50,661][00869] Fps is (10 sec: 3685.3, 60 sec: 3686.2, 300 sec: 3503.3). Total num frames: 4829184. Throughput: 0: 896.8. Samples: 205066. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:38:50,665][00869] Avg episode reward: [(0, '23.511')]
+[2023-02-25 14:38:55,658][00869] Fps is (10 sec: 3280.4, 60 sec: 3686.4, 300 sec: 3498.7). Total num frames: 4845568. Throughput: 0: 903.5. Samples: 209774. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:38:55,660][00869] Avg episode reward: [(0, '24.918')]
+[2023-02-25 14:38:59,437][20479] Updated weights for policy 0, policy_version 1188 (0.0030)
+[2023-02-25 14:39:00,658][00869] Fps is (10 sec: 4097.2, 60 sec: 3686.4, 300 sec: 3527.6). Total num frames: 4870144. Throughput: 0: 941.4. Samples: 216556. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:39:00,666][00869] Avg episode reward: [(0, '25.199')]
+[2023-02-25 14:39:05,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3686.5, 300 sec: 3538.9). Total num frames: 4890624. Throughput: 0: 941.3. Samples: 219966. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:39:05,663][00869] Avg episode reward: [(0, '25.806')]
+[2023-02-25 14:39:05,680][20465] Saving new best policy, reward=25.806!
+[2023-02-25 14:39:10,659][00869] Fps is (10 sec: 3276.5, 60 sec: 3686.5, 300 sec: 3517.7). Total num frames: 4902912. Throughput: 0: 883.7. Samples: 224066. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:39:10,669][00869] Avg episode reward: [(0, '24.324')]
+[2023-02-25 14:39:11,797][20479] Updated weights for policy 0, policy_version 1198 (0.0018)
+[2023-02-25 14:39:15,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3513.1). Total num frames: 4919296. Throughput: 0: 909.3. Samples: 229252. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:39:15,669][00869] Avg episode reward: [(0, '24.408')]
+[2023-02-25 14:39:20,660][00869] Fps is (10 sec: 4095.5, 60 sec: 3686.3, 300 sec: 3539.5). Total num frames: 4943872. Throughput: 0: 940.3. Samples: 232724. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:39:20,665][00869] Avg episode reward: [(0, '21.130')]
+[2023-02-25 14:39:21,249][20479] Updated weights for policy 0, policy_version 1208 (0.0013)
+[2023-02-25 14:39:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3534.7). Total num frames: 4960256. Throughput: 0: 945.5. Samples: 239240. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:39:25,663][00869] Avg episode reward: [(0, '21.239')]
+[2023-02-25 14:39:30,658][00869] Fps is (10 sec: 3277.5, 60 sec: 3686.4, 300 sec: 3530.0). Total num frames: 4976640. Throughput: 0: 903.2. Samples: 243556. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:39:30,663][00869] Avg episode reward: [(0, '21.370')]
+[2023-02-25 14:39:33,889][20479] Updated weights for policy 0, policy_version 1218 (0.0016)
+[2023-02-25 14:39:35,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3525.5). Total num frames: 4993024. Throughput: 0: 905.3. Samples: 245802. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:39:35,666][00869] Avg episode reward: [(0, '22.434')]
+[2023-02-25 14:39:40,659][00869] Fps is (10 sec: 4095.5, 60 sec: 3754.6, 300 sec: 3549.9). Total num frames: 5017600. Throughput: 0: 947.9. Samples: 252430. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:39:40,662][00869] Avg episode reward: [(0, '22.524')]
+[2023-02-25 14:39:43,189][20479] Updated weights for policy 0, policy_version 1228 (0.0012)
+[2023-02-25 14:39:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3687.1, 300 sec: 3545.2). Total num frames: 5033984. Throughput: 0: 929.6. Samples: 258386. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:39:45,661][00869] Avg episode reward: [(0, '24.239')]
+[2023-02-25 14:39:50,660][00869] Fps is (10 sec: 3276.5, 60 sec: 3686.5, 300 sec: 3540.6). Total num frames: 5050368. Throughput: 0: 901.4. Samples: 260532. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+[2023-02-25 14:39:50,663][00869] Avg episode reward: [(0, '23.763')]
+[2023-02-25 14:39:55,659][00869] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 5066752. Throughput: 0: 912.8. Samples: 265140. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:39:55,661][00869] Avg episode reward: [(0, '23.398')]
+[2023-02-25 14:39:55,779][20479] Updated weights for policy 0, policy_version 1238 (0.0015)
+[2023-02-25 14:40:00,658][00869] Fps is (10 sec: 4096.8, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 5091328. Throughput: 0: 949.0. Samples: 271958. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:40:00,666][00869] Avg episode reward: [(0, '23.483')]
+[2023-02-25 14:40:05,658][00869] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3735.0). Total num frames: 5107712. Throughput: 0: 945.0. Samples: 275246. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:40:05,668][00869] Avg episode reward: [(0, '24.779')]
+[2023-02-25 14:40:05,769][20479] Updated weights for policy 0, policy_version 1248 (0.0013)
+[2023-02-25 14:40:10,659][00869] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3748.9). Total num frames: 5124096. Throughput: 0: 892.0. Samples: 279382. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:40:10,665][00869] Avg episode reward: [(0, '24.985')]
+[2023-02-25 14:40:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3721.1). Total num frames: 5140480. Throughput: 0: 913.1. Samples: 284646. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:40:15,667][00869] Avg episode reward: [(0, '25.356')]
+[2023-02-25 14:40:17,704][20479] Updated weights for policy 0, policy_version 1258 (0.0012)
+[2023-02-25 14:40:20,658][00869] Fps is (10 sec: 4096.2, 60 sec: 3686.5, 300 sec: 3735.0). Total num frames: 5165056. Throughput: 0: 938.0. Samples: 288014. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:40:20,661][00869] Avg episode reward: [(0, '26.914')]
+[2023-02-25 14:40:20,671][20465] Saving new best policy, reward=26.914!
+[2023-02-25 14:40:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3735.0). Total num frames: 5181440. Throughput: 0: 926.5. Samples: 294120. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:40:25,665][00869] Avg episode reward: [(0, '25.818')]
+[2023-02-25 14:40:28,913][20479] Updated weights for policy 0, policy_version 1268 (0.0017)
+[2023-02-25 14:40:30,660][00869] Fps is (10 sec: 3276.2, 60 sec: 3686.3, 300 sec: 3748.9). Total num frames: 5197824. Throughput: 0: 889.1. Samples: 298398. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:40:30,663][00869] Avg episode reward: [(0, '26.240')]
+[2023-02-25 14:40:35,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3721.1). Total num frames: 5214208. Throughput: 0: 892.8. Samples: 300706. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:40:35,666][00869] Avg episode reward: [(0, '25.482')]
+[2023-02-25 14:40:35,681][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001273_5214208.pth...
+[2023-02-25 14:40:35,855][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001055_4321280.pth
+[2023-02-25 14:40:39,673][20479] Updated weights for policy 0, policy_version 1278 (0.0015)
+[2023-02-25 14:40:40,661][00869] Fps is (10 sec: 4095.5, 60 sec: 3686.3, 300 sec: 3735.0). Total num frames: 5238784. Throughput: 0: 938.0. Samples: 307352. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:40:40,667][00869] Avg episode reward: [(0, '24.387')]
+[2023-02-25 14:40:45,661][00869] Fps is (10 sec: 4094.8, 60 sec: 3686.2, 300 sec: 3735.0). Total num frames: 5255168. Throughput: 0: 910.1. Samples: 312914. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:40:45,664][00869] Avg episode reward: [(0, '23.824')]
+[2023-02-25 14:40:50,658][00869] Fps is (10 sec: 2868.1, 60 sec: 3618.3, 300 sec: 3721.1). Total num frames: 5267456. Throughput: 0: 884.2. Samples: 315034. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:40:50,661][00869] Avg episode reward: [(0, '23.411')]
+[2023-02-25 14:40:52,404][20479] Updated weights for policy 0, policy_version 1288 (0.0028)
+[2023-02-25 14:40:55,658][00869] Fps is (10 sec: 3277.8, 60 sec: 3686.4, 300 sec: 3721.1). Total num frames: 5287936. Throughput: 0: 904.4. Samples: 320080. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:40:55,667][00869] Avg episode reward: [(0, '23.788')]
+[2023-02-25 14:41:00,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3721.1). Total num frames: 5308416. Throughput: 0: 936.7. Samples: 326798. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:41:00,666][00869] Avg episode reward: [(0, '24.620')]
+[2023-02-25 14:41:02,576][20479] Updated weights for policy 0, policy_version 1298 (0.0015)
+[2023-02-25 14:41:05,661][00869] Fps is (10 sec: 3275.9, 60 sec: 3549.7, 300 sec: 3707.2). Total num frames: 5320704. Throughput: 0: 905.9. Samples: 328784. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:41:05,666][00869] Avg episode reward: [(0, '25.260')]
+[2023-02-25 14:41:10,658][00869] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3693.3). Total num frames: 5332992. Throughput: 0: 842.4. Samples: 332030. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:41:10,665][00869] Avg episode reward: [(0, '24.088')]
+[2023-02-25 14:41:15,662][00869] Fps is (10 sec: 2457.3, 60 sec: 3413.1, 300 sec: 3665.5). Total num frames: 5345280. Throughput: 0: 823.0. Samples: 335434. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:41:15,669][00869] Avg episode reward: [(0, '24.482')]
+[2023-02-25 14:41:18,177][20479] Updated weights for policy 0, policy_version 1308 (0.0018)
+[2023-02-25 14:41:20,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3651.7). Total num frames: 5365760. Throughput: 0: 842.7. Samples: 338628. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:41:20,664][00869] Avg episode reward: [(0, '25.141')]
+[2023-02-25 14:41:25,658][00869] Fps is (10 sec: 4507.3, 60 sec: 3481.6, 300 sec: 3693.3). Total num frames: 5390336. Throughput: 0: 850.5. Samples: 345620. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:41:25,666][00869] Avg episode reward: [(0, '25.654')]
+[2023-02-25 14:41:27,654][20479] Updated weights for policy 0, policy_version 1318 (0.0019)
+[2023-02-25 14:41:30,659][00869] Fps is (10 sec: 4095.9, 60 sec: 3481.7, 300 sec: 3693.3). Total num frames: 5406720. Throughput: 0: 840.5. Samples: 350736. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:41:30,666][00869] Avg episode reward: [(0, '25.785')]
+[2023-02-25 14:41:35,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3651.7). Total num frames: 5419008. Throughput: 0: 841.2. Samples: 352886. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:41:35,661][00869] Avg episode reward: [(0, '24.909')]
+[2023-02-25 14:41:39,492][20479] Updated weights for policy 0, policy_version 1328 (0.0012)
+[2023-02-25 14:41:40,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3413.5, 300 sec: 3665.6). Total num frames: 5443584. Throughput: 0: 860.2. Samples: 358788. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:41:40,661][00869] Avg episode reward: [(0, '23.973')]
+[2023-02-25 14:41:45,658][00869] Fps is (10 sec: 4915.2, 60 sec: 3550.0, 300 sec: 3693.4). Total num frames: 5468160. Throughput: 0: 864.0. Samples: 365678. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:41:45,663][00869] Avg episode reward: [(0, '24.099')]
+[2023-02-25 14:41:50,036][20479] Updated weights for policy 0, policy_version 1338 (0.0014)
+[2023-02-25 14:41:50,659][00869] Fps is (10 sec: 3686.3, 60 sec: 3549.8, 300 sec: 3679.5). Total num frames: 5480448. Throughput: 0: 871.6. Samples: 368006. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:41:50,665][00869] Avg episode reward: [(0, '23.653')]
+[2023-02-25 14:41:55,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3651.7). Total num frames: 5496832. Throughput: 0: 896.0. Samples: 372348. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:41:55,661][00869] Avg episode reward: [(0, '23.471')]
+[2023-02-25 14:42:00,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3481.6, 300 sec: 3651.7). Total num frames: 5517312. Throughput: 0: 966.3. Samples: 378916. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:42:00,660][00869] Avg episode reward: [(0, '21.867')]
+[2023-02-25 14:42:00,748][20479] Updated weights for policy 0, policy_version 1348 (0.0017)
+[2023-02-25 14:42:05,666][00869] Fps is (10 sec: 4502.0, 60 sec: 3686.1, 300 sec: 3693.2). Total num frames: 5541888. Throughput: 0: 973.6. Samples: 382448. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-25 14:42:05,669][00869] Avg episode reward: [(0, '22.658')]
+[2023-02-25 14:42:10,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 5554176. Throughput: 0: 933.8. Samples: 387642. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
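Editor's note: the "Policy #0 lag: (min: ..., avg: ..., max: ...)" statistic in the lines above is, as I read it, the gap between the learner's current policy version and the version that produced each sampled rollout (bounded by max_policy_lag=1000 in the config). A small illustrative sketch of that interpretation, not Sample Factory's code:

```python
# Policy-lag statistic sketch; an interpretation, with hypothetical names.
def policy_lag(learner_version: int, rollout_versions: list[int]):
    """min/avg/max of how many versions behind the learner each rollout was."""
    lags = [learner_version - v for v in rollout_versions]
    return min(lags), sum(lags) / len(lags), max(lags)

# e.g. learner at version 1308, rollouts collected with versions 1306-1308:
print(policy_lag(1308, [1308, 1307, 1308, 1306]))  # (0, 0.75, 2)
```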
+[2023-02-25 14:42:10,664][00869] Avg episode reward: [(0, '22.421')]
+[2023-02-25 14:42:12,330][20479] Updated weights for policy 0, policy_version 1358 (0.0012)
+[2023-02-25 14:42:15,658][00869] Fps is (10 sec: 2869.5, 60 sec: 3754.9, 300 sec: 3637.8). Total num frames: 5570560. Throughput: 0: 918.4. Samples: 392062. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:42:15,661][00869] Avg episode reward: [(0, '22.421')]
+[2023-02-25 14:42:20,659][00869] Fps is (10 sec: 4095.8, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 5595136. Throughput: 0: 948.1. Samples: 395552. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:42:20,668][00869] Avg episode reward: [(0, '21.297')]
+[2023-02-25 14:42:22,102][20479] Updated weights for policy 0, policy_version 1368 (0.0029)
+[2023-02-25 14:42:25,660][00869] Fps is (10 sec: 4504.8, 60 sec: 3754.6, 300 sec: 3665.5). Total num frames: 5615616. Throughput: 0: 972.4. Samples: 402550. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:42:25,666][00869] Avg episode reward: [(0, '22.995')]
+[2023-02-25 14:42:30,658][00869] Fps is (10 sec: 3686.6, 60 sec: 3754.7, 300 sec: 3665.6). Total num frames: 5632000. Throughput: 0: 920.8. Samples: 407112. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:42:30,666][00869] Avg episode reward: [(0, '23.117')]
+[2023-02-25 14:42:34,507][20479] Updated weights for policy 0, policy_version 1378 (0.0028)
+[2023-02-25 14:42:35,658][00869] Fps is (10 sec: 3277.4, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 5648384. Throughput: 0: 914.8. Samples: 409174. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-25 14:42:35,661][00869] Avg episode reward: [(0, '23.794')]
+[2023-02-25 14:42:35,674][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001379_5648384.pth...
+[2023-02-25 14:42:35,846][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001165_4771840.pth
+[2023-02-25 14:42:40,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3651.7). Total num frames: 5668864. Throughput: 0: 957.1. Samples: 415416. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:42:40,661][00869] Avg episode reward: [(0, '22.848')]
+[2023-02-25 14:42:43,603][20479] Updated weights for policy 0, policy_version 1388 (0.0014)
+[2023-02-25 14:42:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 5689344. Throughput: 0: 961.6. Samples: 422188. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:42:45,667][00869] Avg episode reward: [(0, '23.293')]
+[2023-02-25 14:42:50,660][00869] Fps is (10 sec: 3685.9, 60 sec: 3754.6, 300 sec: 3665.6). Total num frames: 5705728. Throughput: 0: 931.7. Samples: 424368. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:42:50,667][00869] Avg episode reward: [(0, '21.260')]
+[2023-02-25 14:42:55,664][00869] Fps is (10 sec: 3274.9, 60 sec: 3754.3, 300 sec: 3637.7). Total num frames: 5722112. Throughput: 0: 914.3. Samples: 428792. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:42:55,667][00869] Avg episode reward: [(0, '21.354')]
+[2023-02-25 14:42:55,965][20479] Updated weights for policy 0, policy_version 1398 (0.0012)
+[2023-02-25 14:43:00,658][00869] Fps is (10 sec: 4096.6, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 5746688. Throughput: 0: 973.6. Samples: 435872. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:00,665][00869] Avg episode reward: [(0, '21.756')]
+[2023-02-25 14:43:04,957][20479] Updated weights for policy 0, policy_version 1408 (0.0012)
+[2023-02-25 14:43:05,658][00869] Fps is (10 sec: 4508.1, 60 sec: 3755.2, 300 sec: 3679.5). Total num frames: 5767168. Throughput: 0: 972.4. Samples: 439310. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:05,661][00869] Avg episode reward: [(0, '23.500')]
+[2023-02-25 14:43:10,660][00869] Fps is (10 sec: 3685.7, 60 sec: 3822.8, 300 sec: 3665.5). Total num frames: 5783552. Throughput: 0: 921.5. Samples: 444016. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:10,663][00869] Avg episode reward: [(0, '23.484')]
+[2023-02-25 14:43:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 5799936. Throughput: 0: 929.4. Samples: 448936. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:43:15,661][00869] Avg episode reward: [(0, '23.687')]
+[2023-02-25 14:43:17,124][20479] Updated weights for policy 0, policy_version 1418 (0.0021)
+[2023-02-25 14:43:20,658][00869] Fps is (10 sec: 4096.8, 60 sec: 3823.0, 300 sec: 3665.6). Total num frames: 5824512. Throughput: 0: 961.5. Samples: 452440. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:43:20,665][00869] Avg episode reward: [(0, '24.515')]
+[2023-02-25 14:43:25,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3823.1, 300 sec: 3693.3). Total num frames: 5844992. Throughput: 0: 976.7. Samples: 459368. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:25,662][00869] Avg episode reward: [(0, '24.454')]
+[2023-02-25 14:43:27,094][20479] Updated weights for policy 0, policy_version 1428 (0.0014)
+[2023-02-25 14:43:30,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 5857280. Throughput: 0: 923.2. Samples: 463734. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:43:30,664][00869] Avg episode reward: [(0, '24.289')]
+[2023-02-25 14:43:35,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3679.5). Total num frames: 5877760. Throughput: 0: 924.0. Samples: 465946. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:35,665][00869] Avg episode reward: [(0, '24.416')]
+[2023-02-25 14:43:38,465][20479] Updated weights for policy 0, policy_version 1438 (0.0018)
+[2023-02-25 14:43:40,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3679.6). Total num frames: 5898240. Throughput: 0: 974.4. Samples: 472636. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:43:40,666][00869] Avg episode reward: [(0, '24.300')]
+[2023-02-25 14:43:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3693.4). Total num frames: 5918720. Throughput: 0: 957.1. Samples: 478942. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:43:45,671][00869] Avg episode reward: [(0, '24.328')]
+[2023-02-25 14:43:49,168][20479] Updated weights for policy 0, policy_version 1448 (0.0014)
+[2023-02-25 14:43:50,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3693.3). Total num frames: 5935104. Throughput: 0: 929.7. Samples: 481146. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-25 14:43:50,664][00869] Avg episode reward: [(0, '24.408')]
+[2023-02-25 14:43:55,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3823.3, 300 sec: 3665.6). Total num frames: 5951488. Throughput: 0: 931.0. Samples: 485908. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:43:55,664][00869] Avg episode reward: [(0, '24.508')]
+[2023-02-25 14:43:59,836][20479] Updated weights for policy 0, policy_version 1458 (0.0012)
+[2023-02-25 14:44:00,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3665.6). Total num frames: 5971968. Throughput: 0: 972.0. Samples: 492676. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:44:00,665][00869] Avg episode reward: [(0, '23.516')]
+[2023-02-25 14:44:05,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3693.4). Total num frames: 5992448. Throughput: 0: 968.4. Samples: 496018. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:44:05,663][00869] Avg episode reward: [(0, '24.210')]
+[2023-02-25 14:44:10,660][00869] Fps is (10 sec: 3276.1, 60 sec: 3686.4, 300 sec: 3679.4). Total num frames: 6004736. Throughput: 0: 908.2. Samples: 500238. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:44:10,663][00869] Avg episode reward: [(0, '25.167')]
+[2023-02-25 14:44:12,490][20479] Updated weights for policy 0, policy_version 1468 (0.0024)
+[2023-02-25 14:44:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3665.6). Total num frames: 6025216. Throughput: 0: 921.7. Samples: 505210. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:44:15,660][00869] Avg episode reward: [(0, '24.988')]
+[2023-02-25 14:44:20,658][00869] Fps is (10 sec: 4096.8, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 6045696. Throughput: 0: 945.7. Samples: 508504. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:44:20,667][00869] Avg episode reward: [(0, '26.008')]
+[2023-02-25 14:44:21,828][20479] Updated weights for policy 0, policy_version 1478 (0.0013)
+[2023-02-25 14:44:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3693.3). Total num frames: 6066176. Throughput: 0: 938.1. Samples: 514850. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:44:25,664][00869] Avg episode reward: [(0, '27.008')]
+[2023-02-25 14:44:25,685][20465] Saving new best policy, reward=27.008!
+[2023-02-25 14:44:30,667][00869] Fps is (10 sec: 3274.0, 60 sec: 3685.9, 300 sec: 3679.4). Total num frames: 6078464. Throughput: 0: 887.8. Samples: 518900. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:44:30,673][00869] Avg episode reward: [(0, '25.912')]
+[2023-02-25 14:44:34,496][20479] Updated weights for policy 0, policy_version 1488 (0.0011)
+[2023-02-25 14:44:35,659][00869] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3665.6). Total num frames: 6098944. Throughput: 0: 892.8. Samples: 521322. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-25 14:44:35,665][00869] Avg episode reward: [(0, '24.472')]
+[2023-02-25 14:44:35,677][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001489_6098944.pth...
+[2023-02-25 14:44:35,878][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001273_5214208.pth
+[2023-02-25 14:44:40,658][00869] Fps is (10 sec: 4099.5, 60 sec: 3686.4, 300 sec: 3679.5). Total num frames: 6119424. Throughput: 0: 940.7. Samples: 528240. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:44:40,664][00869] Avg episode reward: [(0, '25.728')]
+[2023-02-25 14:44:43,555][20479] Updated weights for policy 0, policy_version 1498 (0.0019)
+[2023-02-25 14:44:45,658][00869] Fps is (10 sec: 4096.2, 60 sec: 3686.4, 300 sec: 3693.4). Total num frames: 6139904. Throughput: 0: 922.5. Samples: 534190. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-25 14:44:45,664][00869] Avg episode reward: [(0, '25.058')]
+[2023-02-25 14:44:50,661][00869] Fps is (10 sec: 3685.3, 60 sec: 3686.2, 300 sec: 3693.3). Total num frames: 6156288. Throughput: 0: 896.4. Samples: 536360. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:44:50,666][00869] Avg episode reward: [(0, '25.817')]
+[2023-02-25 14:44:55,665][20479] Updated weights for policy 0, policy_version 1508 (0.0024)
+[2023-02-25 14:44:55,664][00869] Fps is (10 sec: 3684.4, 60 sec: 3754.3, 300 sec: 3679.4). Total num frames: 6176768. Throughput: 0: 920.2. Samples: 541648. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-25 14:44:55,674][00869] Avg episode reward: [(0, '24.029')]
+[2023-02-25 14:45:00,658][00869] Fps is (10 sec: 4097.2, 60 sec: 3754.7, 300 sec: 3693.3). Total num frames: 6197248. Throughput: 0: 967.4. Samples: 548742. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:45:00,667][00869] Avg episode reward: [(0, '25.732')]
+[2023-02-25 14:45:05,458][20479] Updated weights for policy 0, policy_version 1518 (0.0037)
+[2023-02-25 14:45:05,662][00869] Fps is (10 sec: 4096.5, 60 sec: 3754.4, 300 sec: 3707.2). Total num frames: 6217728. Throughput: 0: 964.7. Samples: 551918. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:45:05,665][00869] Avg episode reward: [(0, '23.135')]
+[2023-02-25 14:45:10,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3693.3). Total num frames: 6230016. Throughput: 0: 921.5. Samples: 556316. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:45:10,667][00869] Avg episode reward: [(0, '22.571')]
+[2023-02-25 14:45:15,658][00869] Fps is (10 sec: 3278.1, 60 sec: 3754.7, 300 sec: 3679.5). Total num frames: 6250496. Throughput: 0: 956.0. Samples: 561914. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:45:15,665][00869] Avg episode reward: [(0, '22.832')]
+[2023-02-25 14:45:16,948][20479] Updated weights for policy 0, policy_version 1528 (0.0036)
+[2023-02-25 14:45:20,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3707.2). Total num frames: 6275072. Throughput: 0: 979.6. Samples: 565402. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:45:20,667][00869] Avg episode reward: [(0, '23.653')]
+[2023-02-25 14:45:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3707.3). Total num frames: 6291456. Throughput: 0: 968.5. Samples: 571824. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-25 14:45:25,661][00869] Avg episode reward: [(0, '22.273')]
+[2023-02-25 14:45:27,265][20479] Updated weights for policy 0, policy_version 1538 (0.0026)
+[2023-02-25 14:45:30,659][00869] Fps is (10 sec: 3276.4, 60 sec: 3823.4, 300 sec: 3707.2). Total num frames: 6307840. Throughput: 0: 933.8. Samples: 576210. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:45:30,666][00869] Avg episode reward: [(0, '22.400')]
+[2023-02-25 14:45:35,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3693.4). Total num frames: 6328320. Throughput: 0: 946.1. Samples: 578932. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:45:35,663][00869] Avg episode reward: [(0, '23.801')]
+[2023-02-25 14:45:37,834][20479] Updated weights for policy 0, policy_version 1548 (0.0012)
+[2023-02-25 14:45:40,658][00869] Fps is (10 sec: 4506.1, 60 sec: 3891.2, 300 sec: 3721.1). Total num frames: 6352896. Throughput: 0: 986.6. Samples: 586042. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:45:40,664][00869] Avg episode reward: [(0, '23.892')] +[2023-02-25 14:45:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3735.0). Total num frames: 6369280. Throughput: 0: 954.4. Samples: 591690. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:45:45,662][00869] Avg episode reward: [(0, '23.680')] +[2023-02-25 14:45:49,108][20479] Updated weights for policy 0, policy_version 1558 (0.0011) +[2023-02-25 14:45:50,659][00869] Fps is (10 sec: 3276.7, 60 sec: 3823.1, 300 sec: 3721.1). Total num frames: 6385664. Throughput: 0: 933.6. Samples: 593926. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:45:50,661][00869] Avg episode reward: [(0, '24.873')] +[2023-02-25 14:45:55,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3823.3, 300 sec: 3721.1). Total num frames: 6406144. Throughput: 0: 959.6. Samples: 599500. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:45:55,666][00869] Avg episode reward: [(0, '23.881')] +[2023-02-25 14:45:58,732][20479] Updated weights for policy 0, policy_version 1568 (0.0015) +[2023-02-25 14:46:00,659][00869] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3762.8). Total num frames: 6430720. Throughput: 0: 995.9. Samples: 606728. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:46:00,665][00869] Avg episode reward: [(0, '23.566')] +[2023-02-25 14:46:05,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3823.2, 300 sec: 3776.7). Total num frames: 6447104. Throughput: 0: 983.7. Samples: 609670. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:05,662][00869] Avg episode reward: [(0, '22.727')] +[2023-02-25 14:46:10,425][20479] Updated weights for policy 0, policy_version 1578 (0.0026) +[2023-02-25 14:46:10,658][00869] Fps is (10 sec: 3276.9, 60 sec: 3891.2, 300 sec: 3790.6). Total num frames: 6463488. Throughput: 0: 940.4. Samples: 614144. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:46:10,661][00869] Avg episode reward: [(0, '21.956')] +[2023-02-25 14:46:15,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 6483968. Throughput: 0: 976.8. Samples: 620164. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:15,667][00869] Avg episode reward: [(0, '21.478')] +[2023-02-25 14:46:19,571][20479] Updated weights for policy 0, policy_version 1588 (0.0012) +[2023-02-25 14:46:20,659][00869] Fps is (10 sec: 4505.4, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 6508544. Throughput: 0: 995.9. Samples: 623748. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:20,666][00869] Avg episode reward: [(0, '22.220')] +[2023-02-25 14:46:25,660][00869] Fps is (10 sec: 4095.3, 60 sec: 3891.1, 300 sec: 3790.5). Total num frames: 6524928. Throughput: 0: 972.3. Samples: 629796. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:25,667][00869] Avg episode reward: [(0, '21.812')] +[2023-02-25 14:46:30,658][00869] Fps is (10 sec: 3276.9, 60 sec: 3891.3, 300 sec: 3804.4). Total num frames: 6541312. Throughput: 0: 947.4. Samples: 634324. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:46:30,665][00869] Avg episode reward: [(0, '23.666')] +[2023-02-25 14:46:31,805][20479] Updated weights for policy 0, policy_version 1598 (0.0011) +[2023-02-25 14:46:35,658][00869] Fps is (10 sec: 3687.0, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 6561792. Throughput: 0: 962.6. Samples: 637244. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:35,661][00869] Avg episode reward: [(0, '24.688')] +[2023-02-25 14:46:35,669][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001602_6561792.pth... +[2023-02-25 14:46:35,826][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001379_5648384.pth +[2023-02-25 14:46:40,629][20479] Updated weights for policy 0, policy_version 1608 (0.0012) +[2023-02-25 14:46:40,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 6586368. Throughput: 0: 992.0. Samples: 644142. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:46:40,661][00869] Avg episode reward: [(0, '24.632')] +[2023-02-25 14:46:45,659][00869] Fps is (10 sec: 4095.9, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 6602752. Throughput: 0: 956.0. Samples: 649748. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-25 14:46:45,665][00869] Avg episode reward: [(0, '24.587')] +[2023-02-25 14:46:50,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 6615040. Throughput: 0: 938.1. Samples: 651884. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:46:50,661][00869] Avg episode reward: [(0, '24.480')] +[2023-02-25 14:46:52,984][20479] Updated weights for policy 0, policy_version 1618 (0.0024) +[2023-02-25 14:46:55,659][00869] Fps is (10 sec: 3686.2, 60 sec: 3891.1, 300 sec: 3804.4). Total num frames: 6639616. Throughput: 0: 966.2. Samples: 657626. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:46:55,663][00869] Avg episode reward: [(0, '24.536')] +[2023-02-25 14:47:00,659][00869] Fps is (10 sec: 4915.1, 60 sec: 3891.2, 300 sec: 3804.5). Total num frames: 6664192. Throughput: 0: 993.1. Samples: 664854. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:47:00,661][00869] Avg episode reward: [(0, '24.569')] +[2023-02-25 14:47:01,436][20479] Updated weights for policy 0, policy_version 1628 (0.0020) +[2023-02-25 14:47:05,658][00869] Fps is (10 sec: 4096.3, 60 sec: 3891.2, 300 sec: 3818.3). Total num frames: 6680576. Throughput: 0: 974.2. Samples: 667588. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:47:05,668][00869] Avg episode reward: [(0, '24.605')] +[2023-02-25 14:47:10,658][00869] Fps is (10 sec: 2867.3, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 6692864. Throughput: 0: 936.2. Samples: 671924. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-25 14:47:10,664][00869] Avg episode reward: [(0, '23.945')] +[2023-02-25 14:47:13,847][20479] Updated weights for policy 0, policy_version 1638 (0.0013) +[2023-02-25 14:47:15,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 6717440. Throughput: 0: 976.0. Samples: 678246. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:47:15,661][00869] Avg episode reward: [(0, '23.136')] +[2023-02-25 14:47:20,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 6737920. Throughput: 0: 989.4. Samples: 681768. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:47:20,663][00869] Avg episode reward: [(0, '25.066')] +[2023-02-25 14:47:23,078][20479] Updated weights for policy 0, policy_version 1648 (0.0020) +[2023-02-25 14:47:25,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 6754304. Throughput: 0: 964.6. Samples: 687548. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:47:25,662][00869] Avg episode reward: [(0, '24.850')] +[2023-02-25 14:47:30,660][00869] Fps is (10 sec: 2866.7, 60 sec: 3754.5, 300 sec: 3790.5). Total num frames: 6766592. Throughput: 0: 917.7. Samples: 691046. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:47:30,663][00869] Avg episode reward: [(0, '23.825')] +[2023-02-25 14:47:35,660][00869] Fps is (10 sec: 2457.1, 60 sec: 3618.0, 300 sec: 3762.7). Total num frames: 6778880. Throughput: 0: 910.1. Samples: 692842. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:47:35,668][00869] Avg episode reward: [(0, '23.766')] +[2023-02-25 14:47:38,562][20479] Updated weights for policy 0, policy_version 1658 (0.0028) +[2023-02-25 14:47:40,658][00869] Fps is (10 sec: 3277.4, 60 sec: 3549.9, 300 sec: 3762.8). Total num frames: 6799360. Throughput: 0: 894.5. Samples: 697878. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:47:40,661][00869] Avg episode reward: [(0, '23.431')] +[2023-02-25 14:47:45,658][00869] Fps is (10 sec: 4096.8, 60 sec: 3618.1, 300 sec: 3776.7). Total num frames: 6819840. Throughput: 0: 883.0. Samples: 704588. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:47:45,661][00869] Avg episode reward: [(0, '23.868')] +[2023-02-25 14:47:48,753][20479] Updated weights for policy 0, policy_version 1668 (0.0012) +[2023-02-25 14:47:50,659][00869] Fps is (10 sec: 3686.3, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 6836224. Throughput: 0: 871.4. Samples: 706800. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:47:50,662][00869] Avg episode reward: [(0, '23.854')] +[2023-02-25 14:47:55,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3748.9). Total num frames: 6852608. Throughput: 0: 874.0. Samples: 711256. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-25 14:47:55,660][00869] Avg episode reward: [(0, '22.927')] +[2023-02-25 14:47:59,513][20479] Updated weights for policy 0, policy_version 1678 (0.0015) +[2023-02-25 14:48:00,658][00869] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3762.8). Total num frames: 6877184. Throughput: 0: 887.7. Samples: 718194. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:48:00,661][00869] Avg episode reward: [(0, '23.335')] +[2023-02-25 14:48:05,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3618.1, 300 sec: 3776.7). Total num frames: 6897664. Throughput: 0: 886.2. Samples: 721648. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-25 14:48:05,661][00869] Avg episode reward: [(0, '25.028')] +[2023-02-25 14:48:10,662][00869] Fps is (10 sec: 3275.6, 60 sec: 3617.9, 300 sec: 3762.7). Total num frames: 6909952. Throughput: 0: 867.1. Samples: 726570. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-25 14:48:10,667][00869] Avg episode reward: [(0, '25.668')] +[2023-02-25 14:48:10,680][20479] Updated weights for policy 0, policy_version 1688 (0.0013) +[2023-02-25 14:48:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3748.9). Total num frames: 6930432. Throughput: 0: 897.8. Samples: 731444. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:48:15,661][00869] Avg episode reward: [(0, '25.852')] +[2023-02-25 14:48:20,658][00869] Fps is (10 sec: 4097.5, 60 sec: 3549.9, 300 sec: 3748.9). Total num frames: 6950912. Throughput: 0: 934.7. Samples: 734902. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:48:20,661][00869] Avg episode reward: [(0, '25.444')] +[2023-02-25 14:48:20,701][20479] Updated weights for policy 0, policy_version 1698 (0.0014) +[2023-02-25 14:48:25,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3790.5). Total num frames: 6975488. Throughput: 0: 981.8. Samples: 742058. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:48:25,666][00869] Avg episode reward: [(0, '25.473')] +[2023-02-25 14:48:30,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3762.8). Total num frames: 6987776. Throughput: 0: 930.9. Samples: 746478. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:48:30,663][00869] Avg episode reward: [(0, '24.946')] +[2023-02-25 14:48:32,463][20479] Updated weights for policy 0, policy_version 1708 (0.0022) +[2023-02-25 14:48:35,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3823.1, 300 sec: 3762.8). Total num frames: 7008256. Throughput: 0: 932.9. Samples: 748782. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:48:35,661][00869] Avg episode reward: [(0, '24.671')] +[2023-02-25 14:48:35,673][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001711_7008256.pth... +[2023-02-25 14:48:35,821][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001489_6098944.pth +[2023-02-25 14:48:40,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3762.8). Total num frames: 7028736. Throughput: 0: 982.1. Samples: 755452. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:48:40,661][00869] Avg episode reward: [(0, '23.288')] +[2023-02-25 14:48:41,802][20479] Updated weights for policy 0, policy_version 1718 (0.0022) +[2023-02-25 14:48:45,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7049216. Throughput: 0: 969.3. Samples: 761812. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:48:45,665][00869] Avg episode reward: [(0, '23.377')] +[2023-02-25 14:48:50,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7065600. Throughput: 0: 940.6. Samples: 763974. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:48:50,665][00869] Avg episode reward: [(0, '22.863')] +[2023-02-25 14:48:54,260][20479] Updated weights for policy 0, policy_version 1728 (0.0025) +[2023-02-25 14:48:55,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3762.8). Total num frames: 7081984. Throughput: 0: 935.8. Samples: 768678. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:48:55,666][00869] Avg episode reward: [(0, '24.590')] +[2023-02-25 14:49:00,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7106560. Throughput: 0: 983.8. Samples: 775716. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:49:00,665][00869] Avg episode reward: [(0, '24.845')] +[2023-02-25 14:49:02,996][20479] Updated weights for policy 0, policy_version 1738 (0.0015) +[2023-02-25 14:49:05,659][00869] Fps is (10 sec: 4505.2, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 7127040. Throughput: 0: 985.2. Samples: 779236. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:49:05,662][00869] Avg episode reward: [(0, '24.329')] +[2023-02-25 14:49:10,661][00869] Fps is (10 sec: 3276.1, 60 sec: 3823.0, 300 sec: 3776.6). Total num frames: 7139328. Throughput: 0: 928.3. Samples: 783834. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:49:10,670][00869] Avg episode reward: [(0, '24.950')] +[2023-02-25 14:49:15,495][20479] Updated weights for policy 0, policy_version 1748 (0.0044) +[2023-02-25 14:49:15,658][00869] Fps is (10 sec: 3277.1, 60 sec: 3822.9, 300 sec: 3776.6). Total num frames: 7159808. Throughput: 0: 943.5. Samples: 788934. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:49:15,660][00869] Avg episode reward: [(0, '25.941')] +[2023-02-25 14:49:20,658][00869] Fps is (10 sec: 4096.9, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7180288. Throughput: 0: 967.1. Samples: 792300. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:49:20,666][00869] Avg episode reward: [(0, '26.468')] +[2023-02-25 14:49:24,364][20479] Updated weights for policy 0, policy_version 1758 (0.0012) +[2023-02-25 14:49:25,660][00869] Fps is (10 sec: 4095.3, 60 sec: 3754.6, 300 sec: 3804.5). Total num frames: 7200768. Throughput: 0: 971.6. Samples: 799174. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-25 14:49:25,664][00869] Avg episode reward: [(0, '25.715')] +[2023-02-25 14:49:30,660][00869] Fps is (10 sec: 3685.8, 60 sec: 3822.8, 300 sec: 3790.5). Total num frames: 7217152. Throughput: 0: 928.5. Samples: 803594. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:49:30,664][00869] Avg episode reward: [(0, '25.969')] +[2023-02-25 14:49:35,658][00869] Fps is (10 sec: 3277.4, 60 sec: 3754.7, 300 sec: 3776.7). Total num frames: 7233536. Throughput: 0: 931.7. Samples: 805902. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-02-25 14:49:35,664][00869] Avg episode reward: [(0, '24.729')] +[2023-02-25 14:49:36,618][20479] Updated weights for policy 0, policy_version 1768 (0.0014) +[2023-02-25 14:49:40,658][00869] Fps is (10 sec: 4096.7, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 7258112. Throughput: 0: 978.4. Samples: 812706. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:49:40,666][00869] Avg episode reward: [(0, '24.458')] +[2023-02-25 14:49:45,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3804.5). Total num frames: 7278592. Throughput: 0: 961.6. Samples: 818986. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:49:45,666][00869] Avg episode reward: [(0, '25.354')] +[2023-02-25 14:49:46,240][20479] Updated weights for policy 0, policy_version 1778 (0.0012) +[2023-02-25 14:49:50,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3790.6). Total num frames: 7294976. Throughput: 0: 931.8. Samples: 821166. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:49:50,663][00869] Avg episode reward: [(0, '25.082')] +[2023-02-25 14:49:55,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7311360. Throughput: 0: 940.4. Samples: 826150. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:49:55,661][00869] Avg episode reward: [(0, '23.002')] +[2023-02-25 14:49:57,712][20479] Updated weights for policy 0, policy_version 1788 (0.0039) +[2023-02-25 14:50:00,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3790.6). Total num frames: 7335936. Throughput: 0: 982.0. Samples: 833126. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:50:00,665][00869] Avg episode reward: [(0, '23.666')] +[2023-02-25 14:50:05,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3818.3). Total num frames: 7356416. Throughput: 0: 986.7. Samples: 836702. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:50:05,665][00869] Avg episode reward: [(0, '24.830')] +[2023-02-25 14:50:08,162][20479] Updated weights for policy 0, policy_version 1798 (0.0020) +[2023-02-25 14:50:10,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3823.1, 300 sec: 3790.5). Total num frames: 7368704. Throughput: 0: 930.9. Samples: 841062. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:50:10,662][00869] Avg episode reward: [(0, '25.990')] +[2023-02-25 14:50:15,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7389184. Throughput: 0: 950.7. Samples: 846372. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-25 14:50:15,666][00869] Avg episode reward: [(0, '25.484')] +[2023-02-25 14:50:18,839][20479] Updated weights for policy 0, policy_version 1808 (0.0013) +[2023-02-25 14:50:20,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 7413760. Throughput: 0: 977.0. Samples: 849868. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:50:20,661][00869] Avg episode reward: [(0, '25.861')] +[2023-02-25 14:50:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 7430144. Throughput: 0: 971.6. Samples: 856430. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-25 14:50:25,662][00869] Avg episode reward: [(0, '28.039')] +[2023-02-25 14:50:25,681][20465] Saving new best policy, reward=28.039! +[2023-02-25 14:50:30,465][20479] Updated weights for policy 0, policy_version 1818 (0.0021) +[2023-02-25 14:50:30,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3823.0, 300 sec: 3790.5). Total num frames: 7446528. Throughput: 0: 927.5. Samples: 860722. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:50:30,667][00869] Avg episode reward: [(0, '28.219')] +[2023-02-25 14:50:30,673][20465] Saving new best policy, reward=28.219! +[2023-02-25 14:50:35,659][00869] Fps is (10 sec: 3686.3, 60 sec: 3891.2, 300 sec: 3776.6). Total num frames: 7467008. Throughput: 0: 932.9. Samples: 863146. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-25 14:50:35,665][00869] Avg episode reward: [(0, '28.830')] +[2023-02-25 14:50:35,676][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001823_7467008.pth... +[2023-02-25 14:50:35,802][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001602_6561792.pth +[2023-02-25 14:50:35,815][20465] Saving new best policy, reward=28.830! +[2023-02-25 14:50:40,110][20479] Updated weights for policy 0, policy_version 1828 (0.0022) +[2023-02-25 14:50:40,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 7487488. Throughput: 0: 974.6. Samples: 870008. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:50:40,661][00869] Avg episode reward: [(0, '27.802')] +[2023-02-25 14:50:45,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 7503872. Throughput: 0: 950.2. Samples: 875886. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:50:45,669][00869] Avg episode reward: [(0, '26.841')] +[2023-02-25 14:50:50,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3776.7). Total num frames: 7520256. Throughput: 0: 921.6. Samples: 878172. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:50:50,661][00869] Avg episode reward: [(0, '26.896')] +[2023-02-25 14:50:52,422][20479] Updated weights for policy 0, policy_version 1838 (0.0021) +[2023-02-25 14:50:55,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3762.8). Total num frames: 7540736. Throughput: 0: 941.1. Samples: 883410. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-25 14:50:55,664][00869] Avg episode reward: [(0, '26.795')] +[2023-02-25 14:51:00,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 7565312. Throughput: 0: 983.2. Samples: 890616. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-25 14:51:00,661][00869] Avg episode reward: [(0, '25.000')] +[2023-02-25 14:51:01,167][20479] Updated weights for policy 0, policy_version 1848 (0.0019) +[2023-02-25 14:51:05,663][00869] Fps is (10 sec: 4093.9, 60 sec: 3754.4, 300 sec: 3790.5). Total num frames: 7581696. Throughput: 0: 972.9. Samples: 893652. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:51:05,666][00869] Avg episode reward: [(0, '24.367')] +[2023-02-25 14:51:10,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3776.6). Total num frames: 7598080. Throughput: 0: 924.4. Samples: 898030. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-25 14:51:10,667][00869] Avg episode reward: [(0, '23.892')] +[2023-02-25 14:51:13,676][20479] Updated weights for policy 0, policy_version 1858 (0.0035) +[2023-02-25 14:51:15,658][00869] Fps is (10 sec: 3688.3, 60 sec: 3822.9, 300 sec: 3762.8). Total num frames: 7618560. Throughput: 0: 955.9. Samples: 903736. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-02-25 14:51:15,665][00869] Avg episode reward: [(0, '24.266')] +[2023-02-25 14:51:20,658][00869] Fps is (10 sec: 4096.1, 60 sec: 3754.7, 300 sec: 3776.7). Total num frames: 7639040. Throughput: 0: 979.2. Samples: 907212. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:51:20,661][00869] Avg episode reward: [(0, '24.390')] +[2023-02-25 14:51:22,488][20479] Updated weights for policy 0, policy_version 1868 (0.0015) +[2023-02-25 14:51:25,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 7659520. Throughput: 0: 965.5. Samples: 913456. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:51:25,661][00869] Avg episode reward: [(0, '24.879')] +[2023-02-25 14:51:30,658][00869] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7675904. Throughput: 0: 934.2. Samples: 917926. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:51:30,662][00869] Avg episode reward: [(0, '24.752')] +[2023-02-25 14:51:34,753][20479] Updated weights for policy 0, policy_version 1878 (0.0014) +[2023-02-25 14:51:35,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3823.0, 300 sec: 3762.8). Total num frames: 7696384. Throughput: 0: 942.4. Samples: 920580. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-25 14:51:35,661][00869] Avg episode reward: [(0, '26.887')] +[2023-02-25 14:51:40,658][00869] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 7716864. Throughput: 0: 986.7. Samples: 927810. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-25 14:51:40,666][00869] Avg episode reward: [(0, '25.390')] +[2023-02-25 14:51:43,890][20479] Updated weights for policy 0, policy_version 1888 (0.0022) +[2023-02-25 14:51:45,659][00869] Fps is (10 sec: 4095.9, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 7737344. Throughput: 0: 953.9. 
Samples: 933540. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-25 14:51:45,666][00869] Avg episode reward: [(0, '23.188')] +[2023-02-25 14:51:50,658][00869] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3762.8). Total num frames: 7749632. Throughput: 0: 935.6. Samples: 935748. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:51:50,664][00869] Avg episode reward: [(0, '22.506')] +[2023-02-25 14:51:55,437][20479] Updated weights for policy 0, policy_version 1898 (0.0015) +[2023-02-25 14:51:55,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3891.2, 300 sec: 3762.8). Total num frames: 7774208. Throughput: 0: 961.9. Samples: 941316. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:51:55,661][00869] Avg episode reward: [(0, '22.045')] +[2023-02-25 14:52:00,658][00869] Fps is (10 sec: 4915.2, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 7798784. Throughput: 0: 992.5. Samples: 948400. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:52:00,661][00869] Avg episode reward: [(0, '21.364')] +[2023-02-25 14:52:05,405][20479] Updated weights for policy 0, policy_version 1908 (0.0013) +[2023-02-25 14:52:05,659][00869] Fps is (10 sec: 4095.6, 60 sec: 3891.5, 300 sec: 3804.4). Total num frames: 7815168. Throughput: 0: 978.9. Samples: 951262. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:52:05,664][00869] Avg episode reward: [(0, '21.200')] +[2023-02-25 14:52:10,660][00869] Fps is (10 sec: 2866.8, 60 sec: 3822.9, 300 sec: 3762.7). Total num frames: 7827456. Throughput: 0: 938.9. Samples: 955708. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:52:10,669][00869] Avg episode reward: [(0, '22.235')] +[2023-02-25 14:52:15,658][00869] Fps is (10 sec: 3686.8, 60 sec: 3891.2, 300 sec: 3776.7). Total num frames: 7852032. Throughput: 0: 973.3. Samples: 961724. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:52:15,661][00869] Avg episode reward: [(0, '25.418')] +[2023-02-25 14:52:16,448][20479] Updated weights for policy 0, policy_version 1918 (0.0016) +[2023-02-25 14:52:20,658][00869] Fps is (10 sec: 4506.2, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 7872512. Throughput: 0: 994.4. Samples: 965330. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:52:20,665][00869] Avg episode reward: [(0, '25.540')] +[2023-02-25 14:52:25,659][00869] Fps is (10 sec: 3686.3, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 7888896. Throughput: 0: 961.2. Samples: 971062. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-25 14:52:25,667][00869] Avg episode reward: [(0, '25.835')] +[2023-02-25 14:52:27,552][20479] Updated weights for policy 0, policy_version 1928 (0.0016) +[2023-02-25 14:52:30,658][00869] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 7905280. Throughput: 0: 930.3. Samples: 975404. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-25 14:52:30,661][00869] Avg episode reward: [(0, '26.772')] +[2023-02-25 14:52:35,658][00869] Fps is (10 sec: 3686.5, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 7925760. Throughput: 0: 950.0. Samples: 978498. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-25 14:52:35,669][00869] Avg episode reward: [(0, '28.238')] +[2023-02-25 14:52:35,680][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001935_7925760.pth... 
+[2023-02-25 14:52:35,877][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001711_7008256.pth
+[2023-02-25 14:52:37,757][20479] Updated weights for policy 0, policy_version 1938 (0.0014)
+[2023-02-25 14:52:40,658][00869] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3832.2). Total num frames: 7950336. Throughput: 0: 983.0. Samples: 985552. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-25 14:52:40,667][00869] Avg episode reward: [(0, '27.272')]
+[2023-02-25 14:52:45,659][00869] Fps is (10 sec: 4095.9, 60 sec: 3822.9, 300 sec: 3832.2). Total num frames: 7966720. Throughput: 0: 940.5. Samples: 990724. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-25 14:52:45,670][00869] Avg episode reward: [(0, '26.563')]
+[2023-02-25 14:52:49,917][20479] Updated weights for policy 0, policy_version 1948 (0.0019)
+[2023-02-25 14:52:50,658][00869] Fps is (10 sec: 2867.2, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 7979008. Throughput: 0: 921.8. Samples: 992740. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-25 14:52:50,661][00869] Avg episode reward: [(0, '26.513')]
+[2023-02-25 14:52:55,660][00869] Fps is (10 sec: 3276.2, 60 sec: 3754.5, 300 sec: 3804.4). Total num frames: 7999488. Throughput: 0: 944.1. Samples: 998194. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-25 14:52:55,666][00869] Avg episode reward: [(0, '27.218')]
+[2023-02-25 14:52:56,854][20465] Stopping Batcher_0...
+[2023-02-25 14:52:56,854][20465] Loop batcher_evt_loop terminating...
+[2023-02-25 14:52:56,856][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001955_8007680.pth...
+[2023-02-25 14:52:56,855][00869] Component Batcher_0 stopped!
+[2023-02-25 14:52:56,904][20479] Weights refcount: 2 0
+[2023-02-25 14:52:56,906][00869] Component InferenceWorker_p0-w0 stopped!
+[2023-02-25 14:52:56,912][20479] Stopping InferenceWorker_p0-w0...
+[2023-02-25 14:52:56,913][20479] Loop inference_proc0-0_evt_loop terminating...
+[2023-02-25 14:52:56,989][20465] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001823_7467008.pth
+[2023-02-25 14:52:57,010][20465] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001955_8007680.pth...
+[2023-02-25 14:52:57,123][20465] Stopping LearnerWorker_p0...
+[2023-02-25 14:52:57,123][20465] Loop learner_proc0_evt_loop terminating...
+[2023-02-25 14:52:57,122][00869] Component LearnerWorker_p0 stopped!
+[2023-02-25 14:52:57,158][00869] Component RolloutWorker_w5 stopped!
+[2023-02-25 14:52:57,164][20480] Stopping RolloutWorker_w0...
+[2023-02-25 14:52:57,164][00869] Component RolloutWorker_w0 stopped!
+[2023-02-25 14:52:57,169][20492] Stopping RolloutWorker_w5...
+[2023-02-25 14:52:57,170][20492] Loop rollout_proc5_evt_loop terminating...
+[2023-02-25 14:52:57,172][00869] Component RolloutWorker_w4 stopped!
+[2023-02-25 14:52:57,171][20500] Stopping RolloutWorker_w4...
+[2023-02-25 14:52:57,176][20500] Loop rollout_proc4_evt_loop terminating...
+[2023-02-25 14:52:57,167][20480] Loop rollout_proc0_evt_loop terminating...
+[2023-02-25 14:52:57,182][20489] Stopping RolloutWorker_w2...
+[2023-02-25 14:52:57,184][00869] Component RolloutWorker_w2 stopped!
+[2023-02-25 14:52:57,185][20489] Loop rollout_proc2_evt_loop terminating...
+[2023-02-25 14:52:57,191][20498] Stopping RolloutWorker_w6...
+[2023-02-25 14:52:57,191][20498] Loop rollout_proc6_evt_loop terminating...
+[2023-02-25 14:52:57,192][00869] Component RolloutWorker_w6 stopped!
+[2023-02-25 14:52:57,207][20506] Stopping RolloutWorker_w7...
+[2023-02-25 14:52:57,207][20506] Loop rollout_proc7_evt_loop terminating...
+[2023-02-25 14:52:57,208][00869] Component RolloutWorker_w7 stopped!
+[2023-02-25 14:52:57,233][00869] Component RolloutWorker_w3 stopped!
+[2023-02-25 14:52:57,242][20490] Stopping RolloutWorker_w3...
+[2023-02-25 14:52:57,243][20490] Loop rollout_proc3_evt_loop terminating...
+[2023-02-25 14:52:57,259][00869] Component RolloutWorker_w1 stopped!
+[2023-02-25 14:52:57,265][00869] Waiting for process learner_proc0 to stop...
+[2023-02-25 14:52:57,270][20481] Stopping RolloutWorker_w1...
+[2023-02-25 14:52:57,271][20481] Loop rollout_proc1_evt_loop terminating...
+[2023-02-25 14:52:59,908][00869] Waiting for process inference_proc0-0 to join...
+[2023-02-25 14:52:59,983][00869] Waiting for process rollout_proc0 to join...
+[2023-02-25 14:52:59,985][00869] Waiting for process rollout_proc1 to join...
+[2023-02-25 14:52:59,988][00869] Waiting for process rollout_proc2 to join...
+[2023-02-25 14:52:59,992][00869] Waiting for process rollout_proc3 to join...
+[2023-02-25 14:52:59,995][00869] Waiting for process rollout_proc4 to join...
+[2023-02-25 14:52:59,999][00869] Waiting for process rollout_proc5 to join...
+[2023-02-25 14:53:00,006][00869] Waiting for process rollout_proc6 to join...
+[2023-02-25 14:53:00,008][00869] Waiting for process rollout_proc7 to join...
+[2023-02-25 14:53:00,010][00869] Batcher 0 profile tree view:
+batching: 25.6095, releasing_batches: 0.0239
+[2023-02-25 14:53:00,013][00869] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0000
+  wait_policy_total: 527.6749
+update_model: 7.4080
+  weight_update: 0.0019
+one_step: 0.0081
+  handle_policy_step: 502.1283
+    deserialize: 14.9062, stack: 2.8832, obs_to_device_normalize: 113.1398, forward: 237.8537, send_messages: 25.6391
+    prepare_outputs: 82.7057
+      to_cpu: 51.8572
+[2023-02-25 14:53:00,014][00869] Learner 0 profile tree view:
+misc: 0.0062, prepare_batch: 18.1951
+train: 79.5026
+  epoch_init: 0.0080, minibatch_init: 0.0180, losses_postprocess: 0.6632, kl_divergence: 0.6703, after_optimizer: 3.1131
+  calculate_losses: 26.0573
+    losses_init: 0.0035, forward_head: 1.8272, bptt_initial: 17.0259, tail: 1.1716, advantages_returns: 0.2958, losses: 3.3208
+    bptt: 2.1152
+      bptt_forward_core: 2.0204
+  update: 48.3142
+    clip: 1.4455
+[2023-02-25 14:53:00,015][00869] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.3206, enqueue_policy_requests: 137.3416, env_step: 816.8512, overhead: 21.0230, complete_rollouts: 7.2355
+save_policy_outputs: 20.4243
+  split_output_tensors: 9.8348
+[2023-02-25 14:53:00,017][00869] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.3687, enqueue_policy_requests: 144.2824, env_step: 807.5369, overhead: 20.4800, complete_rollouts: 6.4286
+save_policy_outputs: 19.5378
+  split_output_tensors: 9.5174
+[2023-02-25 14:53:00,023][00869] Loop Runner_EvtLoop terminating...
+[2023-02-25 14:53:00,024][00869] Runner profile tree view:
+main_loop: 1101.9735
+[2023-02-25 14:53:00,025][00869] Collected {0: 8007680}, FPS: 3631.5
+[2023-02-25 14:53:00,069][00869] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-25 14:53:00,072][00869] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-02-25 14:53:00,073][00869] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-02-25 14:53:00,075][00869] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-02-25 14:53:00,079][00869] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-25 14:53:00,082][00869] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-02-25 14:53:00,084][00869] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2023-02-25 14:53:00,085][00869] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-02-25 14:53:00,088][00869] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2023-02-25 14:53:00,090][00869] Adding new argument 'hf_repository'='chist/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2023-02-25 14:53:00,091][00869] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-02-25 14:53:00,092][00869] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-02-25 14:53:00,093][00869] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-02-25 14:53:00,095][00869] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-02-25 14:53:00,096][00869] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-02-25 14:53:00,122][00869] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-25 14:53:00,125][00869] RunningMeanStd input shape: (1,)
+[2023-02-25 14:53:00,142][00869] ConvEncoder: input_channels=3
+[2023-02-25 14:53:00,195][00869] Conv encoder output size: 512
+[2023-02-25 14:53:00,197][00869] Policy head output size: 512
+[2023-02-25 14:53:00,225][00869] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001955_8007680.pth...
+[2023-02-25 14:53:01,117][00869] Num frames 100...
+[2023-02-25 14:53:01,237][00869] Num frames 200...
+[2023-02-25 14:53:01,360][00869] Num frames 300...
+[2023-02-25 14:53:01,509][00869] Num frames 400...
+[2023-02-25 14:53:01,679][00869] Num frames 500...
+[2023-02-25 14:53:01,853][00869] Num frames 600...
+[2023-02-25 14:53:02,015][00869] Num frames 700...
+[2023-02-25 14:53:02,181][00869] Num frames 800...
+[2023-02-25 14:53:02,343][00869] Num frames 900...
+[2023-02-25 14:53:02,534][00869] Num frames 1000...
+[2023-02-25 14:53:02,697][00869] Num frames 1100...
+[2023-02-25 14:53:02,857][00869] Num frames 1200...
+[2023-02-25 14:53:03,018][00869] Num frames 1300...
+[2023-02-25 14:53:03,184][00869] Num frames 1400...
+[2023-02-25 14:53:03,382][00869] Avg episode rewards: #0: 35.850, true rewards: #0: 14.850
+[2023-02-25 14:53:03,384][00869] Avg episode reward: 35.850, avg true_objective: 14.850
+[2023-02-25 14:53:03,410][00869] Num frames 1500...
+[2023-02-25 14:53:03,568][00869] Num frames 1600...
+[2023-02-25 14:53:03,733][00869] Num frames 1700...
+[2023-02-25 14:53:03,896][00869] Num frames 1800...
+[2023-02-25 14:53:04,067][00869] Num frames 1900...
+[2023-02-25 14:53:04,229][00869] Num frames 2000...
+[2023-02-25 14:53:04,396][00869] Num frames 2100...
+[2023-02-25 14:53:04,560][00869] Num frames 2200...
+[2023-02-25 14:53:04,727][00869] Num frames 2300...
+[2023-02-25 14:53:04,899][00869] Num frames 2400...
+[2023-02-25 14:53:05,033][00869] Avg episode rewards: #0: 27.725, true rewards: #0: 12.225
+[2023-02-25 14:53:05,035][00869] Avg episode reward: 27.725, avg true_objective: 12.225
+[2023-02-25 14:53:05,104][00869] Num frames 2500...
+[2023-02-25 14:53:05,215][00869] Num frames 2600...
+[2023-02-25 14:53:05,333][00869] Num frames 2700...
+[2023-02-25 14:53:05,409][00869] Avg episode rewards: #0: 20.053, true rewards: #0: 9.053
+[2023-02-25 14:53:05,412][00869] Avg episode reward: 20.053, avg true_objective: 9.053
+[2023-02-25 14:53:05,513][00869] Num frames 2800...
+[2023-02-25 14:53:05,625][00869] Num frames 2900...
+[2023-02-25 14:53:05,735][00869] Num frames 3000...
+[2023-02-25 14:53:05,864][00869] Num frames 3100...
+[2023-02-25 14:53:05,978][00869] Num frames 3200...
+[2023-02-25 14:53:06,103][00869] Num frames 3300...
+[2023-02-25 14:53:06,227][00869] Avg episode rewards: #0: 17.650, true rewards: #0: 8.400
+[2023-02-25 14:53:06,229][00869] Avg episode reward: 17.650, avg true_objective: 8.400
+[2023-02-25 14:53:06,276][00869] Num frames 3400...
+[2023-02-25 14:53:06,387][00869] Num frames 3500...
+[2023-02-25 14:53:06,509][00869] Num frames 3600...
+[2023-02-25 14:53:06,622][00869] Num frames 3700...
+[2023-02-25 14:53:06,746][00869] Num frames 3800...
+[2023-02-25 14:53:06,863][00869] Num frames 3900...
+[2023-02-25 14:53:06,982][00869] Num frames 4000...
+[2023-02-25 14:53:07,109][00869] Num frames 4100...
+[2023-02-25 14:53:07,219][00869] Num frames 4200...
+[2023-02-25 14:53:07,344][00869] Num frames 4300...
+[2023-02-25 14:53:07,405][00869] Avg episode rewards: #0: 17.808, true rewards: #0: 8.608
+[2023-02-25 14:53:07,408][00869] Avg episode reward: 17.808, avg true_objective: 8.608
+[2023-02-25 14:53:07,526][00869] Num frames 4400...
+[2023-02-25 14:53:07,645][00869] Num frames 4500...
+[2023-02-25 14:53:07,766][00869] Num frames 4600...
+[2023-02-25 14:53:07,890][00869] Num frames 4700...
+[2023-02-25 14:53:08,009][00869] Num frames 4800...
+[2023-02-25 14:53:08,127][00869] Num frames 4900...
+[2023-02-25 14:53:08,244][00869] Num frames 5000...
+[2023-02-25 14:53:08,358][00869] Num frames 5100...
+[2023-02-25 14:53:08,479][00869] Num frames 5200...
+[2023-02-25 14:53:08,601][00869] Num frames 5300...
+[2023-02-25 14:53:08,723][00869] Num frames 5400...
+[2023-02-25 14:53:08,838][00869] Num frames 5500...
+[2023-02-25 14:53:08,962][00869] Num frames 5600...
+[2023-02-25 14:53:09,088][00869] Num frames 5700...
+[2023-02-25 14:53:09,204][00869] Num frames 5800...
+[2023-02-25 14:53:09,319][00869] Num frames 5900...
+[2023-02-25 14:53:09,441][00869] Avg episode rewards: #0: 21.098, true rewards: #0: 9.932
+[2023-02-25 14:53:09,443][00869] Avg episode reward: 21.098, avg true_objective: 9.932
+[2023-02-25 14:53:09,493][00869] Num frames 6000...
+[2023-02-25 14:53:09,607][00869] Num frames 6100...
+[2023-02-25 14:53:09,726][00869] Num frames 6200...
+[2023-02-25 14:53:09,843][00869] Num frames 6300...
+[2023-02-25 14:53:09,957][00869] Num frames 6400...
+[2023-02-25 14:53:10,077][00869] Num frames 6500...
+[2023-02-25 14:53:10,195][00869] Num frames 6600...
+[2023-02-25 14:53:10,309][00869] Num frames 6700...
+[2023-02-25 14:53:10,428][00869] Num frames 6800...
+[2023-02-25 14:53:10,542][00869] Num frames 6900...
+[2023-02-25 14:53:10,672][00869] Num frames 7000...
+[2023-02-25 14:53:10,784][00869] Num frames 7100...
+[2023-02-25 14:53:10,897][00869] Avg episode rewards: #0: 22.643, true rewards: #0: 10.214
+[2023-02-25 14:53:10,898][00869] Avg episode reward: 22.643, avg true_objective: 10.214
+[2023-02-25 14:53:10,959][00869] Num frames 7200...
+[2023-02-25 14:53:11,075][00869] Num frames 7300...
+[2023-02-25 14:53:11,200][00869] Num frames 7400...
+[2023-02-25 14:53:11,314][00869] Num frames 7500...
+[2023-02-25 14:53:11,433][00869] Num frames 7600...
+[2023-02-25 14:53:11,548][00869] Num frames 7700...
+[2023-02-25 14:53:11,662][00869] Num frames 7800...
+[2023-02-25 14:53:11,785][00869] Num frames 7900...
+[2023-02-25 14:53:11,898][00869] Avg episode rewards: #0: 21.688, true rewards: #0: 9.937
+[2023-02-25 14:53:11,900][00869] Avg episode reward: 21.688, avg true_objective: 9.937
+[2023-02-25 14:53:11,964][00869] Num frames 8000...
+[2023-02-25 14:53:12,085][00869] Num frames 8100...
+[2023-02-25 14:53:12,210][00869] Num frames 8200...
+[2023-02-25 14:53:12,329][00869] Avg episode rewards: #0: 19.840, true rewards: #0: 9.173
+[2023-02-25 14:53:12,330][00869] Avg episode reward: 19.840, avg true_objective: 9.173
+[2023-02-25 14:53:12,386][00869] Num frames 8300...
+[2023-02-25 14:53:12,507][00869] Num frames 8400...
+[2023-02-25 14:53:12,619][00869] Num frames 8500...
+[2023-02-25 14:53:12,754][00869] Num frames 8600...
+[2023-02-25 14:53:12,877][00869] Num frames 8700...
+[2023-02-25 14:53:12,997][00869] Num frames 8800...
+[2023-02-25 14:53:13,114][00869] Num frames 8900...
+[2023-02-25 14:53:13,243][00869] Num frames 9000...
+[2023-02-25 14:53:13,359][00869] Num frames 9100...
+[2023-02-25 14:53:13,473][00869] Num frames 9200...
+[2023-02-25 14:53:13,595][00869] Num frames 9300...
+[2023-02-25 14:53:13,715][00869] Num frames 9400...
+[2023-02-25 14:53:13,832][00869] Num frames 9500...
+[2023-02-25 14:53:13,891][00869] Avg episode rewards: #0: 21.101, true rewards: #0: 9.501
+[2023-02-25 14:53:13,892][00869] Avg episode reward: 21.101, avg true_objective: 9.501
+[2023-02-25 14:54:12,964][00869] Replay video saved to /content/train_dir/default_experiment/replay.mp4!