diff --git "a/sf_log.txt" "b/sf_log.txt" --- "a/sf_log.txt" +++ "b/sf_log.txt" @@ -981,3 +981,1426 @@ main_loop: 1176.2878 [2023-02-26 16:20:08,819][00820] Avg episode rewards: #0: 5.024, true rewards: #0: 4.224 [2023-02-26 16:20:08,822][00820] Avg episode reward: 5.024, avg true_objective: 4.224 [2023-02-26 16:20:31,279][00820] Replay video saved to /content/train_dir/default_experiment/replay.mp4! +[2023-02-26 16:20:44,578][00820] The model has been pushed to https://huggingface.co/mlewand/rl_course_vizdoom_health_gathering_supreme +[2023-02-26 16:22:14,115][00820] Environment doom_basic already registered, overwriting... +[2023-02-26 16:22:14,118][00820] Environment doom_two_colors_easy already registered, overwriting... +[2023-02-26 16:22:14,121][00820] Environment doom_two_colors_hard already registered, overwriting... +[2023-02-26 16:22:14,122][00820] Environment doom_dm already registered, overwriting... +[2023-02-26 16:22:14,124][00820] Environment doom_dwango5 already registered, overwriting... +[2023-02-26 16:22:14,127][00820] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-02-26 16:22:14,128][00820] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-02-26 16:22:14,129][00820] Environment doom_my_way_home already registered, overwriting... +[2023-02-26 16:22:14,131][00820] Environment doom_deadly_corridor already registered, overwriting... +[2023-02-26 16:22:14,133][00820] Environment doom_defend_the_center already registered, overwriting... +[2023-02-26 16:22:14,134][00820] Environment doom_defend_the_line already registered, overwriting... +[2023-02-26 16:22:14,136][00820] Environment doom_health_gathering already registered, overwriting... +[2023-02-26 16:22:14,138][00820] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-02-26 16:22:14,139][00820] Environment doom_battle already registered, overwriting... +[2023-02-26 16:22:14,141][00820] Environment doom_battle2 already registered, overwriting... +[2023-02-26 16:22:14,143][00820] Environment doom_duel_bots already registered, overwriting... +[2023-02-26 16:22:14,145][00820] Environment doom_deathmatch_bots already registered, overwriting... +[2023-02-26 16:22:14,147][00820] Environment doom_duel already registered, overwriting... +[2023-02-26 16:22:14,148][00820] Environment doom_deathmatch_full already registered, overwriting... +[2023-02-26 16:22:14,150][00820] Environment doom_benchmark already registered, overwriting... +[2023-02-26 16:22:14,152][00820] register_encoder_factory: +[2023-02-26 16:22:14,180][00820] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-02-26 16:22:14,182][00820] Overriding arg 'train_for_env_steps' with value 10000000 passed from command line +[2023-02-26 16:22:14,187][00820] Experiment dir /content/train_dir/default_experiment already exists! +[2023-02-26 16:22:14,192][00820] Resuming existing experiment from /content/train_dir/default_experiment... 
+[2023-02-26 16:22:14,194][00820] Weights and Biases integration disabled
+[2023-02-26 16:22:14,197][00820] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2023-02-26 16:22:15,553][00820] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=10000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 4000000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2023-02-26 16:22:15,555][00820] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2023-02-26 16:22:15,560][00820] Rollout worker 0 uses device cpu
+[2023-02-26 16:22:15,562][00820] Rollout worker 1 uses device cpu
+[2023-02-26 16:22:15,566][00820] Rollout worker 2 uses device cpu
+[2023-02-26 16:22:15,567][00820] Rollout worker 3 uses device cpu
+[2023-02-26 16:22:15,568][00820] Rollout worker 4 uses device cpu
+[2023-02-26 16:22:15,569][00820] Rollout worker 5 uses device cpu
+[2023-02-26 16:22:15,571][00820] Rollout worker 6 uses device cpu
+[2023-02-26 16:22:15,572][00820] Rollout worker 7 uses device cpu
+[2023-02-26 16:22:15,688][00820] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-26 16:22:15,690][00820] InferenceWorker_p0-w0: min num requests: 2
+[2023-02-26 16:22:15,725][00820] Starting all processes...
+[2023-02-26 16:22:15,726][00820] Starting process learner_proc0
+[2023-02-26 16:22:15,866][00820] Starting all processes...
+[2023-02-26 16:22:15,876][00820] Starting process inference_proc0-0
+[2023-02-26 16:22:15,876][00820] Starting process rollout_proc0
+[2023-02-26 16:22:15,885][00820] Starting process rollout_proc1
+[2023-02-26 16:22:15,885][00820] Starting process rollout_proc2
+[2023-02-26 16:22:15,886][00820] Starting process rollout_proc3
+[2023-02-26 16:22:15,886][00820] Starting process rollout_proc4
+[2023-02-26 16:22:15,886][00820] Starting process rollout_proc5
+[2023-02-26 16:22:15,886][00820] Starting process rollout_proc6
+[2023-02-26 16:22:15,886][00820] Starting process rollout_proc7
+[2023-02-26 16:22:25,390][19466] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-26 16:22:25,398][19466] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-02-26 16:22:25,510][19466] Num visible devices: 1
+[2023-02-26 16:22:25,574][19466] Starting seed is not provided
+[2023-02-26 16:22:25,574][19466] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-26 16:22:25,574][19466] Initializing actor-critic model on device cuda:0
+[2023-02-26 16:22:25,575][19466] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-26 16:22:25,581][19466] RunningMeanStd input shape: (1,)
+[2023-02-26 16:22:25,740][19466] ConvEncoder: input_channels=3
+[2023-02-26 16:22:27,229][19466] Conv encoder output size: 512
+[2023-02-26 16:22:27,233][19466] Policy head output size: 512
+[2023-02-26 16:22:27,391][19466] Created Actor Critic model with architecture:
+[2023-02-26 16:22:27,405][19466] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-02-26 16:22:27,572][19480] Worker 0 uses CPU cores [0]
+[2023-02-26 16:22:27,859][19481] Worker 1 uses CPU cores [1]
+[2023-02-26 16:22:28,232][19482] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-26 16:22:28,232][19482] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-02-26 16:22:28,295][19482] Num visible devices: 1
+[2023-02-26 16:22:28,447][19488] Worker 2 uses CPU cores [0]
+[2023-02-26 16:22:28,529][19490] Worker 3 uses CPU cores [1]
+[2023-02-26 16:22:28,612][19493] Worker 4 uses CPU cores [0]
+[2023-02-26 16:22:28,783][19495] Worker 6 uses CPU cores [0]
+[2023-02-26 16:22:28,871][19503] Worker 7 uses CPU cores [1]
+[2023-02-26 16:22:28,903][19497] Worker 5 uses CPU cores [1]
+[2023-02-26 16:22:30,843][19466] Using optimizer
+[2023-02-26 16:22:30,844][19466] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+[2023-02-26 16:22:30,880][19466] Loading model from checkpoint
+[2023-02-26 16:22:30,885][19466] Loaded experiment state at self.train_step=978, self.env_steps=4005888
+[2023-02-26 16:22:30,885][19466] Initialized policy 0 weights for model version 978
+[2023-02-26 16:22:30,888][19466] LearnerWorker_p0 finished initialization!
+[2023-02-26 16:22:30,888][19466] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-02-26 16:22:31,007][19482] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-26 16:22:31,009][19482] RunningMeanStd input shape: (1,)
+[2023-02-26 16:22:31,026][19482] ConvEncoder: input_channels=3
+[2023-02-26 16:22:31,127][19482] Conv encoder output size: 512
+[2023-02-26 16:22:31,127][19482] Policy head output size: 512
+[2023-02-26 16:22:33,481][00820] Inference worker 0-0 is ready!
+[2023-02-26 16:22:33,486][00820] All inference workers are ready! Signal rollout workers to start!
+[2023-02-26 16:22:33,624][19503] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,626][19490] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,628][19481] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,657][19495] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,661][19480] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,672][19493] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,673][19488] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:33,667][19497] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-02-26 16:22:34,198][00820] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 4005888. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-02-26 16:22:34,859][19493] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:34,861][19480] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:34,863][19495] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:35,171][19490] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:35,182][19481] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:35,191][19503] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:35,219][19497] Decorrelating experience for 0 frames...
+[2023-02-26 16:22:35,681][00820] Heartbeat connected on Batcher_0 +[2023-02-26 16:22:35,686][00820] Heartbeat connected on LearnerWorker_p0 +[2023-02-26 16:22:35,717][00820] Heartbeat connected on InferenceWorker_p0-w0 +[2023-02-26 16:22:35,955][19480] Decorrelating experience for 32 frames... +[2023-02-26 16:22:35,966][19495] Decorrelating experience for 32 frames... +[2023-02-26 16:22:36,295][19490] Decorrelating experience for 32 frames... +[2023-02-26 16:22:36,292][19481] Decorrelating experience for 32 frames... +[2023-02-26 16:22:36,420][19503] Decorrelating experience for 32 frames... +[2023-02-26 16:22:36,455][19493] Decorrelating experience for 32 frames... +[2023-02-26 16:22:36,502][19488] Decorrelating experience for 0 frames... +[2023-02-26 16:22:37,022][19497] Decorrelating experience for 32 frames... +[2023-02-26 16:22:37,243][19490] Decorrelating experience for 64 frames... +[2023-02-26 16:22:37,353][19488] Decorrelating experience for 32 frames... +[2023-02-26 16:22:37,588][19493] Decorrelating experience for 64 frames... +[2023-02-26 16:22:38,380][19503] Decorrelating experience for 64 frames... +[2023-02-26 16:22:38,443][19480] Decorrelating experience for 64 frames... +[2023-02-26 16:22:38,546][19490] Decorrelating experience for 96 frames... +[2023-02-26 16:22:38,815][19495] Decorrelating experience for 64 frames... +[2023-02-26 16:22:38,913][00820] Heartbeat connected on RolloutWorker_w3 +[2023-02-26 16:22:39,198][00820] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-26 16:22:39,415][19493] Decorrelating experience for 96 frames... +[2023-02-26 16:22:39,504][19497] Decorrelating experience for 64 frames... +[2023-02-26 16:22:39,885][00820] Heartbeat connected on RolloutWorker_w4 +[2023-02-26 16:22:41,028][19488] Decorrelating experience for 64 frames... +[2023-02-26 16:22:41,085][19481] Decorrelating experience for 64 frames... +[2023-02-26 16:22:41,106][19503] Decorrelating experience for 96 frames... +[2023-02-26 16:22:41,675][00820] Heartbeat connected on RolloutWorker_w7 +[2023-02-26 16:22:42,936][19497] Decorrelating experience for 96 frames... +[2023-02-26 16:22:43,711][00820] Heartbeat connected on RolloutWorker_w5 +[2023-02-26 16:22:43,725][19480] Decorrelating experience for 96 frames... +[2023-02-26 16:22:43,730][19495] Decorrelating experience for 96 frames... +[2023-02-26 16:22:43,907][19488] Decorrelating experience for 96 frames... +[2023-02-26 16:22:44,198][00820] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 46.2. Samples: 462. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-26 16:22:44,208][00820] Avg episode reward: [(0, '2.737')] +[2023-02-26 16:22:44,304][00820] Heartbeat connected on RolloutWorker_w6 +[2023-02-26 16:22:44,309][00820] Heartbeat connected on RolloutWorker_w0 +[2023-02-26 16:22:44,523][00820] Heartbeat connected on RolloutWorker_w2 +[2023-02-26 16:22:45,101][19481] Decorrelating experience for 96 frames... +[2023-02-26 16:22:46,017][00820] Heartbeat connected on RolloutWorker_w1 +[2023-02-26 16:22:47,346][19466] Signal inference workers to stop experience collection... +[2023-02-26 16:22:47,399][19482] InferenceWorker_p0-w0: stopping experience collection +[2023-02-26 16:22:49,198][00820] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 4005888. Throughput: 0: 169.3. Samples: 2540. 
Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-02-26 16:22:49,200][00820] Avg episode reward: [(0, '2.879')] +[2023-02-26 16:22:50,318][19466] Signal inference workers to resume experience collection... +[2023-02-26 16:22:50,321][19482] InferenceWorker_p0-w0: resuming experience collection +[2023-02-26 16:22:54,198][00820] Fps is (10 sec: 2048.0, 60 sec: 1024.0, 300 sec: 1024.0). Total num frames: 4026368. Throughput: 0: 172.4. Samples: 3448. Policy #0 lag: (min: 0.0, avg: 1.1, max: 3.0) +[2023-02-26 16:22:54,205][00820] Avg episode reward: [(0, '3.833')] +[2023-02-26 16:22:59,202][00820] Fps is (10 sec: 3684.8, 60 sec: 1474.3, 300 sec: 1474.3). Total num frames: 4042752. Throughput: 0: 373.2. Samples: 9332. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:22:59,207][00820] Avg episode reward: [(0, '4.333')] +[2023-02-26 16:22:59,767][19482] Updated weights for policy 0, policy_version 988 (0.0016) +[2023-02-26 16:23:04,199][00820] Fps is (10 sec: 2866.9, 60 sec: 1638.3, 300 sec: 1638.3). Total num frames: 4055040. Throughput: 0: 448.9. Samples: 13466. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:23:04,204][00820] Avg episode reward: [(0, '4.617')] +[2023-02-26 16:23:09,198][00820] Fps is (10 sec: 2868.4, 60 sec: 1872.5, 300 sec: 1872.5). Total num frames: 4071424. Throughput: 0: 443.9. Samples: 15538. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:23:09,203][00820] Avg episode reward: [(0, '4.593')] +[2023-02-26 16:23:12,691][19482] Updated weights for policy 0, policy_version 998 (0.0023) +[2023-02-26 16:23:14,198][00820] Fps is (10 sec: 3686.7, 60 sec: 2150.4, 300 sec: 2150.4). Total num frames: 4091904. Throughput: 0: 521.5. Samples: 20862. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:23:14,200][00820] Avg episode reward: [(0, '4.600')] +[2023-02-26 16:23:19,199][00820] Fps is (10 sec: 4095.6, 60 sec: 2366.5, 300 sec: 2366.5). Total num frames: 4112384. Throughput: 0: 605.4. Samples: 27244. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:23:19,205][00820] Avg episode reward: [(0, '4.570')] +[2023-02-26 16:23:24,198][00820] Fps is (10 sec: 3276.9, 60 sec: 2375.7, 300 sec: 2375.7). Total num frames: 4124672. Throughput: 0: 653.0. Samples: 29384. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-26 16:23:24,201][00820] Avg episode reward: [(0, '4.439')] +[2023-02-26 16:23:24,350][19482] Updated weights for policy 0, policy_version 1008 (0.0013) +[2023-02-26 16:23:29,198][00820] Fps is (10 sec: 2867.5, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 4141056. Throughput: 0: 731.0. Samples: 33358. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:23:29,204][00820] Avg episode reward: [(0, '4.387')] +[2023-02-26 16:23:34,198][00820] Fps is (10 sec: 3686.4, 60 sec: 2594.1, 300 sec: 2594.1). Total num frames: 4161536. Throughput: 0: 805.8. Samples: 38800. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:23:34,200][00820] Avg episode reward: [(0, '4.501')] +[2023-02-26 16:23:36,065][19482] Updated weights for policy 0, policy_version 1018 (0.0013) +[2023-02-26 16:23:39,198][00820] Fps is (10 sec: 4096.0, 60 sec: 2935.5, 300 sec: 2709.7). Total num frames: 4182016. Throughput: 0: 855.9. Samples: 41962. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:23:39,206][00820] Avg episode reward: [(0, '4.848')] +[2023-02-26 16:23:44,201][00820] Fps is (10 sec: 3685.1, 60 sec: 3208.4, 300 sec: 2750.0). Total num frames: 4198400. Throughput: 0: 847.7. Samples: 47476. 
Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:23:44,205][00820] Avg episode reward: [(0, '4.815')] +[2023-02-26 16:23:48,634][19482] Updated weights for policy 0, policy_version 1028 (0.0015) +[2023-02-26 16:23:49,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3413.3, 300 sec: 2730.7). Total num frames: 4210688. Throughput: 0: 846.2. Samples: 51544. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:23:49,201][00820] Avg episode reward: [(0, '4.808')] +[2023-02-26 16:23:54,198][00820] Fps is (10 sec: 3277.9, 60 sec: 3413.3, 300 sec: 2816.0). Total num frames: 4231168. Throughput: 0: 863.3. Samples: 54386. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:23:54,200][00820] Avg episode reward: [(0, '4.789')] +[2023-02-26 16:23:58,162][19482] Updated weights for policy 0, policy_version 1038 (0.0020) +[2023-02-26 16:23:59,198][00820] Fps is (10 sec: 4505.7, 60 sec: 3550.1, 300 sec: 2939.5). Total num frames: 4255744. Throughput: 0: 893.3. Samples: 61062. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:23:59,205][00820] Avg episode reward: [(0, '4.833')] +[2023-02-26 16:24:04,198][00820] Fps is (10 sec: 3686.2, 60 sec: 3549.9, 300 sec: 2912.7). Total num frames: 4268032. Throughput: 0: 860.3. Samples: 65958. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-26 16:24:04,204][00820] Avg episode reward: [(0, '4.662')] +[2023-02-26 16:24:09,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 2888.7). Total num frames: 4280320. Throughput: 0: 858.9. Samples: 68034. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:24:09,206][00820] Avg episode reward: [(0, '4.546')] +[2023-02-26 16:24:11,760][19482] Updated weights for policy 0, policy_version 1048 (0.0027) +[2023-02-26 16:24:14,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 2949.1). Total num frames: 4300800. Throughput: 0: 880.7. Samples: 72988. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-26 16:24:14,203][00820] Avg episode reward: [(0, '4.529')] +[2023-02-26 16:24:14,212][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001050_4300800.pth... +[2023-02-26 16:24:14,374][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000894_3661824.pth +[2023-02-26 16:24:19,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3481.7, 300 sec: 3003.7). Total num frames: 4321280. Throughput: 0: 906.5. Samples: 79594. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:24:19,200][00820] Avg episode reward: [(0, '4.657')] +[2023-02-26 16:24:21,678][19482] Updated weights for policy 0, policy_version 1058 (0.0013) +[2023-02-26 16:24:24,205][00820] Fps is (10 sec: 3683.7, 60 sec: 3549.4, 300 sec: 3015.9). Total num frames: 4337664. Throughput: 0: 900.8. Samples: 82506. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:24:24,208][00820] Avg episode reward: [(0, '4.826')] +[2023-02-26 16:24:29,199][00820] Fps is (10 sec: 3276.4, 60 sec: 3549.8, 300 sec: 3027.4). Total num frames: 4354048. Throughput: 0: 870.5. Samples: 86646. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:24:29,211][00820] Avg episode reward: [(0, '4.657')] +[2023-02-26 16:24:34,198][00820] Fps is (10 sec: 3279.2, 60 sec: 3481.6, 300 sec: 3037.9). Total num frames: 4370432. Throughput: 0: 889.5. Samples: 91572. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:24:34,203][00820] Avg episode reward: [(0, '4.784')] +[2023-02-26 16:24:34,639][19482] Updated weights for policy 0, policy_version 1068 (0.0017) +[2023-02-26 16:24:39,198][00820] Fps is (10 sec: 3686.8, 60 sec: 3481.6, 300 sec: 3080.2). Total num frames: 4390912. Throughput: 0: 896.0. Samples: 94708. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:24:39,206][00820] Avg episode reward: [(0, '4.728')] +[2023-02-26 16:24:44,199][00820] Fps is (10 sec: 3686.0, 60 sec: 3481.7, 300 sec: 3087.7). Total num frames: 4407296. Throughput: 0: 879.8. Samples: 100656. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:24:44,203][00820] Avg episode reward: [(0, '4.529')] +[2023-02-26 16:24:45,854][19482] Updated weights for policy 0, policy_version 1078 (0.0015) +[2023-02-26 16:24:49,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3094.8). Total num frames: 4423680. Throughput: 0: 863.2. Samples: 104802. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:24:49,200][00820] Avg episode reward: [(0, '4.483')] +[2023-02-26 16:24:54,198][00820] Fps is (10 sec: 3277.2, 60 sec: 3481.6, 300 sec: 3101.3). Total num frames: 4440064. Throughput: 0: 865.1. Samples: 106964. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:24:54,201][00820] Avg episode reward: [(0, '4.641')] +[2023-02-26 16:24:57,415][19482] Updated weights for policy 0, policy_version 1088 (0.0034) +[2023-02-26 16:24:59,202][00820] Fps is (10 sec: 3685.0, 60 sec: 3413.1, 300 sec: 3135.5). Total num frames: 4460544. Throughput: 0: 896.1. Samples: 113318. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:24:59,204][00820] Avg episode reward: [(0, '4.680')] +[2023-02-26 16:25:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3140.3). Total num frames: 4476928. Throughput: 0: 871.0. Samples: 118790. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:25:04,203][00820] Avg episode reward: [(0, '4.530')] +[2023-02-26 16:25:09,198][00820] Fps is (10 sec: 3278.1, 60 sec: 3549.9, 300 sec: 3144.7). Total num frames: 4493312. Throughput: 0: 852.0. Samples: 120838. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:25:09,200][00820] Avg episode reward: [(0, '4.545')] +[2023-02-26 16:25:10,149][19482] Updated weights for policy 0, policy_version 1098 (0.0019) +[2023-02-26 16:25:14,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 3148.8). Total num frames: 4509696. Throughput: 0: 857.3. Samples: 125222. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:25:14,205][00820] Avg episode reward: [(0, '4.616')] +[2023-02-26 16:25:19,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3177.5). Total num frames: 4530176. Throughput: 0: 891.2. Samples: 131674. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:25:19,200][00820] Avg episode reward: [(0, '4.760')] +[2023-02-26 16:25:20,325][19482] Updated weights for policy 0, policy_version 1108 (0.0020) +[2023-02-26 16:25:24,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3550.3, 300 sec: 3204.5). Total num frames: 4550656. Throughput: 0: 892.4. Samples: 134866. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:25:24,201][00820] Avg episode reward: [(0, '4.685')] +[2023-02-26 16:25:29,199][00820] Fps is (10 sec: 3276.4, 60 sec: 3481.6, 300 sec: 3183.2). Total num frames: 4562944. Throughput: 0: 857.3. Samples: 139236. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:25:29,207][00820] Avg episode reward: [(0, '4.833')] +[2023-02-26 16:25:33,644][19482] Updated weights for policy 0, policy_version 1118 (0.0032) +[2023-02-26 16:25:34,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3185.8). Total num frames: 4579328. Throughput: 0: 866.0. Samples: 143772. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:25:34,204][00820] Avg episode reward: [(0, '4.790')] +[2023-02-26 16:25:39,198][00820] Fps is (10 sec: 4096.6, 60 sec: 3549.9, 300 sec: 3232.5). Total num frames: 4603904. Throughput: 0: 891.2. Samples: 147066. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:25:39,201][00820] Avg episode reward: [(0, '4.444')] +[2023-02-26 16:25:43,462][19482] Updated weights for policy 0, policy_version 1128 (0.0012) +[2023-02-26 16:25:44,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3549.9, 300 sec: 3233.7). Total num frames: 4620288. Throughput: 0: 896.0. Samples: 153636. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:25:44,208][00820] Avg episode reward: [(0, '4.416')] +[2023-02-26 16:25:49,198][00820] Fps is (10 sec: 3276.6, 60 sec: 3549.9, 300 sec: 3234.8). Total num frames: 4636672. Throughput: 0: 867.1. Samples: 157810. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:25:49,202][00820] Avg episode reward: [(0, '4.363')] +[2023-02-26 16:25:54,198][00820] Fps is (10 sec: 2867.4, 60 sec: 3481.6, 300 sec: 3215.4). Total num frames: 4648960. Throughput: 0: 867.8. Samples: 159888. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:25:54,201][00820] Avg episode reward: [(0, '4.553')] +[2023-02-26 16:25:56,320][19482] Updated weights for policy 0, policy_version 1138 (0.0020) +[2023-02-26 16:25:59,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3550.1, 300 sec: 3256.8). Total num frames: 4673536. Throughput: 0: 899.9. Samples: 165716. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:25:59,201][00820] Avg episode reward: [(0, '4.780')] +[2023-02-26 16:26:04,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3257.3). Total num frames: 4689920. Throughput: 0: 892.0. Samples: 171814. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:26:04,204][00820] Avg episode reward: [(0, '4.646')] +[2023-02-26 16:26:07,714][19482] Updated weights for policy 0, policy_version 1148 (0.0023) +[2023-02-26 16:26:09,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3257.7). Total num frames: 4706304. Throughput: 0: 866.2. Samples: 173846. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:26:09,203][00820] Avg episode reward: [(0, '4.633')] +[2023-02-26 16:26:14,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3239.6). Total num frames: 4718592. Throughput: 0: 864.3. Samples: 178130. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:26:14,201][00820] Avg episode reward: [(0, '4.593')] +[2023-02-26 16:26:14,270][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001153_4722688.pth... +[2023-02-26 16:26:14,435][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth +[2023-02-26 16:26:19,159][19482] Updated weights for policy 0, policy_version 1158 (0.0015) +[2023-02-26 16:26:19,198][00820] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3276.8). Total num frames: 4743168. Throughput: 0: 899.3. Samples: 184238. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:26:19,201][00820] Avg episode reward: [(0, '4.500')] +[2023-02-26 16:26:24,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 3294.6). Total num frames: 4763648. Throughput: 0: 898.0. Samples: 187476. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:26:24,201][00820] Avg episode reward: [(0, '4.332')] +[2023-02-26 16:26:29,201][00820] Fps is (10 sec: 3275.7, 60 sec: 3549.7, 300 sec: 3276.8). Total num frames: 4775936. Throughput: 0: 859.4. Samples: 192310. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:26:29,204][00820] Avg episode reward: [(0, '4.475')] +[2023-02-26 16:26:31,657][19482] Updated weights for policy 0, policy_version 1168 (0.0016) +[2023-02-26 16:26:34,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3259.7). Total num frames: 4788224. Throughput: 0: 859.4. Samples: 196482. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:26:34,205][00820] Avg episode reward: [(0, '4.498')] +[2023-02-26 16:26:39,198][00820] Fps is (10 sec: 3687.6, 60 sec: 3481.6, 300 sec: 3293.5). Total num frames: 4812800. Throughput: 0: 884.8. Samples: 199702. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:26:39,203][00820] Avg episode reward: [(0, '4.814')] +[2023-02-26 16:26:41,688][19482] Updated weights for policy 0, policy_version 1178 (0.0029) +[2023-02-26 16:26:44,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 3309.6). Total num frames: 4833280. Throughput: 0: 903.9. Samples: 206390. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:26:44,201][00820] Avg episode reward: [(0, '4.502')] +[2023-02-26 16:26:49,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3308.9). Total num frames: 4849664. Throughput: 0: 872.4. Samples: 211070. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:26:49,202][00820] Avg episode reward: [(0, '4.488')] +[2023-02-26 16:26:54,202][00820] Fps is (10 sec: 2866.0, 60 sec: 3549.6, 300 sec: 3292.5). Total num frames: 4861952. Throughput: 0: 873.6. Samples: 213160. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:26:54,205][00820] Avg episode reward: [(0, '4.521')] +[2023-02-26 16:26:54,697][19482] Updated weights for policy 0, policy_version 1188 (0.0014) +[2023-02-26 16:26:59,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3307.7). Total num frames: 4882432. Throughput: 0: 900.8. Samples: 218668. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:26:59,204][00820] Avg episode reward: [(0, '4.705')] +[2023-02-26 16:27:04,053][19482] Updated weights for policy 0, policy_version 1198 (0.0020) +[2023-02-26 16:27:04,198][00820] Fps is (10 sec: 4507.5, 60 sec: 3618.1, 300 sec: 3337.5). Total num frames: 4907008. Throughput: 0: 912.3. Samples: 225290. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:27:04,206][00820] Avg episode reward: [(0, '4.574')] +[2023-02-26 16:27:09,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3321.5). Total num frames: 4919296. Throughput: 0: 892.0. Samples: 227616. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:09,201][00820] Avg episode reward: [(0, '4.757')] +[2023-02-26 16:27:14,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3549.9, 300 sec: 3306.1). Total num frames: 4931584. Throughput: 0: 874.0. Samples: 231638. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:27:14,201][00820] Avg episode reward: [(0, '4.606')] +[2023-02-26 16:27:17,525][19482] Updated weights for policy 0, policy_version 1208 (0.0021) +[2023-02-26 16:27:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3319.9). Total num frames: 4952064. Throughput: 0: 902.8. Samples: 237110. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:19,202][00820] Avg episode reward: [(0, '4.687')] +[2023-02-26 16:27:24,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 3347.4). Total num frames: 4976640. Throughput: 0: 903.3. Samples: 240350. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:27:24,208][00820] Avg episode reward: [(0, '4.601')] +[2023-02-26 16:27:27,965][19482] Updated weights for policy 0, policy_version 1218 (0.0012) +[2023-02-26 16:27:29,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3550.1, 300 sec: 3332.3). Total num frames: 4988928. Throughput: 0: 881.9. Samples: 246074. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:27:29,201][00820] Avg episode reward: [(0, '4.580')] +[2023-02-26 16:27:34,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3387.9). Total num frames: 5005312. Throughput: 0: 870.3. Samples: 250234. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:27:34,202][00820] Avg episode reward: [(0, '4.580')] +[2023-02-26 16:27:39,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3457.3). Total num frames: 5025792. Throughput: 0: 880.2. Samples: 252764. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:39,203][00820] Avg episode reward: [(0, '4.660')] +[2023-02-26 16:27:40,235][19482] Updated weights for policy 0, policy_version 1228 (0.0025) +[2023-02-26 16:27:44,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5046272. Throughput: 0: 898.7. Samples: 259108. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:44,204][00820] Avg episode reward: [(0, '4.720')] +[2023-02-26 16:27:49,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 5062656. Throughput: 0: 871.2. Samples: 264496. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:49,204][00820] Avg episode reward: [(0, '4.713')] +[2023-02-26 16:27:51,816][19482] Updated weights for policy 0, policy_version 1238 (0.0040) +[2023-02-26 16:27:54,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3550.1, 300 sec: 3499.0). Total num frames: 5074944. Throughput: 0: 866.7. Samples: 266618. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:54,205][00820] Avg episode reward: [(0, '4.714')] +[2023-02-26 16:27:59,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5095424. Throughput: 0: 883.8. Samples: 271408. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:27:59,203][00820] Avg episode reward: [(0, '4.766')] +[2023-02-26 16:28:02,588][19482] Updated weights for policy 0, policy_version 1248 (0.0025) +[2023-02-26 16:28:04,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 5115904. Throughput: 0: 904.9. Samples: 277830. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-26 16:28:04,206][00820] Avg episode reward: [(0, '4.911')] +[2023-02-26 16:28:09,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5132288. Throughput: 0: 903.3. Samples: 280998. 
Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-26 16:28:09,207][00820] Avg episode reward: [(0, '4.987')] +[2023-02-26 16:28:14,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3512.9). Total num frames: 5148672. Throughput: 0: 871.0. Samples: 285270. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:28:14,206][00820] Avg episode reward: [(0, '4.968')] +[2023-02-26 16:28:14,215][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001257_5148672.pth... +[2023-02-26 16:28:14,431][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001050_4300800.pth +[2023-02-26 16:28:15,507][19482] Updated weights for policy 0, policy_version 1258 (0.0013) +[2023-02-26 16:28:19,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5165056. Throughput: 0: 891.8. Samples: 290366. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:28:19,201][00820] Avg episode reward: [(0, '4.909')] +[2023-02-26 16:28:24,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5189632. Throughput: 0: 911.9. Samples: 293800. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:28:24,201][00820] Avg episode reward: [(0, '4.693')] +[2023-02-26 16:28:24,715][19482] Updated weights for policy 0, policy_version 1268 (0.0023) +[2023-02-26 16:28:29,203][00820] Fps is (10 sec: 4093.9, 60 sec: 3617.8, 300 sec: 3540.5). Total num frames: 5206016. Throughput: 0: 908.2. Samples: 299980. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:28:29,209][00820] Avg episode reward: [(0, '4.704')] +[2023-02-26 16:28:34,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 5222400. Throughput: 0: 882.8. Samples: 304224. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:28:34,201][00820] Avg episode reward: [(0, '4.619')] +[2023-02-26 16:28:37,909][19482] Updated weights for policy 0, policy_version 1278 (0.0024) +[2023-02-26 16:28:39,198][00820] Fps is (10 sec: 3278.5, 60 sec: 3549.9, 300 sec: 3526.8). Total num frames: 5238784. Throughput: 0: 884.9. Samples: 306438. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:28:39,205][00820] Avg episode reward: [(0, '4.445')] +[2023-02-26 16:28:44,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 5255168. Throughput: 0: 902.9. Samples: 312038. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-02-26 16:28:44,200][00820] Avg episode reward: [(0, '4.694')] +[2023-02-26 16:28:49,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 5267456. Throughput: 0: 849.6. Samples: 316064. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:28:49,202][00820] Avg episode reward: [(0, '4.927')] +[2023-02-26 16:28:51,870][19482] Updated weights for policy 0, policy_version 1288 (0.0023) +[2023-02-26 16:28:54,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3413.3, 300 sec: 3471.2). Total num frames: 5279744. Throughput: 0: 816.8. Samples: 317752. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:28:54,205][00820] Avg episode reward: [(0, '5.010')] +[2023-02-26 16:28:59,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3485.1). Total num frames: 5296128. Throughput: 0: 816.5. Samples: 322014. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:28:59,205][00820] Avg episode reward: [(0, '4.827')] +[2023-02-26 16:29:03,462][19482] Updated weights for policy 0, policy_version 1298 (0.0022) +[2023-02-26 16:29:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3512.8). Total num frames: 5316608. Throughput: 0: 840.3. Samples: 328178. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-26 16:29:04,206][00820] Avg episode reward: [(0, '4.686')] +[2023-02-26 16:29:09,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 5341184. Throughput: 0: 838.7. Samples: 331540. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-02-26 16:29:09,201][00820] Avg episode reward: [(0, '4.473')] +[2023-02-26 16:29:14,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3499.0). Total num frames: 5353472. Throughput: 0: 814.4. Samples: 336626. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:29:14,203][00820] Avg episode reward: [(0, '4.670')] +[2023-02-26 16:29:15,166][19482] Updated weights for policy 0, policy_version 1308 (0.0024) +[2023-02-26 16:29:19,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3345.1, 300 sec: 3485.2). Total num frames: 5365760. Throughput: 0: 815.2. Samples: 340906. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:29:19,200][00820] Avg episode reward: [(0, '5.062')] +[2023-02-26 16:29:24,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3512.9). Total num frames: 5390336. Throughput: 0: 835.6. Samples: 344040. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:29:24,200][00820] Avg episode reward: [(0, '5.143')] +[2023-02-26 16:29:25,834][19482] Updated weights for policy 0, policy_version 1318 (0.0014) +[2023-02-26 16:29:29,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3413.6, 300 sec: 3526.7). Total num frames: 5410816. Throughput: 0: 859.6. Samples: 350720. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:29:29,202][00820] Avg episode reward: [(0, '4.455')] +[2023-02-26 16:29:34,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 5427200. Throughput: 0: 881.2. Samples: 355720. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:29:34,203][00820] Avg episode reward: [(0, '4.523')] +[2023-02-26 16:29:38,150][19482] Updated weights for policy 0, policy_version 1328 (0.0014) +[2023-02-26 16:29:39,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3345.0, 300 sec: 3499.0). Total num frames: 5439488. Throughput: 0: 890.0. Samples: 357802. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:29:39,205][00820] Avg episode reward: [(0, '4.343')] +[2023-02-26 16:29:44,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 5464064. Throughput: 0: 917.0. Samples: 363278. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:29:44,200][00820] Avg episode reward: [(0, '4.655')] +[2023-02-26 16:29:47,768][19482] Updated weights for policy 0, policy_version 1338 (0.0020) +[2023-02-26 16:29:49,198][00820] Fps is (10 sec: 4505.9, 60 sec: 3618.1, 300 sec: 3540.6). Total num frames: 5484544. Throughput: 0: 930.0. Samples: 370028. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:29:49,204][00820] Avg episode reward: [(0, '4.666')] +[2023-02-26 16:29:54,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3526.8). Total num frames: 5500928. Throughput: 0: 912.3. Samples: 372594. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:29:54,201][00820] Avg episode reward: [(0, '4.597')] +[2023-02-26 16:29:59,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 5513216. Throughput: 0: 894.5. Samples: 376878. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:29:59,205][00820] Avg episode reward: [(0, '4.616')] +[2023-02-26 16:30:00,832][19482] Updated weights for policy 0, policy_version 1348 (0.0012) +[2023-02-26 16:30:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 5533696. Throughput: 0: 922.5. Samples: 382420. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:30:04,200][00820] Avg episode reward: [(0, '4.657')] +[2023-02-26 16:30:09,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 5554176. Throughput: 0: 926.6. Samples: 385736. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:30:09,200][00820] Avg episode reward: [(0, '4.571')] +[2023-02-26 16:30:10,280][19482] Updated weights for policy 0, policy_version 1358 (0.0022) +[2023-02-26 16:30:14,199][00820] Fps is (10 sec: 3685.9, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 5570560. Throughput: 0: 898.8. Samples: 391168. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:30:14,206][00820] Avg episode reward: [(0, '4.656')] +[2023-02-26 16:30:14,216][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001360_5570560.pth... +[2023-02-26 16:30:14,391][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001153_4722688.pth +[2023-02-26 16:30:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3512.8). Total num frames: 5586944. Throughput: 0: 881.7. Samples: 395396. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:30:19,201][00820] Avg episode reward: [(0, '4.676')] +[2023-02-26 16:30:23,390][19482] Updated weights for policy 0, policy_version 1368 (0.0022) +[2023-02-26 16:30:24,198][00820] Fps is (10 sec: 3277.2, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5603328. Throughput: 0: 893.5. Samples: 398010. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:30:24,201][00820] Avg episode reward: [(0, '4.451')] +[2023-02-26 16:30:29,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 5627904. Throughput: 0: 920.9. Samples: 404718. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:30:29,201][00820] Avg episode reward: [(0, '4.540')] +[2023-02-26 16:30:33,780][19482] Updated weights for policy 0, policy_version 1378 (0.0018) +[2023-02-26 16:30:34,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 5644288. Throughput: 0: 890.9. Samples: 410120. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:30:34,204][00820] Avg episode reward: [(0, '4.520')] +[2023-02-26 16:30:39,199][00820] Fps is (10 sec: 2866.9, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 5656576. Throughput: 0: 881.5. Samples: 412264. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:30:39,205][00820] Avg episode reward: [(0, '4.595')] +[2023-02-26 16:30:44,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5677056. Throughput: 0: 896.4. Samples: 417216. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:30:44,207][00820] Avg episode reward: [(0, '4.923')] +[2023-02-26 16:30:45,564][19482] Updated weights for policy 0, policy_version 1388 (0.0018) +[2023-02-26 16:30:49,198][00820] Fps is (10 sec: 4096.3, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5697536. Throughput: 0: 913.6. Samples: 423534. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:30:49,201][00820] Avg episode reward: [(0, '4.905')] +[2023-02-26 16:30:54,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5713920. Throughput: 0: 906.5. Samples: 426528. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:30:54,200][00820] Avg episode reward: [(0, '4.620')] +[2023-02-26 16:30:57,490][19482] Updated weights for policy 0, policy_version 1398 (0.0016) +[2023-02-26 16:30:59,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 5730304. Throughput: 0: 877.2. Samples: 430642. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:30:59,205][00820] Avg episode reward: [(0, '4.655')] +[2023-02-26 16:31:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5746688. Throughput: 0: 895.4. Samples: 435690. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:31:04,204][00820] Avg episode reward: [(0, '4.751')] +[2023-02-26 16:31:08,732][19482] Updated weights for policy 0, policy_version 1408 (0.0013) +[2023-02-26 16:31:09,198][00820] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5767168. Throughput: 0: 904.2. Samples: 438700. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:31:09,206][00820] Avg episode reward: [(0, '4.925')] +[2023-02-26 16:31:14,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 5783552. Throughput: 0: 879.2. Samples: 444280. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:31:14,203][00820] Avg episode reward: [(0, '4.891')] +[2023-02-26 16:31:19,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 5795840. Throughput: 0: 846.1. Samples: 448194. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:31:19,205][00820] Avg episode reward: [(0, '5.001')] +[2023-02-26 16:31:22,658][19482] Updated weights for policy 0, policy_version 1418 (0.0013) +[2023-02-26 16:31:24,198][00820] Fps is (10 sec: 2867.3, 60 sec: 3481.6, 300 sec: 3512.9). Total num frames: 5812224. Throughput: 0: 840.6. Samples: 450088. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:31:24,203][00820] Avg episode reward: [(0, '5.063')] +[2023-02-26 16:31:29,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 5832704. Throughput: 0: 866.0. Samples: 456188. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:31:29,201][00820] Avg episode reward: [(0, '4.712')] +[2023-02-26 16:31:32,559][19482] Updated weights for policy 0, policy_version 1428 (0.0017) +[2023-02-26 16:31:34,199][00820] Fps is (10 sec: 4095.5, 60 sec: 3481.5, 300 sec: 3526.7). Total num frames: 5853184. Throughput: 0: 854.6. Samples: 461992. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:31:34,205][00820] Avg episode reward: [(0, '4.649')] +[2023-02-26 16:31:39,201][00820] Fps is (10 sec: 3275.9, 60 sec: 3481.5, 300 sec: 3498.9). Total num frames: 5865472. Throughput: 0: 829.9. Samples: 463878. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:31:39,205][00820] Avg episode reward: [(0, '4.594')]
+[2023-02-26 16:31:44,198][00820] Fps is (10 sec: 2457.9, 60 sec: 3345.1, 300 sec: 3485.1). Total num frames: 5877760. Throughput: 0: 824.8. Samples: 467758. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:31:44,201][00820] Avg episode reward: [(0, '4.608')]
+[2023-02-26 16:31:46,399][19482] Updated weights for policy 0, policy_version 1438 (0.0033)
+[2023-02-26 16:31:49,198][00820] Fps is (10 sec: 3277.7, 60 sec: 3345.1, 300 sec: 3512.9). Total num frames: 5898240. Throughput: 0: 848.8. Samples: 473886. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:31:49,206][00820] Avg episode reward: [(0, '4.522')]
+[2023-02-26 16:31:54,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 5918720. Throughput: 0: 854.6. Samples: 477158. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:31:54,206][00820] Avg episode reward: [(0, '4.780')]
+[2023-02-26 16:31:57,785][19482] Updated weights for policy 0, policy_version 1448 (0.0021)
+[2023-02-26 16:31:59,198][00820] Fps is (10 sec: 3276.6, 60 sec: 3345.0, 300 sec: 3471.2). Total num frames: 5931008. Throughput: 0: 832.5. Samples: 481744. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:31:59,205][00820] Avg episode reward: [(0, '4.929')]
+[2023-02-26 16:32:04,198][00820] Fps is (10 sec: 2867.3, 60 sec: 3345.1, 300 sec: 3485.1). Total num frames: 5947392. Throughput: 0: 841.1. Samples: 486044. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:32:04,206][00820] Avg episode reward: [(0, '4.936')]
+[2023-02-26 16:32:09,064][19482] Updated weights for policy 0, policy_version 1458 (0.0028)
+[2023-02-26 16:32:09,198][00820] Fps is (10 sec: 4096.2, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 5971968. Throughput: 0: 873.2. Samples: 489380. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:32:09,201][00820] Avg episode reward: [(0, '4.605')]
+[2023-02-26 16:32:14,198][00820] Fps is (10 sec: 4505.5, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 5992448. Throughput: 0: 882.4. Samples: 495896. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:32:14,201][00820] Avg episode reward: [(0, '4.695')]
+[2023-02-26 16:32:14,223][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001463_5992448.pth...
+[2023-02-26 16:32:14,447][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001257_5148672.pth
+[2023-02-26 16:32:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 6004736. Throughput: 0: 849.2. Samples: 500206. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:32:19,204][00820] Avg episode reward: [(0, '4.728')]
+[2023-02-26 16:32:21,625][19482] Updated weights for policy 0, policy_version 1468 (0.0012)
+[2023-02-26 16:32:24,198][00820] Fps is (10 sec: 2457.7, 60 sec: 3413.3, 300 sec: 3485.1). Total num frames: 6017024. Throughput: 0: 852.3. Samples: 502228. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:32:24,200][00820] Avg episode reward: [(0, '4.694')]
+[2023-02-26 16:32:29,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 6041600. Throughput: 0: 892.9. Samples: 507938. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:32:29,205][00820] Avg episode reward: [(0, '4.759')]
+[2023-02-26 16:32:31,688][19482] Updated weights for policy 0, policy_version 1478 (0.0017)
+[2023-02-26 16:32:34,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3481.7, 300 sec: 3512.8). Total num frames: 6062080. Throughput: 0: 902.8. Samples: 514512. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:32:34,201][00820] Avg episode reward: [(0, '4.732')]
+[2023-02-26 16:32:39,200][00820] Fps is (10 sec: 3276.0, 60 sec: 3481.6, 300 sec: 3485.0). Total num frames: 6074368. Throughput: 0: 878.4. Samples: 516690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:32:39,204][00820] Avg episode reward: [(0, '4.749')]
+[2023-02-26 16:32:44,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 6086656. Throughput: 0: 855.8. Samples: 520254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:32:44,203][00820] Avg episode reward: [(0, '4.774')]
+[2023-02-26 16:32:45,716][19482] Updated weights for policy 0, policy_version 1488 (0.0018)
+[2023-02-26 16:32:49,198][00820] Fps is (10 sec: 3277.6, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 6107136. Throughput: 0: 884.3. Samples: 525836. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:32:49,209][00820] Avg episode reward: [(0, '4.517')]
+[2023-02-26 16:32:54,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 6127616. Throughput: 0: 881.5. Samples: 529048. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:32:54,202][00820] Avg episode reward: [(0, '4.387')]
+[2023-02-26 16:32:55,751][19482] Updated weights for policy 0, policy_version 1498 (0.0015)
+[2023-02-26 16:32:59,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6144000. Throughput: 0: 850.2. Samples: 534154. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:32:59,200][00820] Avg episode reward: [(0, '4.437')]
+[2023-02-26 16:33:04,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 6156288. Throughput: 0: 847.2. Samples: 538328. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:33:04,201][00820] Avg episode reward: [(0, '4.401')]
+[2023-02-26 16:33:08,970][19482] Updated weights for policy 0, policy_version 1508 (0.0027)
+[2023-02-26 16:33:09,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3485.1). Total num frames: 6176768. Throughput: 0: 855.6. Samples: 540728. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:33:09,200][00820] Avg episode reward: [(0, '4.593')]
+[2023-02-26 16:33:14,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3499.0). Total num frames: 6197248. Throughput: 0: 864.4. Samples: 546836. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:33:14,200][00820] Avg episode reward: [(0, '4.575')]
+[2023-02-26 16:33:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3457.3). Total num frames: 6209536. Throughput: 0: 829.2. Samples: 551824. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:33:19,201][00820] Avg episode reward: [(0, '4.624')]
+[2023-02-26 16:33:20,998][19482] Updated weights for policy 0, policy_version 1518 (0.0020)
+[2023-02-26 16:33:24,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3457.4). Total num frames: 6225920. Throughput: 0: 824.8. Samples: 553806. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:33:24,207][00820] Avg episode reward: [(0, '4.678')]
+[2023-02-26 16:33:29,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3457.3). Total num frames: 6242304. Throughput: 0: 853.2. Samples: 558650. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:33:29,205][00820] Avg episode reward: [(0, '4.861')]
+[2023-02-26 16:33:32,348][19482] Updated weights for policy 0, policy_version 1528 (0.0016)
+[2023-02-26 16:33:34,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3485.1). Total num frames: 6266880. Throughput: 0: 872.4. Samples: 565096. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:33:34,204][00820] Avg episode reward: [(0, '4.868')]
+[2023-02-26 16:33:39,199][00820] Fps is (10 sec: 4095.7, 60 sec: 3481.7, 300 sec: 3485.1). Total num frames: 6283264. Throughput: 0: 872.8. Samples: 568326. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:33:39,205][00820] Avg episode reward: [(0, '4.869')]
+[2023-02-26 16:33:44,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 6295552. Throughput: 0: 848.2. Samples: 572324. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:33:44,202][00820] Avg episode reward: [(0, '4.871')]
+[2023-02-26 16:33:44,930][19482] Updated weights for policy 0, policy_version 1538 (0.0032)
+[2023-02-26 16:33:49,198][00820] Fps is (10 sec: 2867.4, 60 sec: 3413.3, 300 sec: 3499.0). Total num frames: 6311936. Throughput: 0: 865.0. Samples: 577254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:33:49,201][00820] Avg episode reward: [(0, '4.773')]
+[2023-02-26 16:33:54,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 6336512. Throughput: 0: 888.0. Samples: 580690. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:33:54,201][00820] Avg episode reward: [(0, '5.080')]
+[2023-02-26 16:33:54,864][19482] Updated weights for policy 0, policy_version 1548 (0.0019)
+[2023-02-26 16:33:59,198][00820] Fps is (10 sec: 4095.8, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 6352896. Throughput: 0: 889.7. Samples: 586874. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:33:59,203][00820] Avg episode reward: [(0, '5.061')]
+[2023-02-26 16:34:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6369280. Throughput: 0: 873.7. Samples: 591140. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:34:04,203][00820] Avg episode reward: [(0, '4.912')]
+[2023-02-26 16:34:08,036][19482] Updated weights for policy 0, policy_version 1558 (0.0029)
+[2023-02-26 16:34:09,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 6385664. Throughput: 0: 874.7. Samples: 593166. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:34:09,204][00820] Avg episode reward: [(0, '4.775')]
+[2023-02-26 16:34:14,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 6406144. Throughput: 0: 904.9. Samples: 599372. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:34:14,201][00820] Avg episode reward: [(0, '4.801')]
+[2023-02-26 16:34:14,213][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001564_6406144.pth...
+[2023-02-26 16:34:14,384][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001360_5570560.pth
+[2023-02-26 16:34:17,388][19482] Updated weights for policy 0, policy_version 1568 (0.0012)
+[2023-02-26 16:34:19,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 6426624. Throughput: 0: 896.4. Samples: 605434. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:34:19,201][00820] Avg episode reward: [(0, '4.948')]
+[2023-02-26 16:34:24,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6438912. Throughput: 0: 870.8. Samples: 607512. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-02-26 16:34:24,200][00820] Avg episode reward: [(0, '5.053')]
+[2023-02-26 16:34:29,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6455296. Throughput: 0: 876.6. Samples: 611770. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:34:29,200][00820] Avg episode reward: [(0, '4.791')]
+[2023-02-26 16:34:30,853][19482] Updated weights for policy 0, policy_version 1578 (0.0019)
+[2023-02-26 16:34:34,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 6475776. Throughput: 0: 905.6. Samples: 618004. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:34:34,200][00820] Avg episode reward: [(0, '4.790')]
+[2023-02-26 16:34:39,199][00820] Fps is (10 sec: 4095.3, 60 sec: 3549.8, 300 sec: 3498.9). Total num frames: 6496256. Throughput: 0: 903.3. Samples: 621340. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:34:39,209][00820] Avg episode reward: [(0, '4.852')]
+[2023-02-26 16:34:41,035][19482] Updated weights for policy 0, policy_version 1588 (0.0024)
+[2023-02-26 16:34:44,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3485.1). Total num frames: 6512640. Throughput: 0: 872.2. Samples: 626124. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:34:44,200][00820] Avg episode reward: [(0, '4.761')]
+[2023-02-26 16:34:49,198][00820] Fps is (10 sec: 2867.7, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 6524928. Throughput: 0: 871.4. Samples: 630354. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:34:49,206][00820] Avg episode reward: [(0, '4.626')]
+[2023-02-26 16:34:53,279][19482] Updated weights for policy 0, policy_version 1598 (0.0016)
+[2023-02-26 16:34:54,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 6545408. Throughput: 0: 896.1. Samples: 633492. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:34:54,204][00820] Avg episode reward: [(0, '4.635')]
+[2023-02-26 16:34:59,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3618.2, 300 sec: 3512.8). Total num frames: 6569984. Throughput: 0: 904.3. Samples: 640064. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:34:59,202][00820] Avg episode reward: [(0, '4.835')]
+[2023-02-26 16:35:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6582272. Throughput: 0: 874.1. Samples: 644770. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:35:04,203][00820] Avg episode reward: [(0, '4.935')]
+[2023-02-26 16:35:04,802][19482] Updated weights for policy 0, policy_version 1608 (0.0014)
+[2023-02-26 16:35:09,199][00820] Fps is (10 sec: 2866.8, 60 sec: 3549.8, 300 sec: 3485.1). Total num frames: 6598656. Throughput: 0: 873.0. Samples: 646798. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:35:09,203][00820] Avg episode reward: [(0, '4.693')]
+[2023-02-26 16:35:14,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3549.8, 300 sec: 3499.0). Total num frames: 6619136. Throughput: 0: 898.4. Samples: 652198. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:35:14,201][00820] Avg episode reward: [(0, '4.455')]
+[2023-02-26 16:35:16,250][19482] Updated weights for policy 0, policy_version 1618 (0.0032)
+[2023-02-26 16:35:19,198][00820] Fps is (10 sec: 4096.5, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 6639616. Throughput: 0: 899.5. Samples: 658482. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+[2023-02-26 16:35:19,202][00820] Avg episode reward: [(0, '4.571')]
+[2023-02-26 16:35:24,201][00820] Fps is (10 sec: 3275.8, 60 sec: 3549.7, 300 sec: 3471.1). Total num frames: 6651904. Throughput: 0: 881.5. Samples: 661008. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:35:24,205][00820] Avg episode reward: [(0, '4.731')]
+[2023-02-26 16:35:28,890][19482] Updated weights for policy 0, policy_version 1628 (0.0015)
+[2023-02-26 16:35:29,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 6668288. Throughput: 0: 865.8. Samples: 665086. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:35:29,206][00820] Avg episode reward: [(0, '4.783')]
+[2023-02-26 16:35:34,198][00820] Fps is (10 sec: 3277.9, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 6684672. Throughput: 0: 889.4. Samples: 670376. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:35:34,207][00820] Avg episode reward: [(0, '4.847')]
+[2023-02-26 16:35:39,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.7, 300 sec: 3485.1). Total num frames: 6705152. Throughput: 0: 892.2. Samples: 673642. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:35:39,200][00820] Avg episode reward: [(0, '5.089')]
+[2023-02-26 16:35:39,305][19482] Updated weights for policy 0, policy_version 1638 (0.0012)
+[2023-02-26 16:35:44,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 6721536. Throughput: 0: 874.8. Samples: 679432. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:35:44,206][00820] Avg episode reward: [(0, '4.955')]
+[2023-02-26 16:35:49,200][00820] Fps is (10 sec: 3276.0, 60 sec: 3549.7, 300 sec: 3471.2). Total num frames: 6737920. Throughput: 0: 861.6. Samples: 683544. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:35:49,203][00820] Avg episode reward: [(0, '4.823')]
+[2023-02-26 16:35:52,674][19482] Updated weights for policy 0, policy_version 1648 (0.0017)
+[2023-02-26 16:35:54,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 6754304. Throughput: 0: 863.3. Samples: 685644. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:35:54,201][00820] Avg episode reward: [(0, '4.589')]
+[2023-02-26 16:35:59,198][00820] Fps is (10 sec: 4097.0, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 6778880. Throughput: 0: 888.0. Samples: 692158. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+[2023-02-26 16:35:59,206][00820] Avg episode reward: [(0, '4.598')]
+[2023-02-26 16:36:02,117][19482] Updated weights for policy 0, policy_version 1658 (0.0016)
+[2023-02-26 16:36:04,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 6795264. Throughput: 0: 876.2. Samples: 697912. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:36:04,201][00820] Avg episode reward: [(0, '4.906')]
+[2023-02-26 16:36:09,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.7, 300 sec: 3471.2). Total num frames: 6807552. Throughput: 0: 857.4. Samples: 699586. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:36:09,204][00820] Avg episode reward: [(0, '4.859')]
+[2023-02-26 16:36:14,199][00820] Fps is (10 sec: 2047.8, 60 sec: 3276.8, 300 sec: 3457.3). Total num frames: 6815744. Throughput: 0: 841.4. Samples: 702952. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:36:14,205][00820] Avg episode reward: [(0, '4.926')]
+[2023-02-26 16:36:14,215][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001664_6815744.pth...
+[2023-02-26 16:36:14,449][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001463_5992448.pth
+[2023-02-26 16:36:19,198][00820] Fps is (10 sec: 2048.0, 60 sec: 3140.3, 300 sec: 3443.4). Total num frames: 6828032. Throughput: 0: 804.3. Samples: 706568. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:36:19,200][00820] Avg episode reward: [(0, '4.686')]
+[2023-02-26 16:36:19,290][19482] Updated weights for policy 0, policy_version 1668 (0.0013)
+[2023-02-26 16:36:24,198][00820] Fps is (10 sec: 3686.7, 60 sec: 3345.2, 300 sec: 3457.3). Total num frames: 6852608. Throughput: 0: 804.3. Samples: 709834. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+[2023-02-26 16:36:24,200][00820] Avg episode reward: [(0, '4.899')]
+[2023-02-26 16:36:29,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3345.0, 300 sec: 3443.4). Total num frames: 6868992. Throughput: 0: 816.9. Samples: 716192. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+[2023-02-26 16:36:29,201][00820] Avg episode reward: [(0, '4.948')]
+[2023-02-26 16:36:29,351][19482] Updated weights for policy 0, policy_version 1678 (0.0019)
+[2023-02-26 16:36:34,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3345.1, 300 sec: 3457.3). Total num frames: 6885376. Throughput: 0: 819.3. Samples: 720410. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:34,206][00820] Avg episode reward: [(0, '4.756')]
+[2023-02-26 16:36:39,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3276.8, 300 sec: 3471.2). Total num frames: 6901760. Throughput: 0: 818.0. Samples: 722456. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:39,207][00820] Avg episode reward: [(0, '4.655')]
+[2023-02-26 16:36:41,601][19482] Updated weights for policy 0, policy_version 1688 (0.0034)
+[2023-02-26 16:36:44,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3471.2). Total num frames: 6922240. Throughput: 0: 809.1. Samples: 728566. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:44,209][00820] Avg episode reward: [(0, '4.782')]
+[2023-02-26 16:36:49,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3413.5, 300 sec: 3471.2). Total num frames: 6942720. Throughput: 0: 820.0. Samples: 734814. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:49,200][00820] Avg episode reward: [(0, '4.813')]
+[2023-02-26 16:36:52,672][19482] Updated weights for policy 0, policy_version 1698 (0.0012)
+[2023-02-26 16:36:54,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3471.2). Total num frames: 6955008. Throughput: 0: 829.7. Samples: 736922. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:54,206][00820] Avg episode reward: [(0, '4.688')]
+[2023-02-26 16:36:59,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3471.2). Total num frames: 6971392. Throughput: 0: 848.4. Samples: 741128. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:36:59,201][00820] Avg episode reward: [(0, '4.606')]
+[2023-02-26 16:37:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3457.3). Total num frames: 6991872. Throughput: 0: 905.5. Samples: 747316. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:04,200][00820] Avg episode reward: [(0, '4.619')]
+[2023-02-26 16:37:04,385][19482] Updated weights for policy 0, policy_version 1708 (0.0019)
+[2023-02-26 16:37:09,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 7016448. Throughput: 0: 904.9. Samples: 750552. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+[2023-02-26 16:37:09,205][00820] Avg episode reward: [(0, '4.830')]
+[2023-02-26 16:37:14,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 7028736. Throughput: 0: 873.2. Samples: 755484. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:14,200][00820] Avg episode reward: [(0, '4.929')]
+[2023-02-26 16:37:16,713][19482] Updated weights for policy 0, policy_version 1718 (0.0023)
+[2023-02-26 16:37:19,198][00820] Fps is (10 sec: 2457.4, 60 sec: 3549.8, 300 sec: 3471.2). Total num frames: 7041024. Throughput: 0: 870.6. Samples: 759586. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:37:19,207][00820] Avg episode reward: [(0, '4.947')]
+[2023-02-26 16:37:24,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3457.3). Total num frames: 7061504. Throughput: 0: 890.7. Samples: 762538. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:24,205][00820] Avg episode reward: [(0, '4.468')]
+[2023-02-26 16:37:27,114][19482] Updated weights for policy 0, policy_version 1728 (0.0017)
+[2023-02-26 16:37:29,198][00820] Fps is (10 sec: 4505.8, 60 sec: 3618.1, 300 sec: 3471.2). Total num frames: 7086080. Throughput: 0: 899.7. Samples: 769052. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:37:29,206][00820] Avg episode reward: [(0, '4.442')]
+[2023-02-26 16:37:34,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3549.8, 300 sec: 3471.2). Total num frames: 7098368. Throughput: 0: 870.0. Samples: 773964. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:34,207][00820] Avg episode reward: [(0, '4.927')]
+[2023-02-26 16:37:39,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7114752. Throughput: 0: 868.4. Samples: 776002. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:37:39,201][00820] Avg episode reward: [(0, '4.981')]
+[2023-02-26 16:37:40,251][19482] Updated weights for policy 0, policy_version 1738 (0.0022)
+[2023-02-26 16:37:44,198][00820] Fps is (10 sec: 3686.6, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7135232. Throughput: 0: 892.5. Samples: 781290. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:44,200][00820] Avg episode reward: [(0, '4.766')]
+[2023-02-26 16:37:49,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7155712. Throughput: 0: 899.2. Samples: 787780. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:37:49,201][00820] Avg episode reward: [(0, '4.733')]
+[2023-02-26 16:37:49,662][19482] Updated weights for policy 0, policy_version 1748 (0.0023)
+[2023-02-26 16:37:54,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3485.1). Total num frames: 7172096. Throughput: 0: 886.4. Samples: 790442. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:37:54,204][00820] Avg episode reward: [(0, '4.882')]
+[2023-02-26 16:37:59,199][00820] Fps is (10 sec: 2866.9, 60 sec: 3549.8, 300 sec: 3485.1). Total num frames: 7184384. Throughput: 0: 867.2. Samples: 794510. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:37:59,210][00820] Avg episode reward: [(0, '4.719')]
+[2023-02-26 16:38:02,925][19482] Updated weights for policy 0, policy_version 1758 (0.0013)
+[2023-02-26 16:38:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7204864. Throughput: 0: 893.5. Samples: 799794. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:38:04,207][00820] Avg episode reward: [(0, '4.616')]
+[2023-02-26 16:38:09,198][00820] Fps is (10 sec: 4096.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7225344. Throughput: 0: 899.6. Samples: 803022. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:38:09,201][00820] Avg episode reward: [(0, '4.552')]
+[2023-02-26 16:38:13,787][19482] Updated weights for policy 0, policy_version 1768 (0.0024)
+[2023-02-26 16:38:14,199][00820] Fps is (10 sec: 3685.9, 60 sec: 3549.8, 300 sec: 3498.9). Total num frames: 7241728. Throughput: 0: 880.7. Samples: 808686. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:38:14,202][00820] Avg episode reward: [(0, '4.551')]
+[2023-02-26 16:38:14,215][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001768_7241728.pth...
+[2023-02-26 16:38:14,432][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001564_6406144.pth
+[2023-02-26 16:38:19,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7254016. Throughput: 0: 860.4. Samples: 812682. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:38:19,203][00820] Avg episode reward: [(0, '4.698')]
+[2023-02-26 16:38:24,198][00820] Fps is (10 sec: 2867.6, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7270400. Throughput: 0: 863.9. Samples: 814878. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:38:24,206][00820] Avg episode reward: [(0, '4.696')]
+[2023-02-26 16:38:26,047][19482] Updated weights for policy 0, policy_version 1778 (0.0016)
+[2023-02-26 16:38:29,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7294976. Throughput: 0: 890.1. Samples: 821346. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:38:29,201][00820] Avg episode reward: [(0, '4.639')]
+[2023-02-26 16:38:34,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7311360. Throughput: 0: 872.8. Samples: 827054. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:38:34,200][00820] Avg episode reward: [(0, '4.632')]
+[2023-02-26 16:38:37,785][19482] Updated weights for policy 0, policy_version 1788 (0.0015)
+[2023-02-26 16:38:39,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7323648. Throughput: 0: 859.5. Samples: 829120. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:38:39,202][00820] Avg episode reward: [(0, '4.625')]
+[2023-02-26 16:38:44,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 7344128. Throughput: 0: 868.9. Samples: 833608. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:38:44,201][00820] Avg episode reward: [(0, '4.685')]
+[2023-02-26 16:38:48,618][19482] Updated weights for policy 0, policy_version 1798 (0.0014)
+[2023-02-26 16:38:49,199][00820] Fps is (10 sec: 4095.7, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7364608. Throughput: 0: 896.7. Samples: 840148. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:38:49,205][00820] Avg episode reward: [(0, '4.604')]
+[2023-02-26 16:38:54,202][00820] Fps is (10 sec: 3684.8, 60 sec: 3481.3, 300 sec: 3485.0). Total num frames: 7380992. Throughput: 0: 894.4. Samples: 843276. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:38:54,210][00820] Avg episode reward: [(0, '4.653')]
+[2023-02-26 16:38:59,198][00820] Fps is (10 sec: 3277.0, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7397376. Throughput: 0: 862.6. Samples: 847500. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:38:59,205][00820] Avg episode reward: [(0, '4.676')]
+[2023-02-26 16:39:01,723][19482] Updated weights for policy 0, policy_version 1808 (0.0012)
+[2023-02-26 16:39:04,198][00820] Fps is (10 sec: 3278.2, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7413760. Throughput: 0: 873.9. Samples: 852006. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:39:04,201][00820] Avg episode reward: [(0, '4.827')]
+[2023-02-26 16:39:09,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7434240. Throughput: 0: 896.9. Samples: 855240. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:39:09,205][00820] Avg episode reward: [(0, '4.934')]
+[2023-02-26 16:39:11,531][19482] Updated weights for policy 0, policy_version 1818 (0.0013)
+[2023-02-26 16:39:14,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7454720. Throughput: 0: 895.5. Samples: 861644. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:39:14,206][00820] Avg episode reward: [(0, '4.862')]
+[2023-02-26 16:39:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7467008. Throughput: 0: 862.9. Samples: 865886. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:39:19,201][00820] Avg episode reward: [(0, '4.783')]
+[2023-02-26 16:39:24,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3549.8, 300 sec: 3485.1). Total num frames: 7483392. Throughput: 0: 861.1. Samples: 867868. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:39:24,203][00820] Avg episode reward: [(0, '4.800')]
+[2023-02-26 16:39:25,225][19482] Updated weights for policy 0, policy_version 1828 (0.0032)
+[2023-02-26 16:39:29,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7503872. Throughput: 0: 886.5. Samples: 873502. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:39:29,201][00820] Avg episode reward: [(0, '4.765')]
+[2023-02-26 16:39:34,198][00820] Fps is (10 sec: 4096.2, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7524352. Throughput: 0: 883.3. Samples: 879898. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:39:34,205][00820] Avg episode reward: [(0, '4.498')]
+[2023-02-26 16:39:35,528][19482] Updated weights for policy 0, policy_version 1838 (0.0013)
+[2023-02-26 16:39:39,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 7536640. Throughput: 0: 859.5. Samples: 881950. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:39:39,201][00820] Avg episode reward: [(0, '4.400')]
+[2023-02-26 16:39:44,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7553024. Throughput: 0: 860.4. Samples: 886220. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:39:44,204][00820] Avg episode reward: [(0, '4.399')]
+[2023-02-26 16:39:47,902][19482] Updated weights for policy 0, policy_version 1848 (0.0015)
+[2023-02-26 16:39:49,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7573504. Throughput: 0: 891.0. Samples: 892100. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:39:49,203][00820] Avg episode reward: [(0, '4.531')]
+[2023-02-26 16:39:54,199][00820] Fps is (10 sec: 4095.4, 60 sec: 3550.0, 300 sec: 3471.2). Total num frames: 7593984. Throughput: 0: 892.8. Samples: 895418. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:39:54,213][00820] Avg episode reward: [(0, '4.758')]
+[2023-02-26 16:39:59,107][19482] Updated weights for policy 0, policy_version 1858 (0.0012)
+[2023-02-26 16:39:59,198][00820] Fps is (10 sec: 3686.2, 60 sec: 3549.8, 300 sec: 3485.1). Total num frames: 7610368. Throughput: 0: 867.5. Samples: 900682. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:39:59,207][00820] Avg episode reward: [(0, '4.684')]
+[2023-02-26 16:40:04,198][00820] Fps is (10 sec: 2867.5, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 7622656. Throughput: 0: 867.4. Samples: 904918. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:40:04,201][00820] Avg episode reward: [(0, '4.741')]
+[2023-02-26 16:40:09,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 7643136. Throughput: 0: 887.4. Samples: 907802. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:40:09,209][00820] Avg episode reward: [(0, '4.665')]
+[2023-02-26 16:40:10,348][19482] Updated weights for policy 0, policy_version 1868 (0.0013)
+[2023-02-26 16:40:14,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 7663616. Throughput: 0: 907.1. Samples: 914320. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:40:14,201][00820] Avg episode reward: [(0, '4.312')]
+[2023-02-26 16:40:14,222][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001872_7667712.pth...
+[2023-02-26 16:40:14,356][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001664_6815744.pth
+[2023-02-26 16:40:19,198][00820] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7680000. Throughput: 0: 875.8. Samples: 919310. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:40:19,203][00820] Avg episode reward: [(0, '4.398')]
+[2023-02-26 16:40:22,730][19482] Updated weights for policy 0, policy_version 1878 (0.0013)
+[2023-02-26 16:40:24,199][00820] Fps is (10 sec: 2866.8, 60 sec: 3481.5, 300 sec: 3471.2). Total num frames: 7692288. Throughput: 0: 873.4. Samples: 921254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:40:24,205][00820] Avg episode reward: [(0, '4.402')]
+[2023-02-26 16:40:29,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 7712768. Throughput: 0: 893.1. Samples: 926408. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:40:29,205][00820] Avg episode reward: [(0, '4.628')]
+[2023-02-26 16:40:33,178][19482] Updated weights for policy 0, policy_version 1888 (0.0012)
+[2023-02-26 16:40:34,198][00820] Fps is (10 sec: 4506.2, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 7737344. Throughput: 0: 908.9. Samples: 933002. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:40:34,206][00820] Avg episode reward: [(0, '4.672')]
+[2023-02-26 16:40:39,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3499.0). Total num frames: 7753728. Throughput: 0: 897.1. Samples: 935784. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:40:39,207][00820] Avg episode reward: [(0, '4.533')]
+[2023-02-26 16:40:44,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.8, 300 sec: 3485.1). Total num frames: 7766016. Throughput: 0: 873.7. Samples: 940000. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:40:44,201][00820] Avg episode reward: [(0, '4.445')]
+[2023-02-26 16:40:46,145][19482] Updated weights for policy 0, policy_version 1898 (0.0017)
+[2023-02-26 16:40:49,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 7786496. Throughput: 0: 895.4. Samples: 945210. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:40:49,200][00820] Avg episode reward: [(0, '4.454')]
+[2023-02-26 16:40:54,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7806976. Throughput: 0: 902.8. Samples: 948430. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:40:54,201][00820] Avg episode reward: [(0, '4.589')]
+[2023-02-26 16:40:55,563][19482] Updated weights for policy 0, policy_version 1908 (0.0017)
+[2023-02-26 16:40:59,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 7823360. Throughput: 0: 887.7. Samples: 954268. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:40:59,207][00820] Avg episode reward: [(0, '4.724')]
+[2023-02-26 16:41:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3499.0). Total num frames: 7839744. Throughput: 0: 872.0. Samples: 958552. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:41:04,200][00820] Avg episode reward: [(0, '4.606')]
+[2023-02-26 16:41:08,712][19482] Updated weights for policy 0, policy_version 1918 (0.0021)
+[2023-02-26 16:41:09,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 7856128. Throughput: 0: 876.6. Samples: 960698. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:41:09,203][00820] Avg episode reward: [(0, '4.580')]
+[2023-02-26 16:41:14,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 7880704. Throughput: 0: 909.9. Samples: 967354. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:41:14,205][00820] Avg episode reward: [(0, '4.476')]
+[2023-02-26 16:41:18,706][19482] Updated weights for policy 0, policy_version 1928 (0.0022)
+[2023-02-26 16:41:19,203][00820] Fps is (10 sec: 4093.7, 60 sec: 3617.8, 300 sec: 3540.5). Total num frames: 7897088. Throughput: 0: 891.9. Samples: 973142. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:41:19,212][00820] Avg episode reward: [(0, '4.423')]
+[2023-02-26 16:41:24,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3618.2, 300 sec: 3526.7). Total num frames: 7909376. Throughput: 0: 875.3. Samples: 975174. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:41:24,200][00820] Avg episode reward: [(0, '4.528')]
+[2023-02-26 16:41:29,198][00820] Fps is (10 sec: 2868.7, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 7925760. Throughput: 0: 880.0. Samples: 979600. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:41:29,205][00820] Avg episode reward: [(0, '4.767')]
+[2023-02-26 16:41:31,331][19482] Updated weights for policy 0, policy_version 1938 (0.0021)
+[2023-02-26 16:41:34,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 7950336. Throughput: 0: 909.7. Samples: 986146. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:41:34,206][00820] Avg episode reward: [(0, '5.109')]
+[2023-02-26 16:41:39,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 7966720. Throughput: 0: 912.8. Samples: 989504. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:41:39,205][00820] Avg episode reward: [(0, '5.125')]
+[2023-02-26 16:41:42,367][19482] Updated weights for policy 0, policy_version 1948 (0.0021)
+[2023-02-26 16:41:44,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 7983104. Throughput: 0: 879.9. Samples: 993862. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:41:44,202][00820] Avg episode reward: [(0, '4.978')]
+[2023-02-26 16:41:49,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 7999488. Throughput: 0: 890.2. Samples: 998610. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:41:49,200][00820] Avg episode reward: [(0, '4.709')]
+[2023-02-26 16:41:53,776][19482] Updated weights for policy 0, policy_version 1958 (0.0017)
+[2023-02-26 16:41:54,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 8019968. Throughput: 0: 913.7. Samples: 1001816. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:41:54,201][00820] Avg episode reward: [(0, '4.673')]
+[2023-02-26 16:41:59,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3554.5). Total num frames: 8040448. Throughput: 0: 908.6. Samples: 1008240. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:41:59,201][00820] Avg episode reward: [(0, '4.771')]
+[2023-02-26 16:42:04,205][00820] Fps is (10 sec: 3274.5, 60 sec: 3549.4, 300 sec: 3512.8). Total num frames: 8052736. Throughput: 0: 874.0. Samples: 1012474. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:42:04,208][00820] Avg episode reward: [(0, '4.593')]
+[2023-02-26 16:42:06,092][19482] Updated weights for policy 0, policy_version 1968 (0.0017)
+[2023-02-26 16:42:09,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 8069120. Throughput: 0: 875.5. Samples: 1014572. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:42:09,201][00820] Avg episode reward: [(0, '4.654')]
+[2023-02-26 16:42:14,198][00820] Fps is (10 sec: 3689.1, 60 sec: 3481.6, 300 sec: 3554.5). Total num frames: 8089600. Throughput: 0: 910.8. Samples: 1020586. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:42:14,201][00820] Avg episode reward: [(0, '4.703')]
+[2023-02-26 16:42:14,210][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001975_8089600.pth...
+[2023-02-26 16:42:14,428][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001768_7241728.pth
+[2023-02-26 16:42:16,438][19482] Updated weights for policy 0, policy_version 1978 (0.0014)
+[2023-02-26 16:42:19,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3550.2, 300 sec: 3554.5). Total num frames: 8110080. Throughput: 0: 899.5. Samples: 1026624. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:42:19,208][00820] Avg episode reward: [(0, '4.694')]
+[2023-02-26 16:42:24,198][00820] Fps is (10 sec: 3276.7, 60 sec: 3549.8, 300 sec: 3512.8). Total num frames: 8122368. Throughput: 0: 871.3. Samples: 1028712. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:42:24,201][00820] Avg episode reward: [(0, '4.566')]
+[2023-02-26 16:42:29,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 8138752. Throughput: 0: 864.0. Samples: 1032744. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:42:29,205][00820] Avg episode reward: [(0, '4.550')]
+[2023-02-26 16:42:29,645][19482] Updated weights for policy 0, policy_version 1988 (0.0017)
+[2023-02-26 16:42:34,200][00820] Fps is (10 sec: 3685.7, 60 sec: 3481.5, 300 sec: 3540.6). Total num frames: 8159232. Throughput: 0: 898.0. Samples: 1039024. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:42:34,205][00820] Avg episode reward: [(0, '4.792')]
+[2023-02-26 16:42:39,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 8179712. Throughput: 0: 900.7. Samples: 1042346. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:42:39,206][00820] Avg episode reward: [(0, '4.622')]
+[2023-02-26 16:42:39,228][19482] Updated weights for policy 0, policy_version 1998 (0.0018)
+[2023-02-26 16:42:44,198][00820] Fps is (10 sec: 3687.3, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 8196096. Throughput: 0: 866.9. Samples: 1047250. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:42:44,201][00820] Avg episode reward: [(0, '4.565')]
+[2023-02-26 16:42:49,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 8208384. Throughput: 0: 865.0. Samples: 1051392. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:42:49,205][00820] Avg episode reward: [(0, '4.686')]
+[2023-02-26 16:42:52,243][19482] Updated weights for policy 0, policy_version 2008 (0.0018)
+[2023-02-26 16:42:54,198][00820] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 8228864. Throughput: 0: 886.8. Samples: 1054478. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:42:54,204][00820] Avg episode reward: [(0, '4.807')]
+[2023-02-26 16:42:59,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 8253440. Throughput: 0: 898.6. Samples: 1061022. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:42:59,202][00820] Avg episode reward: [(0, '4.613')]
+[2023-02-26 16:43:03,225][19482] Updated weights for policy 0, policy_version 2018 (0.0027)
+[2023-02-26 16:43:04,198][00820] Fps is (10 sec: 3686.5, 60 sec: 3550.3, 300 sec: 3526.7). Total num frames: 8265728. Throughput: 0: 867.3. Samples: 1065652. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:43:04,204][00820] Avg episode reward: [(0, '4.596')]
+[2023-02-26 16:43:09,198][00820] Fps is (10 sec: 2867.0, 60 sec: 3549.8, 300 sec: 3526.7). Total num frames: 8282112. Throughput: 0: 867.5. Samples: 1067750. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:43:09,207][00820] Avg episode reward: [(0, '4.659')]
+[2023-02-26 16:43:14,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 8294400. Throughput: 0: 866.6. Samples: 1071740. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:43:14,202][00820] Avg episode reward: [(0, '4.584')]
+[2023-02-26 16:43:18,380][19482] Updated weights for policy 0, policy_version 2028 (0.0015)
+[2023-02-26 16:43:19,198][00820] Fps is (10 sec: 2457.7, 60 sec: 3276.8, 300 sec: 3512.8). Total num frames: 8306688. Throughput: 0: 819.3. Samples: 1075890. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:43:19,205][00820] Avg episode reward: [(0, '4.590')]
+[2023-02-26 16:43:24,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3276.8, 300 sec: 3471.2). Total num frames: 8318976. Throughput: 0: 791.8. Samples: 1077976. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:43:24,201][00820] Avg episode reward: [(0, '4.639')]
+[2023-02-26 16:43:29,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3471.2). Total num frames: 8335360. Throughput: 0: 772.8. Samples: 1082028. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:43:29,201][00820] Avg episode reward: [(0, '4.663')]
+[2023-02-26 16:43:32,313][19482] Updated weights for policy 0, policy_version 2038 (0.0023)
+[2023-02-26 16:43:34,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3208.7, 300 sec: 3485.1). Total num frames: 8351744. Throughput: 0: 800.4. Samples: 1087412. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:43:34,201][00820] Avg episode reward: [(0, '4.823')]
+[2023-02-26 16:43:39,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3276.8, 300 sec: 3499.0). Total num frames: 8376320. Throughput: 0: 802.8. Samples: 1090602. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:43:39,200][00820] Avg episode reward: [(0, '4.760')]
+[2023-02-26 16:43:42,248][19482] Updated weights for policy 0, policy_version 2048 (0.0012)
+[2023-02-26 16:43:44,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3276.8, 300 sec: 3485.1). Total num frames: 8392704. Throughput: 0: 787.2. Samples: 1096446. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:43:44,200][00820] Avg episode reward: [(0, '4.633')]
+[2023-02-26 16:43:49,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3471.2). Total num frames: 8404992. Throughput: 0: 776.7. Samples: 1100604. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:43:49,202][00820] Avg episode reward: [(0, '4.566')]
+[2023-02-26 16:43:54,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3276.8, 300 sec: 3485.1). Total num frames: 8425472. Throughput: 0: 779.7. Samples: 1102834. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:43:54,201][00820] Avg episode reward: [(0, '4.589')]
+[2023-02-26 16:43:55,035][19482] Updated weights for policy 0, policy_version 2058 (0.0017)
+[2023-02-26 16:43:59,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3208.5, 300 sec: 3499.0). Total num frames: 8445952. Throughput: 0: 833.5. Samples: 1109246. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:43:59,205][00820] Avg episode reward: [(0, '4.839')]
+[2023-02-26 16:44:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3485.1). Total num frames: 8462336. Throughput: 0: 863.5. Samples: 1114748. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:44:04,205][00820] Avg episode reward: [(0, '4.836')]
+[2023-02-26 16:44:06,384][19482] Updated weights for policy 0, policy_version 2068 (0.0028)
+[2023-02-26 16:44:09,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3471.2). Total num frames: 8478720. Throughput: 0: 864.4. Samples: 1116874. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:44:09,202][00820] Avg episode reward: [(0, '4.689')]
+[2023-02-26 16:44:14,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3485.1). Total num frames: 8495104. Throughput: 0: 874.4. Samples: 1121374. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:44:14,200][00820] Avg episode reward: [(0, '4.489')]
+[2023-02-26 16:44:14,218][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002074_8495104.pth...
+[2023-02-26 16:44:14,365][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001872_7667712.pth
+[2023-02-26 16:44:17,591][19482] Updated weights for policy 0, policy_version 2078 (0.0012)
+[2023-02-26 16:44:19,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 8515584. Throughput: 0: 899.7. Samples: 1127898. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:44:19,201][00820] Avg episode reward: [(0, '4.466')]
+[2023-02-26 16:44:24,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3499.0). Total num frames: 8536064. Throughput: 0: 901.4. Samples: 1131166. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:44:24,205][00820] Avg episode reward: [(0, '4.339')]
+[2023-02-26 16:44:29,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 8548352. Throughput: 0: 862.1. Samples: 1135238. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:44:29,203][00820] Avg episode reward: [(0, '4.459')]
+[2023-02-26 16:44:30,396][19482] Updated weights for policy 0, policy_version 2088 (0.0041)
+[2023-02-26 16:44:34,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 8564736. Throughput: 0: 872.0. Samples: 1139844. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:44:34,203][00820] Avg episode reward: [(0, '4.579')]
+[2023-02-26 16:44:39,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 8585216. Throughput: 0: 894.3. Samples: 1143078. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:44:39,204][00820] Avg episode reward: [(0, '4.777')]
+[2023-02-26 16:44:40,554][19482] Updated weights for policy 0, policy_version 2098 (0.0014)
+[2023-02-26 16:44:44,201][00820] Fps is (10 sec: 4094.9, 60 sec: 3549.7, 300 sec: 3498.9). Total num frames: 8605696. Throughput: 0: 899.1. Samples: 1149710. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:44:44,203][00820] Avg episode reward: [(0, '4.968')]
+[2023-02-26 16:44:49,200][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 8617984. Throughput: 0: 871.6. Samples: 1153972. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:44:49,203][00820] Avg episode reward: [(0, '4.877')]
+[2023-02-26 16:44:53,883][19482] Updated weights for policy 0, policy_version 2108 (0.0013)
+[2023-02-26 16:44:54,198][00820] Fps is (10 sec: 2867.9, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 8634368. Throughput: 0: 869.4. Samples: 1155996. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:44:54,201][00820] Avg episode reward: [(0, '4.766')]
+[2023-02-26 16:44:59,213][00820] Fps is (10 sec: 3680.8, 60 sec: 3480.7, 300 sec: 3498.8). Total num frames: 8654848. Throughput: 0: 897.1. Samples: 1161756. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:44:59,218][00820] Avg episode reward: [(0, '4.706')]
+[2023-02-26 16:45:03,159][19482] Updated weights for policy 0, policy_version 2118 (0.0016)
+[2023-02-26 16:45:04,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 8675328. Throughput: 0: 896.6. Samples: 1168244. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:45:04,205][00820] Avg episode reward: [(0, '4.861')]
+[2023-02-26 16:45:09,198][00820] Fps is (10 sec: 3692.1, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 8691712. Throughput: 0: 868.5. Samples: 1170250. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:45:09,202][00820] Avg episode reward: [(0, '4.870')]
+[2023-02-26 16:45:14,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 8704000. Throughput: 0: 865.7. Samples: 1174196. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:45:14,201][00820] Avg episode reward: [(0, '4.693')]
+[2023-02-26 16:45:16,733][19482] Updated weights for policy 0, policy_version 2128 (0.0024)
+[2023-02-26 16:45:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 8724480. Throughput: 0: 892.7. Samples: 1180016. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:45:19,201][00820] Avg episode reward: [(0, '4.589')]
+[2023-02-26 16:45:24,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 8744960. Throughput: 0: 892.3. Samples: 1183232. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:45:24,200][00820] Avg episode reward: [(0, '4.718')]
+[2023-02-26 16:45:27,688][19482] Updated weights for policy 0, policy_version 2138 (0.0012)
+[2023-02-26 16:45:29,200][00820] Fps is (10 sec: 3685.7, 60 sec: 3549.8, 300 sec: 3471.2). Total num frames: 8761344. Throughput: 0: 859.7. Samples: 1188394. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+[2023-02-26 16:45:29,202][00820] Avg episode reward: [(0, '4.785')]
+[2023-02-26 16:45:34,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3457.3). Total num frames: 8773632. Throughput: 0: 856.3. Samples: 1192506. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:45:34,201][00820] Avg episode reward: [(0, '4.530')]
+[2023-02-26 16:45:39,198][00820] Fps is (10 sec: 3277.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 8794112. Throughput: 0: 876.2. Samples: 1195426. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:45:39,209][00820] Avg episode reward: [(0, '4.958')]
+[2023-02-26 16:45:39,462][19482] Updated weights for policy 0, policy_version 2148 (0.0016)
+[2023-02-26 16:45:44,198][00820] Fps is (10 sec: 4505.7, 60 sec: 3550.0, 300 sec: 3499.0). Total num frames: 8818688. Throughput: 0: 893.7. Samples: 1201960. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-02-26 16:45:44,201][00820] Avg episode reward: [(0, '5.159')]
+[2023-02-26 16:45:49,201][00820] Fps is (10 sec: 3685.2, 60 sec: 3549.7, 300 sec: 3471.1). Total num frames: 8830976. Throughput: 0: 863.1. Samples: 1207088. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:45:49,206][00820] Avg episode reward: [(0, '5.016')]
+[2023-02-26 16:45:50,882][19482] Updated weights for policy 0, policy_version 2158 (0.0012)
+[2023-02-26 16:45:54,198][00820] Fps is (10 sec: 2867.1, 60 sec: 3549.8, 300 sec: 3471.2). Total num frames: 8847360. Throughput: 0: 866.2. Samples: 1209230. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:45:54,209][00820] Avg episode reward: [(0, '4.980')]
+[2023-02-26 16:45:59,198][00820] Fps is (10 sec: 3277.8, 60 sec: 3482.5, 300 sec: 3471.2). Total num frames: 8863744. Throughput: 0: 889.3. Samples: 1214216. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:45:59,201][00820] Avg episode reward: [(0, '4.857')]
+[2023-02-26 16:46:01,968][19482] Updated weights for policy 0, policy_version 2168 (0.0024)
+[2023-02-26 16:46:04,198][00820] Fps is (10 sec: 4096.2, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 8888320. Throughput: 0: 906.8. Samples: 1220820. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:04,201][00820] Avg episode reward: [(0, '4.790')]
+[2023-02-26 16:46:09,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3457.3). Total num frames: 8900608. Throughput: 0: 891.2. Samples: 1223338. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:09,204][00820] Avg episode reward: [(0, '4.751')]
+[2023-02-26 16:46:14,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3457.4). Total num frames: 8916992. Throughput: 0: 865.1. Samples: 1227324. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:14,208][00820] Avg episode reward: [(0, '4.827')]
+[2023-02-26 16:46:14,220][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002177_8916992.pth...
+[2023-02-26 16:46:14,438][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001975_8089600.pth
+[2023-02-26 16:46:15,749][19482] Updated weights for policy 0, policy_version 2178 (0.0014)
+[2023-02-26 16:46:19,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3481.6, 300 sec: 3471.2). Total num frames: 8933376. Throughput: 0: 886.1. Samples: 1232380. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:46:19,201][00820] Avg episode reward: [(0, '4.684')]
+[2023-02-26 16:46:24,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 8953856. Throughput: 0: 892.0. Samples: 1235566. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:46:24,201][00820] Avg episode reward: [(0, '4.653')]
+[2023-02-26 16:46:25,166][19482] Updated weights for policy 0, policy_version 2188 (0.0014)
+[2023-02-26 16:46:29,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.7, 300 sec: 3457.3). Total num frames: 8970240. Throughput: 0: 874.0. Samples: 1241292. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:29,201][00820] Avg episode reward: [(0, '4.662')]
+[2023-02-26 16:46:34,198][00820] Fps is (10 sec: 3276.7, 60 sec: 3549.9, 300 sec: 3457.3). Total num frames: 8986624. Throughput: 0: 853.2. Samples: 1245480. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:46:34,207][00820] Avg episode reward: [(0, '4.587')]
+[2023-02-26 16:46:38,547][19482] Updated weights for policy 0, policy_version 2198 (0.0019)
+[2023-02-26 16:46:39,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3457.3). Total num frames: 9003008. Throughput: 0: 855.8. Samples: 1247742. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:39,200][00820] Avg episode reward: [(0, '4.876')]
+[2023-02-26 16:46:44,198][00820] Fps is (10 sec: 4096.1, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 9027584. Throughput: 0: 892.9. Samples: 1254394. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:44,207][00820] Avg episode reward: [(0, '4.829')]
+[2023-02-26 16:46:48,701][19482] Updated weights for policy 0, policy_version 2208 (0.0020)
+[2023-02-26 16:46:49,198][00820] Fps is (10 sec: 4095.9, 60 sec: 3550.0, 300 sec: 3471.2). Total num frames: 9043968. Throughput: 0: 869.8. Samples: 1259960. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:46:49,201][00820] Avg episode reward: [(0, '4.746')]
+[2023-02-26 16:46:54,202][00820] Fps is (10 sec: 2866.0, 60 sec: 3481.4, 300 sec: 3443.4). Total num frames: 9056256. Throughput: 0: 860.9. Samples: 1262084. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:46:54,207][00820] Avg episode reward: [(0, '4.862')]
+[2023-02-26 16:46:59,198][00820] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3471.3). Total num frames: 9076736. Throughput: 0: 872.8. Samples: 1266602. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:46:59,201][00820] Avg episode reward: [(0, '4.924')]
+[2023-02-26 16:47:00,807][19482] Updated weights for policy 0, policy_version 2218 (0.0012)
+[2023-02-26 16:47:04,198][00820] Fps is (10 sec: 4097.7, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 9097216. Throughput: 0: 910.1. Samples: 1273336. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:47:04,201][00820] Avg episode reward: [(0, '4.821')]
+[2023-02-26 16:47:09,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 9113600. Throughput: 0: 913.1. Samples: 1276654. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:47:09,204][00820] Avg episode reward: [(0, '4.654')]
+[2023-02-26 16:47:12,465][19482] Updated weights for policy 0, policy_version 2228 (0.0017)
+[2023-02-26 16:47:14,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3457.3). Total num frames: 9129984. Throughput: 0: 875.6. Samples: 1280696. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:47:14,206][00820] Avg episode reward: [(0, '4.333')]
+[2023-02-26 16:47:19,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 9146368. Throughput: 0: 893.0. Samples: 1285664. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:47:19,201][00820] Avg episode reward: [(0, '4.504')]
+[2023-02-26 16:47:23,261][19482] Updated weights for policy 0, policy_version 2238 (0.0014)
+[2023-02-26 16:47:24,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3499.0). Total num frames: 9170944. Throughput: 0: 916.0. Samples: 1288962. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:47:24,201][00820] Avg episode reward: [(0, '4.614')]
+[2023-02-26 16:47:29,199][00820] Fps is (10 sec: 4095.4, 60 sec: 3618.0, 300 sec: 3485.1). Total num frames: 9187328. Throughput: 0: 907.7. Samples: 1295242. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:47:29,202][00820] Avg episode reward: [(0, '4.619')]
+[2023-02-26 16:47:34,199][00820] Fps is (10 sec: 2866.8, 60 sec: 3549.8, 300 sec: 3457.3). Total num frames: 9199616. Throughput: 0: 875.6. Samples: 1299364. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:47:34,206][00820] Avg episode reward: [(0, '4.623')]
+[2023-02-26 16:47:35,904][19482] Updated weights for policy 0, policy_version 2248 (0.0012)
+[2023-02-26 16:47:39,198][00820] Fps is (10 sec: 3277.3, 60 sec: 3618.1, 300 sec: 3471.2). Total num frames: 9220096. Throughput: 0: 875.7. Samples: 1301486. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:47:39,206][00820] Avg episode reward: [(0, '4.592')]
+[2023-02-26 16:47:44,198][00820] Fps is (10 sec: 4096.5, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 9240576. Throughput: 0: 918.0. Samples: 1307910. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:47:44,201][00820] Avg episode reward: [(0, '4.413')]
+[2023-02-26 16:47:45,604][19482] Updated weights for policy 0, policy_version 2258 (0.0028)
+[2023-02-26 16:47:49,199][00820] Fps is (10 sec: 4095.6, 60 sec: 3618.1, 300 sec: 3498.9). Total num frames: 9261056. Throughput: 0: 905.6. Samples: 1314088. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:47:49,205][00820] Avg episode reward: [(0, '4.329')]
+[2023-02-26 16:47:54,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.4, 300 sec: 3457.3). Total num frames: 9273344. Throughput: 0: 878.9. Samples: 1316206. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:47:54,201][00820] Avg episode reward: [(0, '4.498')]
+[2023-02-26 16:47:58,847][19482] Updated weights for policy 0, policy_version 2268 (0.0012)
+[2023-02-26 16:47:59,198][00820] Fps is (10 sec: 2867.4, 60 sec: 3549.9, 300 sec: 3471.2). Total num frames: 9289728. Throughput: 0: 881.7. Samples: 1320374. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:47:59,200][00820] Avg episode reward: [(0, '4.722')]
+[2023-02-26 16:48:04,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3485.1). Total num frames: 9310208. Throughput: 0: 912.1. Samples: 1326710. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:04,201][00820] Avg episode reward: [(0, '4.631')]
+[2023-02-26 16:48:08,131][19482] Updated weights for policy 0, policy_version 2278 (0.0012)
+[2023-02-26 16:48:09,202][00820] Fps is (10 sec: 4094.2, 60 sec: 3617.9, 300 sec: 3512.8). Total num frames: 9330688. Throughput: 0: 912.1. Samples: 1330010. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:09,205][00820] Avg episode reward: [(0, '4.737')]
+[2023-02-26 16:48:14,198][00820] Fps is (10 sec: 3686.3, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 9347072. Throughput: 0: 878.6. Samples: 1334780. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:14,206][00820] Avg episode reward: [(0, '4.730')]
+[2023-02-26 16:48:14,222][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002282_9347072.pth...
+[2023-02-26 16:48:14,401][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002074_8495104.pth
+[2023-02-26 16:48:19,198][00820] Fps is (10 sec: 2868.5, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 9359360. Throughput: 0: 878.9. Samples: 1338912. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:19,201][00820] Avg episode reward: [(0, '4.645')]
+[2023-02-26 16:48:21,434][19482] Updated weights for policy 0, policy_version 2288 (0.0012)
+[2023-02-26 16:48:24,198][00820] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 9383936. Throughput: 0: 903.5. Samples: 1342144. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:48:24,201][00820] Avg episode reward: [(0, '4.656')]
+[2023-02-26 16:48:29,198][00820] Fps is (10 sec: 4505.4, 60 sec: 3618.2, 300 sec: 3568.4). Total num frames: 9404416. Throughput: 0: 908.0. Samples: 1348772. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:29,201][00820] Avg episode reward: [(0, '4.741')]
+[2023-02-26 16:48:31,991][19482] Updated weights for policy 0, policy_version 2298 (0.0014)
+[2023-02-26 16:48:34,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3526.7). Total num frames: 9416704. Throughput: 0: 870.4. Samples: 1353256. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:48:34,203][00820] Avg episode reward: [(0, '4.696')]
+[2023-02-26 16:48:39,198][00820] Fps is (10 sec: 2867.3, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 9433088. Throughput: 0: 870.9. Samples: 1355396. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:48:39,207][00820] Avg episode reward: [(0, '4.601')]
+[2023-02-26 16:48:43,735][19482] Updated weights for policy 0, policy_version 2308 (0.0025)
+[2023-02-26 16:48:44,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 9453568. Throughput: 0: 905.1. Samples: 1361102. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:48:44,205][00820] Avg episode reward: [(0, '4.497')]
+[2023-02-26 16:48:49,204][00820] Fps is (10 sec: 4093.7, 60 sec: 3549.6, 300 sec: 3554.4). Total num frames: 9474048. Throughput: 0: 911.0. Samples: 1367708. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:49,210][00820] Avg episode reward: [(0, '4.451')]
+[2023-02-26 16:48:54,199][00820] Fps is (10 sec: 3276.4, 60 sec: 3549.8, 300 sec: 3526.7). Total num frames: 9486336. Throughput: 0: 887.1. Samples: 1369926. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:48:54,211][00820] Avg episode reward: [(0, '4.547')]
+[2023-02-26 16:48:55,621][19482] Updated weights for policy 0, policy_version 2318 (0.0015)
+[2023-02-26 16:48:59,198][00820] Fps is (10 sec: 2868.8, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 9502720. Throughput: 0: 874.3. Samples: 1374124. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:48:59,203][00820] Avg episode reward: [(0, '4.735')]
+[2023-02-26 16:49:04,198][00820] Fps is (10 sec: 3686.9, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 9523200. Throughput: 0: 912.3. Samples: 1379964. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:49:04,205][00820] Avg episode reward: [(0, '4.906')]
+[2023-02-26 16:49:06,191][19482] Updated weights for policy 0, policy_version 2328 (0.0017)
+[2023-02-26 16:49:09,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3618.4, 300 sec: 3568.4). Total num frames: 9547776. Throughput: 0: 912.8. Samples: 1383218. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-02-26 16:49:09,200][00820] Avg episode reward: [(0, '4.620')]
+[2023-02-26 16:49:14,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 9560064. Throughput: 0: 883.7. Samples: 1388540. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:49:14,203][00820] Avg episode reward: [(0, '4.517')]
+[2023-02-26 16:49:19,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 9572352. Throughput: 0: 872.5. Samples: 1392520. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-02-26 16:49:19,203][00820] Avg episode reward: [(0, '4.610')]
+[2023-02-26 16:49:19,337][19482] Updated weights for policy 0, policy_version 2338 (0.0027)
+[2023-02-26 16:49:24,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 9592832. Throughput: 0: 887.0. Samples: 1395312. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-02-26 16:49:24,200][00820] Avg episode reward: [(0, '4.650')]
+[2023-02-26 16:49:28,925][19482] Updated weights for policy 0, policy_version 2348 (0.0017)
+[2023-02-26 16:49:29,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 9617408. Throughput: 0: 908.1. Samples: 1401966. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:49:29,200][00820] Avg episode reward: [(0, '4.535')]
+[2023-02-26 16:49:34,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 9633792. Throughput: 0: 878.7. Samples: 1407246. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:49:34,203][00820] Avg episode reward: [(0, '4.644')]
+[2023-02-26 16:49:39,198][00820] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3526.8). Total num frames: 9646080. Throughput: 0: 876.2. Samples: 1409352. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:49:39,202][00820] Avg episode reward: [(0, '4.604')]
+[2023-02-26 16:49:41,864][19482] Updated weights for policy 0, policy_version 2358 (0.0012)
+[2023-02-26 16:49:44,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 9666560. Throughput: 0: 898.2. Samples: 1414542. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-02-26 16:49:44,200][00820] Avg episode reward: [(0, '4.536')]
+[2023-02-26 16:49:49,198][00820] Fps is (10 sec: 4505.6, 60 sec: 3618.5, 300 sec: 3582.3). Total num frames: 9691136. Throughput: 0: 918.2. Samples: 1421284. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-02-26 16:49:49,200][00820] Avg episode reward: [(0, '4.754')]
+[2023-02-26 16:49:51,297][19482] Updated weights for policy 0, policy_version 2368 (0.0018)
+[2023-02-26 16:49:54,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3686.5, 300 sec: 3568.6). Total num frames: 9707520. Throughput: 0: 910.1. Samples: 1424172. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-02-26 16:49:54,201][00820] Avg episode reward: [(0, '4.959')]
+[2023-02-26 16:49:59,199][00820] Fps is (10 sec: 2866.8, 60 sec: 3618.0, 300 sec: 3540.6). Total num frames: 9719808. Throughput: 0: 887.5. Samples: 1428478. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+[2023-02-26 16:49:59,207][00820] Avg episode reward: [(0, '4.842')]
+[2023-02-26 16:50:04,101][19482] Updated weights for policy 0, policy_version 2378 (0.0030)
+[2023-02-26 16:50:04,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 9740288. Throughput: 0: 912.4. Samples: 1433580. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+[2023-02-26 16:50:04,204][00820] Avg episode reward: [(0, '5.044')]
+[2023-02-26 16:50:09,198][00820] Fps is (10 sec: 4096.6, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 9760768. Throughput: 0: 927.0. Samples: 1437026.
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:50:09,207][00820] Avg episode reward: [(0, '4.628')] +[2023-02-26 16:50:14,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 9773056. Throughput: 0: 892.8. Samples: 1442140. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:50:14,205][00820] Avg episode reward: [(0, '4.516')] +[2023-02-26 16:50:14,219][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002386_9773056.pth... +[2023-02-26 16:50:14,503][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002177_8916992.pth +[2023-02-26 16:50:16,626][19482] Updated weights for policy 0, policy_version 2388 (0.0017) +[2023-02-26 16:50:19,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 9785344. Throughput: 0: 849.6. Samples: 1445480. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:50:19,201][00820] Avg episode reward: [(0, '4.424')] +[2023-02-26 16:50:24,198][00820] Fps is (10 sec: 2457.6, 60 sec: 3413.3, 300 sec: 3512.9). Total num frames: 9797632. Throughput: 0: 841.9. Samples: 1447238. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:50:24,201][00820] Avg episode reward: [(0, '4.622')] +[2023-02-26 16:50:29,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 9818112. Throughput: 0: 829.5. Samples: 1451868. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:50:29,205][00820] Avg episode reward: [(0, '4.781')] +[2023-02-26 16:50:30,028][19482] Updated weights for policy 0, policy_version 2398 (0.0029) +[2023-02-26 16:50:34,198][00820] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 9838592. Throughput: 0: 828.8. Samples: 1458582. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:50:34,201][00820] Avg episode reward: [(0, '4.761')] +[2023-02-26 16:50:39,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 9854976. Throughput: 0: 832.8. Samples: 1461650. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-02-26 16:50:39,203][00820] Avg episode reward: [(0, '4.869')] +[2023-02-26 16:50:41,113][19482] Updated weights for policy 0, policy_version 2408 (0.0012) +[2023-02-26 16:50:44,199][00820] Fps is (10 sec: 3276.4, 60 sec: 3413.3, 300 sec: 3526.8). Total num frames: 9871360. Throughput: 0: 830.8. Samples: 1465864. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:50:44,203][00820] Avg episode reward: [(0, '5.038')] +[2023-02-26 16:50:49,198][00820] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3526.7). Total num frames: 9887744. Throughput: 0: 832.7. Samples: 1471052. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-02-26 16:50:49,201][00820] Avg episode reward: [(0, '4.941')] +[2023-02-26 16:50:52,308][19482] Updated weights for policy 0, policy_version 2418 (0.0018) +[2023-02-26 16:50:54,198][00820] Fps is (10 sec: 4096.4, 60 sec: 3413.3, 300 sec: 3554.5). Total num frames: 9912320. Throughput: 0: 829.4. Samples: 1474348. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-02-26 16:50:54,200][00820] Avg episode reward: [(0, '4.877')] +[2023-02-26 16:50:59,199][00820] Fps is (10 sec: 4095.6, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 9928704. Throughput: 0: 849.9. Samples: 1480388. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:50:59,202][00820] Avg episode reward: [(0, '4.778')] +[2023-02-26 16:51:04,199][00820] Fps is (10 sec: 2866.9, 60 sec: 3345.0, 300 sec: 3526.7). Total num frames: 9940992. Throughput: 0: 866.5. Samples: 1484472. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:51:04,204][00820] Avg episode reward: [(0, '4.887')] +[2023-02-26 16:51:04,665][19482] Updated weights for policy 0, policy_version 2428 (0.0013) +[2023-02-26 16:51:09,198][00820] Fps is (10 sec: 3277.1, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 9961472. Throughput: 0: 873.4. Samples: 1486542. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-02-26 16:51:09,201][00820] Avg episode reward: [(0, '4.993')] +[2023-02-26 16:51:14,198][00820] Fps is (10 sec: 4096.5, 60 sec: 3481.6, 300 sec: 3554.5). Total num frames: 9981952. Throughput: 0: 917.0. Samples: 1493134. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-02-26 16:51:14,203][00820] Avg episode reward: [(0, '5.221')] +[2023-02-26 16:51:14,852][19482] Updated weights for policy 0, policy_version 2438 (0.0014) +[2023-02-26 16:51:19,198][00820] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 9998336. Throughput: 0: 895.8. Samples: 1498892. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-02-26 16:51:19,200][00820] Avg episode reward: [(0, '5.189')] +[2023-02-26 16:51:21,006][19466] Stopping Batcher_0... +[2023-02-26 16:51:21,007][19466] Loop batcher_evt_loop terminating... +[2023-02-26 16:51:21,007][00820] Component Batcher_0 stopped! +[2023-02-26 16:51:21,012][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth... +[2023-02-26 16:51:21,117][19482] Weights refcount: 2 0 +[2023-02-26 16:51:21,124][00820] Component InferenceWorker_p0-w0 stopped! +[2023-02-26 16:51:21,120][19482] Stopping InferenceWorker_p0-w0... +[2023-02-26 16:51:21,134][19482] Loop inference_proc0-0_evt_loop terminating... +[2023-02-26 16:51:21,163][19466] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002282_9347072.pth +[2023-02-26 16:51:21,172][19466] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth... +[2023-02-26 16:51:21,194][00820] Component RolloutWorker_w3 stopped! +[2023-02-26 16:51:21,196][19490] Stopping RolloutWorker_w3... +[2023-02-26 16:51:21,197][19490] Loop rollout_proc3_evt_loop terminating... +[2023-02-26 16:51:21,211][00820] Component RolloutWorker_w7 stopped! +[2023-02-26 16:51:21,213][19503] Stopping RolloutWorker_w7... +[2023-02-26 16:51:21,215][00820] Component RolloutWorker_w5 stopped! +[2023-02-26 16:51:21,217][19497] Stopping RolloutWorker_w5... +[2023-02-26 16:51:21,218][19497] Loop rollout_proc5_evt_loop terminating... +[2023-02-26 16:51:21,214][19503] Loop rollout_proc7_evt_loop terminating... +[2023-02-26 16:51:21,237][19480] Stopping RolloutWorker_w0... +[2023-02-26 16:51:21,238][19480] Loop rollout_proc0_evt_loop terminating... +[2023-02-26 16:51:21,252][00820] Component RolloutWorker_w0 stopped! +[2023-02-26 16:51:21,259][00820] Component RolloutWorker_w1 stopped! +[2023-02-26 16:51:21,261][19481] Stopping RolloutWorker_w1... +[2023-02-26 16:51:21,262][19481] Loop rollout_proc1_evt_loop terminating... +[2023-02-26 16:51:21,293][19495] Stopping RolloutWorker_w6... +[2023-02-26 16:51:21,293][19495] Loop rollout_proc6_evt_loop terminating... +[2023-02-26 16:51:21,293][00820] Component RolloutWorker_w6 stopped! 
+[2023-02-26 16:51:21,299][00820] Component RolloutWorker_w2 stopped!
+[2023-02-26 16:51:21,300][19488] Stopping RolloutWorker_w2...
+[2023-02-26 16:51:21,306][19488] Loop rollout_proc2_evt_loop terminating...
+[2023-02-26 16:51:21,353][19493] Stopping RolloutWorker_w4...
+[2023-02-26 16:51:21,350][00820] Component RolloutWorker_w4 stopped!
+[2023-02-26 16:51:21,354][19493] Loop rollout_proc4_evt_loop terminating...
+[2023-02-26 16:51:21,540][19466] Stopping LearnerWorker_p0...
+[2023-02-26 16:51:21,541][19466] Loop learner_proc0_evt_loop terminating...
+[2023-02-26 16:51:21,540][00820] Component LearnerWorker_p0 stopped!
+[2023-02-26 16:51:21,543][00820] Waiting for process learner_proc0 to stop...
+[2023-02-26 16:51:24,670][00820] Waiting for process inference_proc0-0 to join...
+[2023-02-26 16:51:24,799][00820] Waiting for process rollout_proc0 to join...
+[2023-02-26 16:51:25,244][00820] Waiting for process rollout_proc1 to join...
+[2023-02-26 16:51:25,245][00820] Waiting for process rollout_proc2 to join...
+[2023-02-26 16:51:25,253][00820] Waiting for process rollout_proc3 to join...
+[2023-02-26 16:51:25,254][00820] Waiting for process rollout_proc4 to join...
+[2023-02-26 16:51:25,255][00820] Waiting for process rollout_proc5 to join...
+[2023-02-26 16:51:25,256][00820] Waiting for process rollout_proc6 to join...
+[2023-02-26 16:51:25,261][00820] Waiting for process rollout_proc7 to join...
+[2023-02-26 16:51:25,262][00820] Batcher 0 profile tree view:
+batching: 39.8190, releasing_batches: 0.0350
+[2023-02-26 16:51:25,263][00820] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0000
+  wait_policy_total: 839.7376
+update_model: 11.2331
+  weight_update: 0.0026
+one_step: 0.0190
+  handle_policy_step: 811.9186
+    deserialize: 23.0237, stack: 4.6509, obs_to_device_normalize: 175.8804, forward: 393.0427, send_messages: 39.0279
+    prepare_outputs: 135.6572
+      to_cpu: 85.2689
+[2023-02-26 16:51:25,264][00820] Learner 0 profile tree view:
+misc: 0.0101, prepare_batch: 22.9651
+train: 122.7619
+  epoch_init: 0.0139, minibatch_init: 0.0153, losses_postprocess: 0.9202, kl_divergence: 0.8940, after_optimizer: 4.8352
+  calculate_losses: 41.9998
+    losses_init: 0.0252, forward_head: 2.6810, bptt_initial: 27.6608, tail: 1.7127, advantages_returns: 0.4869, losses: 5.3503
+    bptt: 3.5297
+      bptt_forward_core: 3.3478
+  update: 73.0317
+    clip: 2.1132
+[2023-02-26 16:51:25,266][00820] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.5288, enqueue_policy_requests: 230.8253, env_step: 1300.6767, overhead: 33.6267, complete_rollouts: 10.7723
+save_policy_outputs: 32.5288
+  split_output_tensors: 15.9167
+[2023-02-26 16:51:25,267][00820] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.4977, enqueue_policy_requests: 230.6601, env_step: 1303.9735, overhead: 33.3603, complete_rollouts: 11.2789
+save_policy_outputs: 32.9736
+  split_output_tensors: 16.2617
+[2023-02-26 16:51:25,268][00820] Loop Runner_EvtLoop terminating...
+[2023-02-26 16:51:25,270][00820] Runner profile tree view:
+main_loop: 1749.5453
+[2023-02-26 16:51:25,271][00820] Collected {0: 10006528}, FPS: 3429.8
+[2023-02-26 16:51:25,313][00820] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-26 16:51:25,315][00820] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-02-26 16:51:25,316][00820] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-02-26 16:51:25,317][00820] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-02-26 16:51:25,319][00820] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-26 16:51:25,320][00820] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-02-26 16:51:25,321][00820] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-26 16:51:25,322][00820] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-02-26 16:51:25,323][00820] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2023-02-26 16:51:25,324][00820] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2023-02-26 16:51:25,325][00820] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-02-26 16:51:25,326][00820] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-02-26 16:51:25,327][00820] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-02-26 16:51:25,329][00820] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-02-26 16:51:25,330][00820] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-02-26 16:51:25,359][00820] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-26 16:51:25,361][00820] RunningMeanStd input shape: (1,)
+[2023-02-26 16:51:25,379][00820] ConvEncoder: input_channels=3
+[2023-02-26 16:51:25,430][00820] Conv encoder output size: 512
+[2023-02-26 16:51:25,432][00820] Policy head output size: 512
+[2023-02-26 16:51:25,459][00820] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
+[2023-02-26 16:51:26,275][00820] Num frames 100...
+[2023-02-26 16:51:26,387][00820] Num frames 200...
+[2023-02-26 16:51:26,503][00820] Num frames 300...
+[2023-02-26 16:51:26,652][00820] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+[2023-02-26 16:51:26,656][00820] Avg episode reward: 3.840, avg true_objective: 3.840
+[2023-02-26 16:51:26,680][00820] Num frames 400...
+[2023-02-26 16:51:26,802][00820] Num frames 500...
+[2023-02-26 16:51:26,912][00820] Num frames 600...
+[2023-02-26 16:51:27,018][00820] Avg episode rewards: #0: 3.215, true rewards: #0: 3.215
+[2023-02-26 16:51:27,020][00820] Avg episode reward: 3.215, avg true_objective: 3.215
+[2023-02-26 16:51:27,097][00820] Num frames 700...
+[2023-02-26 16:51:27,211][00820] Num frames 800...
+[2023-02-26 16:51:27,332][00820] Num frames 900...
+[2023-02-26 16:51:27,445][00820] Num frames 1000...
+[2023-02-26 16:51:27,560][00820] Num frames 1100...
+[2023-02-26 16:51:27,687][00820] Num frames 1200...
+[2023-02-26 16:51:27,836][00820] Avg episode rewards: #0: 5.277, true rewards: #0: 4.277
+[2023-02-26 16:51:27,839][00820] Avg episode reward: 5.277, avg true_objective: 4.277
+[2023-02-26 16:51:27,862][00820] Num frames 1300...
+[2023-02-26 16:51:27,975][00820] Num frames 1400...
+[2023-02-26 16:51:28,099][00820] Num frames 1500...
+[2023-02-26 16:51:28,213][00820] Num frames 1600...
+[2023-02-26 16:51:28,329][00820] Num frames 1700...
+[2023-02-26 16:51:28,445][00820] Num frames 1800...
+[2023-02-26 16:51:28,613][00820] Avg episode rewards: #0: 6.228, true rewards: #0: 4.727
+[2023-02-26 16:51:28,615][00820] Avg episode reward: 6.228, avg true_objective: 4.727
+[2023-02-26 16:51:28,630][00820] Num frames 1900...
+[2023-02-26 16:51:28,744][00820] Num frames 2000...
+[2023-02-26 16:51:28,863][00820] Num frames 2100...
+[2023-02-26 16:51:28,978][00820] Num frames 2200...
+[2023-02-26 16:51:29,090][00820] Num frames 2300...
+[2023-02-26 16:51:29,194][00820] Avg episode rewards: #0: 6.078, true rewards: #0: 4.678
+[2023-02-26 16:51:29,195][00820] Avg episode reward: 6.078, avg true_objective: 4.678
+[2023-02-26 16:51:29,271][00820] Num frames 2400...
+[2023-02-26 16:51:29,384][00820] Num frames 2500...
+[2023-02-26 16:51:29,497][00820] Num frames 2600...
+[2023-02-26 16:51:29,610][00820] Num frames 2700...
+[2023-02-26 16:51:29,692][00820] Avg episode rewards: #0: 5.705, true rewards: #0: 4.538
+[2023-02-26 16:51:29,694][00820] Avg episode reward: 5.705, avg true_objective: 4.538
+[2023-02-26 16:51:29,791][00820] Num frames 2800...
+[2023-02-26 16:51:29,904][00820] Num frames 2900...
+[2023-02-26 16:51:30,015][00820] Num frames 3000...
+[2023-02-26 16:51:30,127][00820] Num frames 3100...
+[2023-02-26 16:51:30,195][00820] Avg episode rewards: #0: 5.439, true rewards: #0: 4.439
+[2023-02-26 16:51:30,197][00820] Avg episode reward: 5.439, avg true_objective: 4.439
+[2023-02-26 16:51:30,310][00820] Num frames 3200...
+[2023-02-26 16:51:30,426][00820] Num frames 3300...
+[2023-02-26 16:51:30,546][00820] Num frames 3400...
+[2023-02-26 16:51:30,669][00820] Avg episode rewards: #0: 5.324, true rewards: #0: 4.324
+[2023-02-26 16:51:30,670][00820] Avg episode reward: 5.324, avg true_objective: 4.324
+[2023-02-26 16:51:30,722][00820] Num frames 3500...
+[2023-02-26 16:51:30,834][00820] Num frames 3600...
+[2023-02-26 16:51:30,944][00820] Num frames 3700...
+[2023-02-26 16:51:31,059][00820] Num frames 3800...
+[2023-02-26 16:51:31,171][00820] Avg episode rewards: #0: 5.159, true rewards: #0: 4.270
+[2023-02-26 16:51:31,172][00820] Avg episode reward: 5.159, avg true_objective: 4.270
+[2023-02-26 16:51:31,241][00820] Num frames 3900...
+[2023-02-26 16:51:31,359][00820] Num frames 4000...
+[2023-02-26 16:51:31,475][00820] Num frames 4100...
+[2023-02-26 16:51:31,604][00820] Num frames 4200...
+[2023-02-26 16:51:31,761][00820] Avg episode rewards: #0: 5.191, true rewards: #0: 4.291
+[2023-02-26 16:51:31,763][00820] Avg episode reward: 5.191, avg true_objective: 4.291
+[2023-02-26 16:51:57,077][00820] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-02-26 16:53:56,572][00820] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-02-26 16:53:56,576][00820] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-02-26 16:53:56,579][00820] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-02-26 16:53:56,582][00820] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-02-26 16:53:56,585][00820] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-02-26 16:53:56,587][00820] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-02-26 16:53:56,589][00820] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2023-02-26 16:53:56,590][00820] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-02-26 16:53:56,592][00820] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2023-02-26 16:53:56,593][00820] Adding new argument 'hf_repository'='mlewand/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2023-02-26 16:53:56,595][00820] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-02-26 16:53:56,596][00820] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-02-26 16:53:56,597][00820] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-02-26 16:53:56,599][00820] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-02-26 16:53:56,601][00820] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-02-26 16:53:56,633][00820] RunningMeanStd input shape: (3, 72, 128)
+[2023-02-26 16:53:56,634][00820] RunningMeanStd input shape: (1,)
+[2023-02-26 16:53:56,656][00820] ConvEncoder: input_channels=3
+[2023-02-26 16:53:56,713][00820] Conv encoder output size: 512
+[2023-02-26 16:53:56,716][00820] Policy head output size: 512
+[2023-02-26 16:53:56,743][00820] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth...
+[2023-02-26 16:53:57,398][00820] Num frames 100...
+[2023-02-26 16:53:57,515][00820] Num frames 200...
+[2023-02-26 16:53:57,626][00820] Num frames 300...
+[2023-02-26 16:53:57,742][00820] Num frames 400...
+[2023-02-26 16:53:57,850][00820] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+[2023-02-26 16:53:57,855][00820] Avg episode reward: 5.480, avg true_objective: 4.480
+[2023-02-26 16:53:57,917][00820] Num frames 500...
+[2023-02-26 16:53:58,030][00820] Num frames 600...
+[2023-02-26 16:53:58,143][00820] Num frames 700...
+[2023-02-26 16:53:58,263][00820] Num frames 800...
+[2023-02-26 16:53:58,357][00820] Avg episode rewards: #0: 4.660, true rewards: #0: 4.160
+[2023-02-26 16:53:58,359][00820] Avg episode reward: 4.660, avg true_objective: 4.160
+[2023-02-26 16:53:58,445][00820] Num frames 900...
+[2023-02-26 16:53:58,564][00820] Num frames 1000...
+[2023-02-26 16:53:58,674][00820] Num frames 1100...
+[2023-02-26 16:53:58,784][00820] Avg episode rewards: #0: 4.170, true rewards: #0: 3.837
+[2023-02-26 16:53:58,786][00820] Avg episode reward: 4.170, avg true_objective: 3.837
+[2023-02-26 16:53:58,848][00820] Num frames 1200...
+[2023-02-26 16:53:58,972][00820] Num frames 1300...
+[2023-02-26 16:53:59,084][00820] Num frames 1400...
+[2023-02-26 16:53:59,221][00820] Avg episode rewards: #0: 4.428, true rewards: #0: 3.677
+[2023-02-26 16:53:59,227][00820] Avg episode reward: 4.428, avg true_objective: 3.677
+[2023-02-26 16:53:59,264][00820] Num frames 1500...
+[2023-02-26 16:53:59,379][00820] Num frames 1600...
+[2023-02-26 16:53:59,529][00820] Num frames 1700...
+[2023-02-26 16:53:59,621][00820] Avg episode rewards: #0: 4.054, true rewards: #0: 3.454
+[2023-02-26 16:53:59,623][00820] Avg episode reward: 4.054, avg true_objective: 3.454
+[2023-02-26 16:53:59,707][00820] Num frames 1800...
+[2023-02-26 16:53:59,819][00820] Num frames 1900...
+[2023-02-26 16:53:59,934][00820] Num frames 2000...
+[2023-02-26 16:54:00,057][00820] Num frames 2100...
+[2023-02-26 16:54:00,200][00820] Avg episode rewards: #0: 4.292, true rewards: #0: 3.625
+[2023-02-26 16:54:00,202][00820] Avg episode reward: 4.292, avg true_objective: 3.625
+[2023-02-26 16:54:00,237][00820] Num frames 2200...
+[2023-02-26 16:54:00,360][00820] Num frames 2300...
+[2023-02-26 16:54:00,483][00820] Num frames 2400...
+[2023-02-26 16:54:00,600][00820] Num frames 2500...
+[2023-02-26 16:54:00,725][00820] Avg episode rewards: #0: 4.227, true rewards: #0: 3.656
+[2023-02-26 16:54:00,726][00820] Avg episode reward: 4.227, avg true_objective: 3.656
+[2023-02-26 16:54:00,779][00820] Num frames 2600...
+[2023-02-26 16:54:00,891][00820] Num frames 2700...
+[2023-02-26 16:54:01,003][00820] Num frames 2800...
+[2023-02-26 16:54:01,113][00820] Num frames 2900...
+[2023-02-26 16:54:01,260][00820] Avg episode rewards: #0: 4.219, true rewards: #0: 3.719
+[2023-02-26 16:54:01,261][00820] Avg episode reward: 4.219, avg true_objective: 3.719
+[2023-02-26 16:54:01,296][00820] Num frames 3000...
+[2023-02-26 16:54:01,416][00820] Num frames 3100...
+[2023-02-26 16:54:01,539][00820] Num frames 3200...
+[2023-02-26 16:54:01,629][00820] Avg episode rewards: #0: 4.034, true rewards: #0: 3.590
+[2023-02-26 16:54:01,631][00820] Avg episode reward: 4.034, avg true_objective: 3.590
+[2023-02-26 16:54:01,714][00820] Num frames 3300...
+[2023-02-26 16:54:01,823][00820] Num frames 3400...
+[2023-02-26 16:54:01,942][00820] Num frames 3500...
+[2023-02-26 16:54:02,053][00820] Num frames 3600...
+[2023-02-26 16:54:02,127][00820] Avg episode rewards: #0: 4.015, true rewards: #0: 3.615
+[2023-02-26 16:54:02,129][00820] Avg episode reward: 4.015, avg true_objective: 3.615
+[2023-02-26 16:54:21,860][00820] Replay video saved to /content/train_dir/default_experiment/replay.mp4!