diff --git "a/sf_log.txt" "b/sf_log.txt" --- "a/sf_log.txt" +++ "b/sf_log.txt" @@ -1508,3 +1508,1178 @@ main_loop: 348.8191 [2023-09-05 00:01:55,734][01455] Avg episode rewards: #0: 12.904, true rewards: #0: 7.104 [2023-09-05 00:01:55,738][01455] Avg episode reward: 12.904, avg true_objective: 7.104 [2023-09-05 00:02:42,526][01455] Replay video saved to /content/train_dir/default_experiment/replay.mp4! +[2023-09-05 00:02:46,093][01455] The model has been pushed to https://huggingface.co/dimitarrskv/rl_course_vizdoom_health_gathering_supreme +[2023-09-05 00:04:22,549][01455] Environment doom_basic already registered, overwriting... +[2023-09-05 00:04:22,552][01455] Environment doom_two_colors_easy already registered, overwriting... +[2023-09-05 00:04:22,555][01455] Environment doom_two_colors_hard already registered, overwriting... +[2023-09-05 00:04:22,559][01455] Environment doom_dm already registered, overwriting... +[2023-09-05 00:04:22,561][01455] Environment doom_dwango5 already registered, overwriting... +[2023-09-05 00:04:22,562][01455] Environment doom_my_way_home_flat_actions already registered, overwriting... +[2023-09-05 00:04:22,564][01455] Environment doom_defend_the_center_flat_actions already registered, overwriting... +[2023-09-05 00:04:22,568][01455] Environment doom_my_way_home already registered, overwriting... +[2023-09-05 00:04:22,569][01455] Environment doom_deadly_corridor already registered, overwriting... +[2023-09-05 00:04:22,570][01455] Environment doom_defend_the_center already registered, overwriting... +[2023-09-05 00:04:22,571][01455] Environment doom_defend_the_line already registered, overwriting... +[2023-09-05 00:04:22,575][01455] Environment doom_health_gathering already registered, overwriting... +[2023-09-05 00:04:22,577][01455] Environment doom_health_gathering_supreme already registered, overwriting... +[2023-09-05 00:04:22,578][01455] Environment doom_battle already registered, overwriting... +[2023-09-05 00:04:22,579][01455] Environment doom_battle2 already registered, overwriting... +[2023-09-05 00:04:22,581][01455] Environment doom_duel_bots already registered, overwriting... +[2023-09-05 00:04:22,585][01455] Environment doom_deathmatch_bots already registered, overwriting... +[2023-09-05 00:04:22,586][01455] Environment doom_duel already registered, overwriting... +[2023-09-05 00:04:22,587][01455] Environment doom_deathmatch_full already registered, overwriting... +[2023-09-05 00:04:22,588][01455] Environment doom_benchmark already registered, overwriting... +[2023-09-05 00:04:22,590][01455] register_encoder_factory: +[2023-09-05 00:04:22,622][01455] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-09-05 00:04:22,627][01455] Overriding arg 'train_for_env_steps' with value 5000000 passed from command line +[2023-09-05 00:04:22,635][01455] Experiment dir /content/train_dir/default_experiment already exists! +[2023-09-05 00:04:22,636][01455] Resuming existing experiment from /content/train_dir/default_experiment... 
+[2023-09-05 00:04:22,638][01455] Weights and Biases integration disabled
+[2023-09-05 00:04:22,643][01455] Environment var CUDA_VISIBLE_DEVICES is 0
+
+[2023-09-05 00:04:25,592][01455] Starting experiment with the following configuration:
+help=False
+algo=APPO
+env=doom_health_gathering_supreme
+experiment=default_experiment
+train_dir=/content/train_dir
+restart_behavior=resume
+device=gpu
+seed=None
+num_policies=1
+async_rl=True
+serial_mode=False
+batched_sampling=False
+num_batches_to_accumulate=2
+worker_num_splits=2
+policy_workers_per_policy=1
+max_policy_lag=1000
+num_workers=8
+num_envs_per_worker=4
+batch_size=1024
+num_batches_per_epoch=1
+num_epochs=1
+rollout=32
+recurrence=32
+shuffle_minibatches=False
+gamma=0.99
+reward_scale=1.0
+reward_clip=1000.0
+value_bootstrap=False
+normalize_returns=True
+exploration_loss_coeff=0.001
+value_loss_coeff=0.5
+kl_loss_coeff=0.0
+exploration_loss=symmetric_kl
+gae_lambda=0.95
+ppo_clip_ratio=0.1
+ppo_clip_value=0.2
+with_vtrace=False
+vtrace_rho=1.0
+vtrace_c=1.0
+optimizer=adam
+adam_eps=1e-06
+adam_beta1=0.9
+adam_beta2=0.999
+max_grad_norm=4.0
+learning_rate=0.0001
+lr_schedule=constant
+lr_schedule_kl_threshold=0.008
+lr_adaptive_min=1e-06
+lr_adaptive_max=0.01
+obs_subtract_mean=0.0
+obs_scale=255.0
+normalize_input=True
+normalize_input_keys=None
+decorrelate_experience_max_seconds=0
+decorrelate_envs_on_one_worker=True
+actor_worker_gpus=[]
+set_workers_cpu_affinity=True
+force_envs_single_thread=False
+default_niceness=0
+log_to_file=True
+experiment_summaries_interval=10
+flush_summaries_interval=30
+stats_avg=100
+summaries_use_frameskip=True
+heartbeat_interval=20
+heartbeat_reporting_interval=600
+train_for_env_steps=5000000
+train_for_seconds=10000000000
+save_every_sec=120
+keep_checkpoints=2
+load_checkpoint_kind=latest
+save_milestones_sec=-1
+save_best_every_sec=5
+save_best_metric=reward
+save_best_after=100000
+benchmark=False
+encoder_mlp_layers=[512, 512]
+encoder_conv_architecture=convnet_simple
+encoder_conv_mlp_layers=[512]
+use_rnn=True
+rnn_size=512
+rnn_type=gru
+rnn_num_layers=1
+decoder_mlp_layers=[]
+nonlinearity=elu
+policy_initialization=orthogonal
+policy_init_gain=1.0
+actor_critic_share_weights=True
+adaptive_stddev=True
+continuous_tanh_scale=0.0
+initial_stddev=1.0
+use_env_info_cache=False
+env_gpu_actions=False
+env_gpu_observations=True
+env_frameskip=4
+env_framestack=1
+pixel_format=CHW
+use_record_episode_statistics=False
+with_wandb=False
+wandb_user=None
+wandb_project=sample_factory
+wandb_group=None
+wandb_job_type=SF
+wandb_tags=[]
+with_pbt=False
+pbt_mix_policies_in_one_env=True
+pbt_period_env_steps=5000000
+pbt_start_mutation=20000000
+pbt_replace_fraction=0.3
+pbt_mutation_rate=0.15
+pbt_replace_reward_gap=0.1
+pbt_replace_reward_gap_absolute=1e-06
+pbt_optimize_gamma=False
+pbt_target_objective=true_objective
+pbt_perturb_min=1.1
+pbt_perturb_max=1.5
+num_agents=-1
+num_humans=0
+num_bots=-1
+start_bot_difficulty=None
+timelimit=None
+res_w=128
+res_h=72
+wide_aspect_ratio=False
+eval_env_frameskip=1
+fps=35
+command_line=--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=1100000
+cli_args={'env': 'doom_health_gathering_supreme', 'num_workers': 8, 'num_envs_per_worker': 4, 'train_for_env_steps': 1100000}
+git_hash=unknown
+git_repo_name=not a git repository
+[2023-09-05 00:04:25,596][01455] Saving configuration to /content/train_dir/default_experiment/config.json...
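
The configuration above is the saved config.json merged with the new command line: command_line/cli_args still record the original 1,100,000-step run, while train_for_env_steps was overridden to 5,000,000 and restart_behavior=resume continues from the latest checkpoint. A sketch of how such a resumed run is typically launched from the course notebook; parse_sf_args, parse_full_cfg, run_rl, add_doom_env_args, and doom_override_defaults are Sample Factory functions, while parse_vizdoom_cfg is a notebook-style helper assumed here:

from sample_factory.cfg.arguments import parse_full_cfg, parse_sf_args
from sample_factory.train import run_rl
from sf_examples.vizdoom.doom.doom_params import add_doom_env_args, doom_override_defaults

def parse_vizdoom_cfg(argv=None, evaluation=False):
    parser, _ = parse_sf_args(argv=argv, evaluation=evaluation)
    add_doom_env_args(parser)       # Doom-specific CLI args (resolution, bots, ...)
    doom_override_defaults(parser)  # Doom-tuned defaults for the algo parameters
    return parse_full_cfg(parser, argv)

# restart_behavior=resume: rerunning with a larger budget picks up the latest
# checkpoint and merely overrides train_for_env_steps (5000000 here).
cfg = parse_vizdoom_cfg(
    argv=[
        "--env=doom_health_gathering_supreme",
        "--num_workers=8",
        "--num_envs_per_worker=4",
        "--train_for_env_steps=5000000",
    ]
)
status = run_rl(cfg)
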
+[2023-09-05 00:04:25,602][01455] Rollout worker 0 uses device cpu
+[2023-09-05 00:04:25,603][01455] Rollout worker 1 uses device cpu
+[2023-09-05 00:04:25,605][01455] Rollout worker 2 uses device cpu
+[2023-09-05 00:04:25,606][01455] Rollout worker 3 uses device cpu
+[2023-09-05 00:04:25,608][01455] Rollout worker 4 uses device cpu
+[2023-09-05 00:04:25,610][01455] Rollout worker 5 uses device cpu
+[2023-09-05 00:04:25,611][01455] Rollout worker 6 uses device cpu
+[2023-09-05 00:04:25,612][01455] Rollout worker 7 uses device cpu
+[2023-09-05 00:04:25,689][01455] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-05 00:04:25,690][01455] InferenceWorker_p0-w0: min num requests: 2
+[2023-09-05 00:04:25,724][01455] Starting all processes...
+[2023-09-05 00:04:25,725][01455] Starting process learner_proc0
+[2023-09-05 00:04:25,774][01455] Starting all processes...
+[2023-09-05 00:04:25,778][01455] Starting process inference_proc0-0
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc0
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc1
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc2
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc3
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc4
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc5
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc6
+[2023-09-05 00:04:25,780][01455] Starting process rollout_proc7
+[2023-09-05 00:04:41,903][21252] Worker 7 uses CPU cores [1]
+[2023-09-05 00:04:42,047][21245] Worker 0 uses CPU cores [0]
+[2023-09-05 00:04:42,145][21247] Worker 1 uses CPU cores [1]
+[2023-09-05 00:04:42,173][21231] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-05 00:04:42,176][21231] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2023-09-05 00:04:42,208][21231] Num visible devices: 1
+[2023-09-05 00:04:42,235][21231] Starting seed is not provided
+[2023-09-05 00:04:42,236][21231] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-05 00:04:42,237][21231] Initializing actor-critic model on device cuda:0
+[2023-09-05 00:04:42,237][21231] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-05 00:04:42,239][21231] RunningMeanStd input shape: (1,)
+[2023-09-05 00:04:42,295][21231] ConvEncoder: input_channels=3
+[2023-09-05 00:04:42,353][21248] Worker 3 uses CPU cores [1]
+[2023-09-05 00:04:42,525][21249] Worker 5 uses CPU cores [1]
+[2023-09-05 00:04:42,539][21251] Worker 6 uses CPU cores [0]
+[2023-09-05 00:04:42,605][21246] Worker 2 uses CPU cores [0]
+[2023-09-05 00:04:42,607][21244] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-05 00:04:42,608][21244] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2023-09-05 00:04:42,615][21250] Worker 4 uses CPU cores [0]
+[2023-09-05 00:04:42,633][21244] Num visible devices: 1
+[2023-09-05 00:04:42,666][21231] Conv encoder output size: 512
+[2023-09-05 00:04:42,666][21231] Policy head output size: 512
+[2023-09-05 00:04:42,683][21231] Created Actor Critic model with architecture:
+[2023-09-05 00:04:42,683][21231] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-09-05 00:04:42,906][21231] Using optimizer
+[2023-09-05 00:04:42,907][21231] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000514_2105344.pth...
+[2023-09-05 00:04:42,944][21231] Loading model from checkpoint
+[2023-09-05 00:04:42,948][21231] Loaded experiment state at self.train_step=514, self.env_steps=2105344
+[2023-09-05 00:04:42,949][21231] Initialized policy 0 weights for model version 514
+[2023-09-05 00:04:42,951][21231] LearnerWorker_p0 finished initialization!
+[2023-09-05 00:04:42,952][21231] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-09-05 00:04:43,139][21244] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-05 00:04:43,140][21244] RunningMeanStd input shape: (1,)
+[2023-09-05 00:04:43,153][21244] ConvEncoder: input_channels=3
+[2023-09-05 00:04:43,257][21244] Conv encoder output size: 512
+[2023-09-05 00:04:43,257][21244] Policy head output size: 512
+[2023-09-05 00:04:43,319][01455] Inference worker 0-0 is ready!
+[2023-09-05 00:04:43,321][01455] All inference workers are ready! Signal rollout workers to start!
+[2023-09-05 00:04:43,536][21247] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,538][21249] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,540][21252] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,541][21248] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,562][21250] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,568][21251] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,572][21245] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:43,571][21246] Doom resolution: 160x120, resize resolution: (128, 72)
+[2023-09-05 00:04:44,953][21245] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:44,957][21250] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:44,961][21251] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:45,006][21247] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:45,012][21249] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:45,014][21252] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:45,681][01455] Heartbeat connected on Batcher_0
+[2023-09-05 00:04:45,688][01455] Heartbeat connected on LearnerWorker_p0
+[2023-09-05 00:04:45,739][01455] Heartbeat connected on InferenceWorker_p0-w0
+[2023-09-05 00:04:45,812][21249] Decorrelating experience for 32 frames...
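
The ActorCriticSharedWeights tree printed above is a compact shared-weights network: a three-layer convolutional head with ELU activations, a single 512-unit linear layer, a one-layer GRU core, and linear value and policy heads over the 5 discrete actions. Below is a plain-PyTorch sketch of the same shape, not Sample Factory's actual classes; the kernel/stride values follow the usual convnet_simple layout and are an assumption, since the log shows only layer types and the 512/1/5 sizes:

import torch
from torch import nn

class ActorCriticSketch(nn.Module):
    def __init__(self, num_actions: int = 5, rnn_size: int = 512):
        super().__init__()
        # conv_head: Conv2d/ELU x3 as in the printed tree (normalizers omitted).
        self.conv_head = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=8, stride=4), nn.ELU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ELU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2), nn.ELU(),
        )
        # For a 3x72x128 observation this flattens to 128*3*6 = 2304 features,
        # matching "Conv encoder output size: 512" after the Linear+ELU below.
        self.mlp_layers = nn.Sequential(nn.Linear(2304, 512), nn.ELU())
        self.core = nn.GRU(512, rnn_size)                             # ModelCoreRNN
        self.critic_linear = nn.Linear(rnn_size, 1)                   # value head
        self.distribution_linear = nn.Linear(rnn_size, num_actions)   # policy logits

    def forward(self, obs: torch.Tensor, rnn_state: torch.Tensor):
        x = self.mlp_layers(self.conv_head(obs).flatten(1))
        x, new_rnn_state = self.core(x.unsqueeze(0), rnn_state)
        x = x.squeeze(0)
        return self.distribution_linear(x), self.critic_linear(x), new_rnn_state

The policy logits parameterize a categorical distribution over the 5 Doom actions; sharing the encoder and core between actor and critic corresponds to actor_critic_share_weights=True in the configuration.
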
+[2023-09-05 00:04:45,889][21248] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:46,138][21251] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:46,143][21250] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:46,147][21246] Decorrelating experience for 0 frames...
+[2023-09-05 00:04:47,013][21248] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:47,229][21249] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:47,372][21246] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:47,383][21252] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:47,643][01455] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 2105344. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-05 00:04:47,790][21250] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:47,795][21251] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:48,421][21248] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:48,714][21252] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:48,935][21249] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:49,108][21245] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:49,121][01455] Heartbeat connected on RolloutWorker_w5
+[2023-09-05 00:04:49,669][21246] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:49,804][21250] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:49,806][21251] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:50,028][21248] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:50,146][01455] Heartbeat connected on RolloutWorker_w4
+[2023-09-05 00:04:50,149][01455] Heartbeat connected on RolloutWorker_w6
+[2023-09-05 00:04:50,292][01455] Heartbeat connected on RolloutWorker_w3
+[2023-09-05 00:04:50,628][21247] Decorrelating experience for 32 frames...
+[2023-09-05 00:04:52,643][01455] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 2105344. Throughput: 0: 238.0. Samples: 1190. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2023-09-05 00:04:52,646][01455] Avg episode reward: [(0, '2.010')]
+[2023-09-05 00:04:53,059][21245] Decorrelating experience for 64 frames...
+[2023-09-05 00:04:53,430][21246] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:54,445][01455] Heartbeat connected on RolloutWorker_w2
+[2023-09-05 00:04:56,345][21252] Decorrelating experience for 96 frames...
+[2023-09-05 00:04:57,431][01455] Heartbeat connected on RolloutWorker_w7
+[2023-09-05 00:04:57,643][01455] Fps is (10 sec: 409.6, 60 sec: 409.6, 300 sec: 409.6). Total num frames: 2109440. Throughput: 0: 177.0. Samples: 1770. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2023-09-05 00:04:57,650][01455] Avg episode reward: [(0, '5.631')]
+[2023-09-05 00:04:58,151][21247] Decorrelating experience for 64 frames...
+[2023-09-05 00:05:00,427][21245] Decorrelating experience for 96 frames...
+[2023-09-05 00:05:01,312][01455] Heartbeat connected on RolloutWorker_w0
+[2023-09-05 00:05:02,643][01455] Fps is (10 sec: 1228.8, 60 sec: 819.2, 300 sec: 819.2). Total num frames: 2117632. Throughput: 0: 242.4. Samples: 3636. Policy #0 lag: (min: 0.0, avg: 0.8, max: 1.0)
+[2023-09-05 00:05:02,646][01455] Avg episode reward: [(0, '6.790')]
+[2023-09-05 00:05:03,369][21247] Decorrelating experience for 96 frames...
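
The staggered "Decorrelating experience for 0/32/64/96 frames..." lines match multiples of the rollout length (rollout=32) across the four envs on each worker, so episodes on one worker do not stay phase-locked once rollouts begin. A hypothetical sketch of that warm-up idea, not Sample Factory's implementation:

def decorrelate_worker_envs(envs, rollout=32):
    # Hypothetical: env i on a worker is warmed up for i * rollout random-action
    # frames (0, 32, 64, 96 with num_envs_per_worker=4) before rollouts start.
    for i, env in enumerate(envs):
        env.reset()
        for _ in range(i * rollout):
            obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
            if terminated or truncated:
                env.reset()  # gymnasium-style API assumed
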
+[2023-09-05 00:05:03,878][01455] Heartbeat connected on RolloutWorker_w1 +[2023-09-05 00:05:07,643][01455] Fps is (10 sec: 2867.2, 60 sec: 1638.4, 300 sec: 1638.4). Total num frames: 2138112. Throughput: 0: 445.2. Samples: 8904. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:05:07,645][01455] Avg episode reward: [(0, '8.059')] +[2023-09-05 00:05:09,322][21244] Updated weights for policy 0, policy_version 524 (0.0024) +[2023-09-05 00:05:12,653][01455] Fps is (10 sec: 4092.0, 60 sec: 2129.1, 300 sec: 2129.1). Total num frames: 2158592. Throughput: 0: 473.3. Samples: 11838. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-09-05 00:05:12,655][01455] Avg episode reward: [(0, '10.942')] +[2023-09-05 00:05:17,643][01455] Fps is (10 sec: 3276.8, 60 sec: 2184.5, 300 sec: 2184.5). Total num frames: 2170880. Throughput: 0: 544.9. Samples: 16348. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-09-05 00:05:17,652][01455] Avg episode reward: [(0, '12.827')] +[2023-09-05 00:05:22,643][01455] Fps is (10 sec: 2460.0, 60 sec: 2223.5, 300 sec: 2223.5). Total num frames: 2183168. Throughput: 0: 574.6. Samples: 20110. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:05:22,649][01455] Avg episode reward: [(0, '14.370')] +[2023-09-05 00:05:23,396][21244] Updated weights for policy 0, policy_version 534 (0.0023) +[2023-09-05 00:05:27,643][01455] Fps is (10 sec: 3276.8, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 2203648. Throughput: 0: 569.6. Samples: 22784. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:05:27,646][01455] Avg episode reward: [(0, '16.087')] +[2023-09-05 00:05:27,647][21231] Saving new best policy, reward=16.087! +[2023-09-05 00:05:32,643][01455] Fps is (10 sec: 3686.4, 60 sec: 2548.6, 300 sec: 2548.6). Total num frames: 2220032. Throughput: 0: 634.6. Samples: 28556. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:05:32,646][01455] Avg episode reward: [(0, '18.188')] +[2023-09-05 00:05:32,656][21231] Saving new best policy, reward=18.188! +[2023-09-05 00:05:34,481][21244] Updated weights for policy 0, policy_version 544 (0.0013) +[2023-09-05 00:05:37,643][01455] Fps is (10 sec: 2867.1, 60 sec: 2539.5, 300 sec: 2539.5). Total num frames: 2232320. Throughput: 0: 704.9. Samples: 32912. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:05:37,648][01455] Avg episode reward: [(0, '18.340')] +[2023-09-05 00:05:37,651][21231] Saving new best policy, reward=18.340! +[2023-09-05 00:05:42,643][01455] Fps is (10 sec: 2457.6, 60 sec: 2532.1, 300 sec: 2532.1). Total num frames: 2244608. Throughput: 0: 731.6. Samples: 34694. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:05:42,647][01455] Avg episode reward: [(0, '17.984')] +[2023-09-05 00:05:47,643][01455] Fps is (10 sec: 3276.9, 60 sec: 2662.4, 300 sec: 2662.4). Total num frames: 2265088. Throughput: 0: 794.3. Samples: 39380. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:05:47,651][01455] Avg episode reward: [(0, '16.965')] +[2023-09-05 00:05:48,670][21244] Updated weights for policy 0, policy_version 554 (0.0024) +[2023-09-05 00:05:52,643][01455] Fps is (10 sec: 3686.4, 60 sec: 2935.5, 300 sec: 2709.7). Total num frames: 2281472. Throughput: 0: 810.5. Samples: 45376. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:05:52,653][01455] Avg episode reward: [(0, '16.425')] +[2023-09-05 00:05:57,645][01455] Fps is (10 sec: 3276.3, 60 sec: 3140.2, 300 sec: 2750.1). Total num frames: 2297856. Throughput: 0: 798.5. Samples: 47762. 
Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:05:57,650][01455] Avg episode reward: [(0, '16.128')] +[2023-09-05 00:06:02,053][21244] Updated weights for policy 0, policy_version 564 (0.0028) +[2023-09-05 00:06:02,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 2730.7). Total num frames: 2310144. Throughput: 0: 777.6. Samples: 51342. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-09-05 00:06:02,646][01455] Avg episode reward: [(0, '16.178')] +[2023-09-05 00:06:07,643][01455] Fps is (10 sec: 2867.7, 60 sec: 3140.3, 300 sec: 2764.8). Total num frames: 2326528. Throughput: 0: 797.7. Samples: 56006. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-09-05 00:06:07,651][01455] Avg episode reward: [(0, '15.549')] +[2023-09-05 00:06:12,647][01455] Fps is (10 sec: 3685.0, 60 sec: 3140.6, 300 sec: 2843.0). Total num frames: 2347008. Throughput: 0: 804.7. Samples: 58998. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:06:12,649][01455] Avg episode reward: [(0, '16.993')] +[2023-09-05 00:06:13,509][21244] Updated weights for policy 0, policy_version 574 (0.0017) +[2023-09-05 00:06:17,647][01455] Fps is (10 sec: 3685.1, 60 sec: 3208.3, 300 sec: 2867.1). Total num frames: 2363392. Throughput: 0: 797.8. Samples: 64458. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:06:17,652][01455] Avg episode reward: [(0, '16.950')] +[2023-09-05 00:06:22,649][01455] Fps is (10 sec: 2866.7, 60 sec: 3208.2, 300 sec: 2845.5). Total num frames: 2375680. Throughput: 0: 784.3. Samples: 68210. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:06:22,655][01455] Avg episode reward: [(0, '16.837')] +[2023-09-05 00:06:22,672][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000580_2375680.pth... +[2023-09-05 00:06:22,825][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000431_1765376.pth +[2023-09-05 00:06:27,643][01455] Fps is (10 sec: 2458.5, 60 sec: 3072.0, 300 sec: 2826.2). Total num frames: 2387968. Throughput: 0: 788.1. Samples: 70160. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:06:27,646][01455] Avg episode reward: [(0, '16.772')] +[2023-09-05 00:06:27,652][21244] Updated weights for policy 0, policy_version 584 (0.0030) +[2023-09-05 00:06:32,643][01455] Fps is (10 sec: 3278.6, 60 sec: 3140.3, 300 sec: 2886.7). Total num frames: 2408448. Throughput: 0: 811.6. Samples: 75900. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:06:32,646][01455] Avg episode reward: [(0, '17.230')] +[2023-09-05 00:06:37,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.6, 300 sec: 2904.4). Total num frames: 2424832. Throughput: 0: 796.9. Samples: 81238. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:06:37,646][01455] Avg episode reward: [(0, '16.086')] +[2023-09-05 00:06:39,475][21244] Updated weights for policy 0, policy_version 594 (0.0019) +[2023-09-05 00:06:42,645][01455] Fps is (10 sec: 3276.2, 60 sec: 3276.7, 300 sec: 2920.6). Total num frames: 2441216. Throughput: 0: 784.9. Samples: 83082. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:06:42,648][01455] Avg episode reward: [(0, '15.270')] +[2023-09-05 00:06:47,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 2901.3). Total num frames: 2453504. Throughput: 0: 788.8. Samples: 86840. 
Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:06:47,649][01455] Avg episode reward: [(0, '14.914')] +[2023-09-05 00:06:52,556][21244] Updated weights for policy 0, policy_version 604 (0.0026) +[2023-09-05 00:06:52,643][01455] Fps is (10 sec: 3277.4, 60 sec: 3208.5, 300 sec: 2949.1). Total num frames: 2473984. Throughput: 0: 814.1. Samples: 92642. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2023-09-05 00:06:52,651][01455] Avg episode reward: [(0, '16.445')] +[2023-09-05 00:06:57,643][01455] Fps is (10 sec: 3686.3, 60 sec: 3208.6, 300 sec: 2961.7). Total num frames: 2490368. Throughput: 0: 813.0. Samples: 95582. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:06:57,652][01455] Avg episode reward: [(0, '18.209')] +[2023-09-05 00:07:02,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 2943.1). Total num frames: 2502656. Throughput: 0: 786.5. Samples: 99846. Policy #0 lag: (min: 0.0, avg: 0.3, max: 2.0) +[2023-09-05 00:07:02,646][01455] Avg episode reward: [(0, '18.650')] +[2023-09-05 00:07:02,733][21231] Saving new best policy, reward=18.650! +[2023-09-05 00:07:06,002][21244] Updated weights for policy 0, policy_version 614 (0.0027) +[2023-09-05 00:07:07,644][01455] Fps is (10 sec: 2867.1, 60 sec: 3208.5, 300 sec: 2955.0). Total num frames: 2519040. Throughput: 0: 787.4. Samples: 103638. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:07:07,646][01455] Avg episode reward: [(0, '17.978')] +[2023-09-05 00:07:12,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3140.5, 300 sec: 2966.1). Total num frames: 2535424. Throughput: 0: 805.6. Samples: 106410. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:07:12,646][01455] Avg episode reward: [(0, '19.612')] +[2023-09-05 00:07:12,658][21231] Saving new best policy, reward=19.612! +[2023-09-05 00:07:17,174][21244] Updated weights for policy 0, policy_version 624 (0.0028) +[2023-09-05 00:07:17,643][01455] Fps is (10 sec: 3686.6, 60 sec: 3208.7, 300 sec: 3003.7). Total num frames: 2555904. Throughput: 0: 809.7. Samples: 112336. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:07:17,646][01455] Avg episode reward: [(0, '19.975')] +[2023-09-05 00:07:17,647][21231] Saving new best policy, reward=19.975! +[2023-09-05 00:07:22,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.8, 300 sec: 2986.1). Total num frames: 2568192. Throughput: 0: 787.6. Samples: 116682. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:07:22,649][01455] Avg episode reward: [(0, '19.130')] +[2023-09-05 00:07:27,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 2969.6). Total num frames: 2580480. Throughput: 0: 787.1. Samples: 118498. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:07:27,647][01455] Avg episode reward: [(0, '19.057')] +[2023-09-05 00:07:31,805][21244] Updated weights for policy 0, policy_version 634 (0.0016) +[2023-09-05 00:07:32,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 2978.9). Total num frames: 2596864. Throughput: 0: 808.1. Samples: 123206. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:07:32,646][01455] Avg episode reward: [(0, '19.572')] +[2023-09-05 00:07:37,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3011.8). Total num frames: 2617344. Throughput: 0: 811.1. Samples: 129142. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:07:37,651][01455] Avg episode reward: [(0, '19.563')] +[2023-09-05 00:07:42,647][01455] Fps is (10 sec: 3685.1, 60 sec: 3208.4, 300 sec: 3019.3). 
Total num frames: 2633728. Throughput: 0: 801.0. Samples: 131630. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:07:42,651][01455] Avg episode reward: [(0, '19.821')] +[2023-09-05 00:07:43,852][21244] Updated weights for policy 0, policy_version 644 (0.0019) +[2023-09-05 00:07:47,646][01455] Fps is (10 sec: 2866.5, 60 sec: 3208.4, 300 sec: 3003.7). Total num frames: 2646016. Throughput: 0: 788.8. Samples: 135344. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-09-05 00:07:47,653][01455] Avg episode reward: [(0, '19.702')] +[2023-09-05 00:07:52,643][01455] Fps is (10 sec: 2868.3, 60 sec: 3140.3, 300 sec: 3011.1). Total num frames: 2662400. Throughput: 0: 810.6. Samples: 140116. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-09-05 00:07:52,650][01455] Avg episode reward: [(0, '20.017')] +[2023-09-05 00:07:52,660][21231] Saving new best policy, reward=20.017! +[2023-09-05 00:07:56,114][21244] Updated weights for policy 0, policy_version 654 (0.0021) +[2023-09-05 00:07:57,643][01455] Fps is (10 sec: 3687.2, 60 sec: 3208.6, 300 sec: 3039.7). Total num frames: 2682880. Throughput: 0: 814.4. Samples: 143060. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:07:57,650][01455] Avg episode reward: [(0, '18.994')] +[2023-09-05 00:08:02,646][01455] Fps is (10 sec: 3685.5, 60 sec: 3276.7, 300 sec: 3045.7). Total num frames: 2699264. Throughput: 0: 803.6. Samples: 148498. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:08:02,648][01455] Avg episode reward: [(0, '18.579')] +[2023-09-05 00:08:07,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.6, 300 sec: 3031.0). Total num frames: 2711552. Throughput: 0: 792.7. Samples: 152352. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:08:07,650][01455] Avg episode reward: [(0, '19.328')] +[2023-09-05 00:08:10,236][21244] Updated weights for policy 0, policy_version 664 (0.0033) +[2023-09-05 00:08:12,643][01455] Fps is (10 sec: 2867.9, 60 sec: 3208.5, 300 sec: 3037.0). Total num frames: 2727936. Throughput: 0: 794.3. Samples: 154242. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:08:12,650][01455] Avg episode reward: [(0, '20.022')] +[2023-09-05 00:08:12,662][21231] Saving new best policy, reward=20.022! +[2023-09-05 00:08:17,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 3042.7). Total num frames: 2744320. Throughput: 0: 817.7. Samples: 160002. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:08:17,655][01455] Avg episode reward: [(0, '20.123')] +[2023-09-05 00:08:17,707][21231] Saving new best policy, reward=20.123! +[2023-09-05 00:08:20,816][21244] Updated weights for policy 0, policy_version 674 (0.0024) +[2023-09-05 00:08:22,647][01455] Fps is (10 sec: 3685.1, 60 sec: 3276.6, 300 sec: 3067.2). Total num frames: 2764800. Throughput: 0: 806.0. Samples: 165414. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-09-05 00:08:22,659][01455] Avg episode reward: [(0, '21.366')] +[2023-09-05 00:08:22,675][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000675_2764800.pth... +[2023-09-05 00:08:22,819][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000514_2105344.pth +[2023-09-05 00:08:22,851][21231] Saving new best policy, reward=21.366! +[2023-09-05 00:08:27,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3053.4). Total num frames: 2777088. Throughput: 0: 789.7. Samples: 167162. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:08:27,647][01455] Avg episode reward: [(0, '20.816')] +[2023-09-05 00:08:32,643][01455] Fps is (10 sec: 2458.5, 60 sec: 3208.5, 300 sec: 3040.1). Total num frames: 2789376. Throughput: 0: 791.1. Samples: 170944. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:08:32,646][01455] Avg episode reward: [(0, '20.639')] +[2023-09-05 00:08:35,175][21244] Updated weights for policy 0, policy_version 684 (0.0028) +[2023-09-05 00:08:37,643][01455] Fps is (10 sec: 3276.7, 60 sec: 3208.5, 300 sec: 3063.1). Total num frames: 2809856. Throughput: 0: 813.2. Samples: 176710. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:08:37,651][01455] Avg episode reward: [(0, '19.619')] +[2023-09-05 00:08:42,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.7, 300 sec: 3067.6). Total num frames: 2826240. Throughput: 0: 813.8. Samples: 179682. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:08:42,648][01455] Avg episode reward: [(0, '18.855')] +[2023-09-05 00:08:47,645][01455] Fps is (10 sec: 2866.7, 60 sec: 3208.6, 300 sec: 3054.9). Total num frames: 2838528. Throughput: 0: 786.3. Samples: 183882. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:08:47,648][01455] Avg episode reward: [(0, '18.399')] +[2023-09-05 00:08:47,676][21244] Updated weights for policy 0, policy_version 694 (0.0013) +[2023-09-05 00:08:52,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3140.3, 300 sec: 3042.7). Total num frames: 2850816. Throughput: 0: 785.8. Samples: 187712. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:08:52,645][01455] Avg episode reward: [(0, '18.202')] +[2023-09-05 00:08:57,643][01455] Fps is (10 sec: 3277.4, 60 sec: 3140.3, 300 sec: 3063.8). Total num frames: 2871296. Throughput: 0: 805.7. Samples: 190500. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:08:57,651][01455] Avg episode reward: [(0, '19.047')] +[2023-09-05 00:09:00,010][21244] Updated weights for policy 0, policy_version 704 (0.0023) +[2023-09-05 00:09:02,643][01455] Fps is (10 sec: 4096.0, 60 sec: 3208.7, 300 sec: 3084.0). Total num frames: 2891776. Throughput: 0: 810.1. Samples: 196456. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:09:02,652][01455] Avg episode reward: [(0, '18.689')] +[2023-09-05 00:09:07,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3072.0). Total num frames: 2904064. Throughput: 0: 782.3. Samples: 200614. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:07,650][01455] Avg episode reward: [(0, '18.555')] +[2023-09-05 00:09:12,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3140.3, 300 sec: 3060.4). Total num frames: 2916352. Throughput: 0: 785.7. Samples: 202520. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:09:12,649][01455] Avg episode reward: [(0, '19.455')] +[2023-09-05 00:09:14,427][21244] Updated weights for policy 0, policy_version 714 (0.0055) +[2023-09-05 00:09:17,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3079.6). Total num frames: 2936832. Throughput: 0: 809.4. Samples: 207366. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:09:17,646][01455] Avg episode reward: [(0, '20.906')] +[2023-09-05 00:09:22,643][01455] Fps is (10 sec: 4096.0, 60 sec: 3208.7, 300 sec: 3098.1). Total num frames: 2957312. Throughput: 0: 816.8. Samples: 213464. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:22,645][01455] Avg episode reward: [(0, '21.788')] +[2023-09-05 00:09:22,657][21231] Saving new best policy, reward=21.788! +[2023-09-05 00:09:25,188][21244] Updated weights for policy 0, policy_version 724 (0.0013) +[2023-09-05 00:09:27,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3086.6). Total num frames: 2969600. Throughput: 0: 801.6. Samples: 215754. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:09:27,647][01455] Avg episode reward: [(0, '21.768')] +[2023-09-05 00:09:32,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 3075.6). Total num frames: 2981888. Throughput: 0: 792.3. Samples: 219536. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:32,649][01455] Avg episode reward: [(0, '22.810')] +[2023-09-05 00:09:32,665][21231] Saving new best policy, reward=22.810! +[2023-09-05 00:09:37,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 3079.1). Total num frames: 2998272. Throughput: 0: 811.6. Samples: 224236. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:37,649][01455] Avg episode reward: [(0, '21.432')] +[2023-09-05 00:09:38,958][21244] Updated weights for policy 0, policy_version 734 (0.0022) +[2023-09-05 00:09:42,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3096.3). Total num frames: 3018752. Throughput: 0: 815.9. Samples: 227216. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:42,648][01455] Avg episode reward: [(0, '20.783')] +[2023-09-05 00:09:47,645][01455] Fps is (10 sec: 3685.8, 60 sec: 3276.8, 300 sec: 3151.8). Total num frames: 3035136. Throughput: 0: 803.0. Samples: 232594. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:47,648][01455] Avg episode reward: [(0, '20.648')] +[2023-09-05 00:09:51,664][21244] Updated weights for policy 0, policy_version 744 (0.0021) +[2023-09-05 00:09:52,648][01455] Fps is (10 sec: 2865.9, 60 sec: 3276.6, 300 sec: 3179.6). Total num frames: 3047424. Throughput: 0: 795.6. Samples: 236418. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:09:52,651][01455] Avg episode reward: [(0, '20.420')] +[2023-09-05 00:09:57,643][01455] Fps is (10 sec: 2867.6, 60 sec: 3208.5, 300 sec: 3207.4). Total num frames: 3063808. Throughput: 0: 795.4. Samples: 238314. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:09:57,648][01455] Avg episode reward: [(0, '19.663')] +[2023-09-05 00:10:02,643][01455] Fps is (10 sec: 3688.1, 60 sec: 3208.5, 300 sec: 3207.4). Total num frames: 3084288. Throughput: 0: 818.3. Samples: 244188. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:10:02,646][01455] Avg episode reward: [(0, '19.543')] +[2023-09-05 00:10:03,417][21244] Updated weights for policy 0, policy_version 754 (0.0028) +[2023-09-05 00:10:07,645][01455] Fps is (10 sec: 3685.8, 60 sec: 3276.7, 300 sec: 3193.6). Total num frames: 3100672. Throughput: 0: 803.3. Samples: 249612. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:10:07,647][01455] Avg episode reward: [(0, '20.223')] +[2023-09-05 00:10:12,645][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3193.5). Total num frames: 3112960. Throughput: 0: 793.8. Samples: 251474. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-09-05 00:10:12,654][01455] Avg episode reward: [(0, '20.360')] +[2023-09-05 00:10:17,643][01455] Fps is (10 sec: 2458.0, 60 sec: 3140.3, 300 sec: 3193.5). Total num frames: 3125248. Throughput: 0: 793.2. Samples: 255230. 
Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0) +[2023-09-05 00:10:17,645][01455] Avg episode reward: [(0, '21.639')] +[2023-09-05 00:10:17,948][21244] Updated weights for policy 0, policy_version 764 (0.0024) +[2023-09-05 00:10:22,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 3193.5). Total num frames: 3145728. Throughput: 0: 819.6. Samples: 261118. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:10:22,646][01455] Avg episode reward: [(0, '21.599')] +[2023-09-05 00:10:22,658][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000768_3145728.pth... +[2023-09-05 00:10:22,776][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000580_2375680.pth +[2023-09-05 00:10:27,643][01455] Fps is (10 sec: 4096.0, 60 sec: 3276.8, 300 sec: 3207.4). Total num frames: 3166208. Throughput: 0: 818.2. Samples: 264034. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0) +[2023-09-05 00:10:27,649][01455] Avg episode reward: [(0, '21.347')] +[2023-09-05 00:10:28,790][21244] Updated weights for policy 0, policy_version 774 (0.0019) +[2023-09-05 00:10:32,645][01455] Fps is (10 sec: 3276.3, 60 sec: 3276.7, 300 sec: 3207.4). Total num frames: 3178496. Throughput: 0: 798.8. Samples: 268538. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-09-05 00:10:32,654][01455] Avg episode reward: [(0, '22.888')] +[2023-09-05 00:10:32,668][21231] Saving new best policy, reward=22.888! +[2023-09-05 00:10:37,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 3207.4). Total num frames: 3190784. Throughput: 0: 795.9. Samples: 272232. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:10:37,650][01455] Avg episode reward: [(0, '23.017')] +[2023-09-05 00:10:37,658][21231] Saving new best policy, reward=23.017! +[2023-09-05 00:10:42,534][21244] Updated weights for policy 0, policy_version 784 (0.0023) +[2023-09-05 00:10:42,643][01455] Fps is (10 sec: 3277.3, 60 sec: 3208.5, 300 sec: 3207.4). Total num frames: 3211264. Throughput: 0: 812.2. Samples: 274864. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:10:42,648][01455] Avg episode reward: [(0, '22.696')] +[2023-09-05 00:10:47,644][01455] Fps is (10 sec: 4095.8, 60 sec: 3276.9, 300 sec: 3221.3). Total num frames: 3231744. Throughput: 0: 817.1. Samples: 280956. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:10:47,649][01455] Avg episode reward: [(0, '22.603')] +[2023-09-05 00:10:52,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3277.0, 300 sec: 3207.4). Total num frames: 3244032. Throughput: 0: 796.4. Samples: 285448. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0) +[2023-09-05 00:10:52,646][01455] Avg episode reward: [(0, '22.896')] +[2023-09-05 00:10:55,330][21244] Updated weights for policy 0, policy_version 794 (0.0020) +[2023-09-05 00:10:57,643][01455] Fps is (10 sec: 2457.7, 60 sec: 3208.5, 300 sec: 3207.4). Total num frames: 3256320. Throughput: 0: 795.5. Samples: 287270. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:10:57,646][01455] Avg episode reward: [(0, '22.279')] +[2023-09-05 00:11:02,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 3207.4). Total num frames: 3272704. Throughput: 0: 816.3. Samples: 291964. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:02,646][01455] Avg episode reward: [(0, '22.698')] +[2023-09-05 00:11:06,911][21244] Updated weights for policy 0, policy_version 804 (0.0024) +[2023-09-05 00:11:07,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.6, 300 sec: 3207.4). 
Total num frames: 3293184. Throughput: 0: 820.4. Samples: 298036. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:11:07,646][01455] Avg episode reward: [(0, '21.385')] +[2023-09-05 00:11:12,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3207.4). Total num frames: 3309568. Throughput: 0: 811.6. Samples: 300558. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:11:12,649][01455] Avg episode reward: [(0, '21.613')] +[2023-09-05 00:11:17,643][01455] Fps is (10 sec: 2867.1, 60 sec: 3276.8, 300 sec: 3207.4). Total num frames: 3321856. Throughput: 0: 798.2. Samples: 304456. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:11:17,651][01455] Avg episode reward: [(0, '21.563')] +[2023-09-05 00:11:20,962][21244] Updated weights for policy 0, policy_version 814 (0.0013) +[2023-09-05 00:11:22,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3221.3). Total num frames: 3338240. Throughput: 0: 820.7. Samples: 309164. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:22,646][01455] Avg episode reward: [(0, '21.900')] +[2023-09-05 00:11:27,643][01455] Fps is (10 sec: 3686.5, 60 sec: 3208.5, 300 sec: 3221.3). Total num frames: 3358720. Throughput: 0: 828.8. Samples: 312160. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:11:27,646][01455] Avg episode reward: [(0, '21.555')] +[2023-09-05 00:11:31,907][21244] Updated weights for policy 0, policy_version 824 (0.0018) +[2023-09-05 00:11:32,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.9, 300 sec: 3221.3). Total num frames: 3375104. Throughput: 0: 817.2. Samples: 317730. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:32,647][01455] Avg episode reward: [(0, '21.736')] +[2023-09-05 00:11:37,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3207.4). Total num frames: 3387392. Throughput: 0: 800.0. Samples: 321450. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:37,650][01455] Avg episode reward: [(0, '22.036')] +[2023-09-05 00:11:42,643][01455] Fps is (10 sec: 2867.1, 60 sec: 3208.5, 300 sec: 3221.3). Total num frames: 3403776. Throughput: 0: 801.2. Samples: 323322. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:42,651][01455] Avg episode reward: [(0, '21.637')] +[2023-09-05 00:11:45,259][21244] Updated weights for policy 0, policy_version 834 (0.0040) +[2023-09-05 00:11:47,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.6, 300 sec: 3221.3). Total num frames: 3424256. Throughput: 0: 829.7. Samples: 329300. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:11:47,646][01455] Avg episode reward: [(0, '22.230')] +[2023-09-05 00:11:52,643][01455] Fps is (10 sec: 3686.5, 60 sec: 3276.8, 300 sec: 3221.3). Total num frames: 3440640. Throughput: 0: 816.5. Samples: 334778. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:11:52,648][01455] Avg episode reward: [(0, '22.368')] +[2023-09-05 00:11:57,646][01455] Fps is (10 sec: 2866.4, 60 sec: 3276.7, 300 sec: 3221.2). Total num frames: 3452928. Throughput: 0: 804.2. Samples: 336748. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:11:57,648][01455] Avg episode reward: [(0, '23.384')] +[2023-09-05 00:11:57,653][21231] Saving new best policy, reward=23.384! +[2023-09-05 00:11:58,091][21244] Updated weights for policy 0, policy_version 844 (0.0016) +[2023-09-05 00:12:02,646][01455] Fps is (10 sec: 2866.5, 60 sec: 3276.7, 300 sec: 3221.2). Total num frames: 3469312. Throughput: 0: 800.7. Samples: 340488. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:12:02,652][01455] Avg episode reward: [(0, '22.825')] +[2023-09-05 00:12:07,643][01455] Fps is (10 sec: 3277.7, 60 sec: 3208.5, 300 sec: 3221.3). Total num frames: 3485696. Throughput: 0: 828.8. Samples: 346460. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:12:07,648][01455] Avg episode reward: [(0, '22.985')] +[2023-09-05 00:12:09,765][21244] Updated weights for policy 0, policy_version 854 (0.0024) +[2023-09-05 00:12:12,643][01455] Fps is (10 sec: 3687.4, 60 sec: 3276.8, 300 sec: 3221.3). Total num frames: 3506176. Throughput: 0: 829.2. Samples: 349474. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:12:12,647][01455] Avg episode reward: [(0, '21.902')] +[2023-09-05 00:12:17,644][01455] Fps is (10 sec: 3276.5, 60 sec: 3276.8, 300 sec: 3221.3). Total num frames: 3518464. Throughput: 0: 802.8. Samples: 353858. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:12:17,647][01455] Avg episode reward: [(0, '22.245')] +[2023-09-05 00:12:22,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 3534848. Throughput: 0: 807.7. Samples: 357798. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:12:22,651][01455] Avg episode reward: [(0, '22.492')] +[2023-09-05 00:12:22,663][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000863_3534848.pth... +[2023-09-05 00:12:22,793][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000675_2764800.pth +[2023-09-05 00:12:23,678][21244] Updated weights for policy 0, policy_version 864 (0.0018) +[2023-09-05 00:12:27,643][01455] Fps is (10 sec: 3277.1, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 3551232. Throughput: 0: 831.3. Samples: 360732. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:12:27,646][01455] Avg episode reward: [(0, '20.818')] +[2023-09-05 00:12:32,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 3571712. Throughput: 0: 834.5. Samples: 366854. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:12:32,645][01455] Avg episode reward: [(0, '20.425')] +[2023-09-05 00:12:34,459][21244] Updated weights for policy 0, policy_version 874 (0.0021) +[2023-09-05 00:12:37,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3221.3). Total num frames: 3584000. Throughput: 0: 806.0. Samples: 371046. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:12:37,655][01455] Avg episode reward: [(0, '20.595')] +[2023-09-05 00:12:42,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3235.2). Total num frames: 3600384. Throughput: 0: 804.4. Samples: 372944. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:12:42,646][01455] Avg episode reward: [(0, '22.838')] +[2023-09-05 00:12:47,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 3616768. Throughput: 0: 833.2. Samples: 377978. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:12:47,646][01455] Avg episode reward: [(0, '22.708')] +[2023-09-05 00:12:47,805][21244] Updated weights for policy 0, policy_version 884 (0.0013) +[2023-09-05 00:12:52,645][01455] Fps is (10 sec: 3685.8, 60 sec: 3276.7, 300 sec: 3235.1). Total num frames: 3637248. Throughput: 0: 833.0. Samples: 383946. 
Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0) +[2023-09-05 00:12:52,647][01455] Avg episode reward: [(0, '22.196')] +[2023-09-05 00:12:57,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3345.2, 300 sec: 3235.2). Total num frames: 3653632. Throughput: 0: 815.4. Samples: 386168. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:12:57,646][01455] Avg episode reward: [(0, '22.006')] +[2023-09-05 00:13:00,752][21244] Updated weights for policy 0, policy_version 894 (0.0028) +[2023-09-05 00:13:02,643][01455] Fps is (10 sec: 2867.6, 60 sec: 3276.9, 300 sec: 3235.1). Total num frames: 3665920. Throughput: 0: 802.2. Samples: 389956. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:13:02,646][01455] Avg episode reward: [(0, '23.477')] +[2023-09-05 00:13:02,666][21231] Saving new best policy, reward=23.477! +[2023-09-05 00:13:07,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 3682304. Throughput: 0: 823.4. Samples: 394852. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:13:07,645][01455] Avg episode reward: [(0, '25.075')] +[2023-09-05 00:13:07,655][21231] Saving new best policy, reward=25.075! +[2023-09-05 00:13:12,510][21244] Updated weights for policy 0, policy_version 904 (0.0025) +[2023-09-05 00:13:12,647][01455] Fps is (10 sec: 3685.1, 60 sec: 3276.6, 300 sec: 3249.0). Total num frames: 3702784. Throughput: 0: 822.8. Samples: 397760. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:13:12,651][01455] Avg episode reward: [(0, '24.802')] +[2023-09-05 00:13:17,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.9, 300 sec: 3221.3). Total num frames: 3715072. Throughput: 0: 803.9. Samples: 403030. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:13:17,650][01455] Avg episode reward: [(0, '24.036')] +[2023-09-05 00:13:22,645][01455] Fps is (10 sec: 2458.1, 60 sec: 3208.4, 300 sec: 3221.2). Total num frames: 3727360. Throughput: 0: 795.3. Samples: 406838. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0) +[2023-09-05 00:13:22,652][01455] Avg episode reward: [(0, '24.930')] +[2023-09-05 00:13:26,565][21244] Updated weights for policy 0, policy_version 914 (0.0043) +[2023-09-05 00:13:27,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 3743744. Throughput: 0: 798.2. Samples: 408864. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0) +[2023-09-05 00:13:27,651][01455] Avg episode reward: [(0, '23.891')] +[2023-09-05 00:13:32,650][01455] Fps is (10 sec: 3684.4, 60 sec: 3208.2, 300 sec: 3235.1). Total num frames: 3764224. Throughput: 0: 821.6. Samples: 414956. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0) +[2023-09-05 00:13:32,653][01455] Avg episode reward: [(0, '24.922')] +[2023-09-05 00:13:37,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 3780608. Throughput: 0: 802.2. Samples: 420046. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:13:37,647][01455] Avg episode reward: [(0, '24.345')] +[2023-09-05 00:13:38,029][21244] Updated weights for policy 0, policy_version 924 (0.0013) +[2023-09-05 00:13:42,643][01455] Fps is (10 sec: 2869.2, 60 sec: 3208.5, 300 sec: 3235.2). Total num frames: 3792896. Throughput: 0: 794.2. Samples: 421908. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0) +[2023-09-05 00:13:42,651][01455] Avg episode reward: [(0, '22.440')] +[2023-09-05 00:13:47,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 3809280. Throughput: 0: 801.4. Samples: 426018. 
Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:13:47,646][01455] Avg episode reward: [(0, '23.694')]
+[2023-09-05 00:13:50,852][21244] Updated weights for policy 0, policy_version 934 (0.0013)
+[2023-09-05 00:13:52,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.6, 300 sec: 3249.0). Total num frames: 3829760. Throughput: 0: 826.3. Samples: 432034. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:13:52,646][01455] Avg episode reward: [(0, '23.746')]
+[2023-09-05 00:13:57,644][01455] Fps is (10 sec: 3686.0, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 3846144. Throughput: 0: 828.2. Samples: 435026. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-05 00:13:57,647][01455] Avg episode reward: [(0, '24.889')]
+[2023-09-05 00:14:02,645][01455] Fps is (10 sec: 2866.8, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 3858432. Throughput: 0: 798.7. Samples: 438972. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-05 00:14:02,654][01455] Avg episode reward: [(0, '26.132')]
+[2023-09-05 00:14:02,671][21231] Saving new best policy, reward=26.132!
+[2023-09-05 00:14:04,741][21244] Updated weights for policy 0, policy_version 944 (0.0029)
+[2023-09-05 00:14:07,643][01455] Fps is (10 sec: 2867.5, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 3874816. Throughput: 0: 800.8. Samples: 442872. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:14:07,645][01455] Avg episode reward: [(0, '25.699')]
+[2023-09-05 00:14:12,643][01455] Fps is (10 sec: 3687.0, 60 sec: 3208.7, 300 sec: 3249.0). Total num frames: 3895296. Throughput: 0: 821.5. Samples: 445832. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:14:12,648][01455] Avg episode reward: [(0, '25.436')]
+[2023-09-05 00:14:15,553][21244] Updated weights for policy 0, policy_version 954 (0.0022)
+[2023-09-05 00:14:17,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 3911680. Throughput: 0: 823.1. Samples: 451990. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:14:17,651][01455] Avg episode reward: [(0, '25.455')]
+[2023-09-05 00:14:22,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.9, 300 sec: 3235.1). Total num frames: 3923968. Throughput: 0: 799.7. Samples: 456032. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:14:22,650][01455] Avg episode reward: [(0, '23.758')]
+[2023-09-05 00:14:22,659][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000958_3923968.pth...
+[2023-09-05 00:14:22,859][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000768_3145728.pth
+[2023-09-05 00:14:27,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 3940352. Throughput: 0: 799.8. Samples: 457898. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:14:27,647][01455] Avg episode reward: [(0, '24.070')]
+[2023-09-05 00:14:29,708][21244] Updated weights for policy 0, policy_version 964 (0.0040)
+[2023-09-05 00:14:32,646][01455] Fps is (10 sec: 3275.9, 60 sec: 3208.8, 300 sec: 3249.0). Total num frames: 3956736. Throughput: 0: 824.6. Samples: 463128. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:14:32,649][01455] Avg episode reward: [(0, '23.626')]
+[2023-09-05 00:14:37,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 3977216. Throughput: 0: 823.0. Samples: 469070. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:14:37,650][01455] Avg episode reward: [(0, '23.336')]
+[2023-09-05 00:14:41,082][21244] Updated weights for policy 0, policy_version 974 (0.0037)
+[2023-09-05 00:14:42,657][01455] Fps is (10 sec: 3682.2, 60 sec: 3344.3, 300 sec: 3248.9). Total num frames: 3993600. Throughput: 0: 801.9. Samples: 471120. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:14:42,673][01455] Avg episode reward: [(0, '22.428')]
+[2023-09-05 00:14:47,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3249.1). Total num frames: 4005888. Throughput: 0: 799.8. Samples: 474964. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:14:47,646][01455] Avg episode reward: [(0, '22.780')]
+[2023-09-05 00:14:52,643][01455] Fps is (10 sec: 2871.3, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4022272. Throughput: 0: 829.6. Samples: 480202. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-05 00:14:52,648][01455] Avg episode reward: [(0, '23.238')]
+[2023-09-05 00:14:53,937][21244] Updated weights for policy 0, policy_version 984 (0.0023)
+[2023-09-05 00:14:57,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.9, 300 sec: 3249.0). Total num frames: 4042752. Throughput: 0: 831.4. Samples: 483244. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:14:57,650][01455] Avg episode reward: [(0, '22.867')]
+[2023-09-05 00:15:02,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.9, 300 sec: 3235.2). Total num frames: 4055040. Throughput: 0: 801.5. Samples: 488056. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:15:02,647][01455] Avg episode reward: [(0, '22.885')]
+[2023-09-05 00:15:07,643][01455] Fps is (10 sec: 2457.6, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4067328. Throughput: 0: 793.6. Samples: 491746. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:15:07,651][01455] Avg episode reward: [(0, '22.939')]
+[2023-09-05 00:15:07,879][21244] Updated weights for policy 0, policy_version 994 (0.0030)
+[2023-09-05 00:15:12,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3262.9). Total num frames: 4087808. Throughput: 0: 801.4. Samples: 493960. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:15:12,646][01455] Avg episode reward: [(0, '23.473')]
+[2023-09-05 00:15:17,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4104192. Throughput: 0: 819.9. Samples: 500020. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-05 00:15:17,649][01455] Avg episode reward: [(0, '24.631')]
+[2023-09-05 00:15:18,678][21244] Updated weights for policy 0, policy_version 1004 (0.0034)
+[2023-09-05 00:15:22,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 4120576. Throughput: 0: 798.9. Samples: 505020. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:15:22,647][01455] Avg episode reward: [(0, '23.824')]
+[2023-09-05 00:15:27,646][01455] Fps is (10 sec: 2866.3, 60 sec: 3208.4, 300 sec: 3235.1). Total num frames: 4132864. Throughput: 0: 794.2. Samples: 506850. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:15:27,651][01455] Avg episode reward: [(0, '24.361')]
+[2023-09-05 00:15:32,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.7, 300 sec: 3249.0). Total num frames: 4149248. Throughput: 0: 800.2. Samples: 510972. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-05 00:15:32,651][01455] Avg episode reward: [(0, '23.599')]
+[2023-09-05 00:15:32,907][21244] Updated weights for policy 0, policy_version 1014 (0.0015)
+[2023-09-05 00:15:37,643][01455] Fps is (10 sec: 3687.6, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4169728. Throughput: 0: 818.0. Samples: 517010. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:15:37,646][01455] Avg episode reward: [(0, '23.125')]
+[2023-09-05 00:15:42,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3209.3, 300 sec: 3235.2). Total num frames: 4186112. Throughput: 0: 817.2. Samples: 520018. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:15:42,646][01455] Avg episode reward: [(0, '22.910')]
+[2023-09-05 00:15:44,545][21244] Updated weights for policy 0, policy_version 1024 (0.0014)
+[2023-09-05 00:15:47,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4202496. Throughput: 0: 796.0. Samples: 523876. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-05 00:15:47,650][01455] Avg episode reward: [(0, '23.075')]
+[2023-09-05 00:15:52,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4214784. Throughput: 0: 806.9. Samples: 528056. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-05 00:15:52,645][01455] Avg episode reward: [(0, '22.078')]
+[2023-09-05 00:15:57,391][21244] Updated weights for policy 0, policy_version 1034 (0.0029)
+[2023-09-05 00:15:57,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3262.9). Total num frames: 4235264. Throughput: 0: 825.2. Samples: 531092. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:15:57,648][01455] Avg episode reward: [(0, '22.427')]
+[2023-09-05 00:16:02,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4251648. Throughput: 0: 822.8. Samples: 537048. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
+[2023-09-05 00:16:02,652][01455] Avg episode reward: [(0, '22.250')]
+[2023-09-05 00:16:07,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 4263936. Throughput: 0: 797.2. Samples: 540894. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-09-05 00:16:07,649][01455] Avg episode reward: [(0, '24.359')]
+[2023-09-05 00:16:11,011][21244] Updated weights for policy 0, policy_version 1044 (0.0013)
+[2023-09-05 00:16:12,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4280320. Throughput: 0: 797.9. Samples: 542752. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-05 00:16:12,646][01455] Avg episode reward: [(0, '23.563')]
+[2023-09-05 00:16:17,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4296704. Throughput: 0: 822.1. Samples: 547966. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+[2023-09-05 00:16:17,646][01455] Avg episode reward: [(0, '23.697')]
+[2023-09-05 00:16:21,883][21244] Updated weights for policy 0, policy_version 1054 (0.0021)
+[2023-09-05 00:16:22,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4317184. Throughput: 0: 824.0. Samples: 554088. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:16:22,650][01455] Avg episode reward: [(0, '24.447')]
+[2023-09-05 00:16:22,664][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001054_4317184.pth...
+[2023-09-05 00:16:22,795][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000863_3534848.pth
+[2023-09-05 00:16:27,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3277.0, 300 sec: 3235.1). Total num frames: 4329472. Throughput: 0: 799.4. Samples: 555992. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:16:27,646][01455] Avg episode reward: [(0, '24.161')]
+[2023-09-05 00:16:32,645][01455] Fps is (10 sec: 2457.2, 60 sec: 3208.4, 300 sec: 3235.1). Total num frames: 4341760. Throughput: 0: 796.6. Samples: 559726. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2023-09-05 00:16:32,650][01455] Avg episode reward: [(0, '24.555')]
+[2023-09-05 00:16:36,036][21244] Updated weights for policy 0, policy_version 1064 (0.0016)
+[2023-09-05 00:16:37,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4362240. Throughput: 0: 822.0. Samples: 565048. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:16:37,648][01455] Avg episode reward: [(0, '23.841')]
+[2023-09-05 00:16:42,643][01455] Fps is (10 sec: 4096.6, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4382720. Throughput: 0: 821.1. Samples: 568044. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:16:42,646][01455] Avg episode reward: [(0, '24.502')]
+[2023-09-05 00:16:47,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4395008. Throughput: 0: 795.8. Samples: 572860. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+[2023-09-05 00:16:47,652][01455] Avg episode reward: [(0, '25.487')]
+[2023-09-05 00:16:48,125][21244] Updated weights for policy 0, policy_version 1074 (0.0018)
+[2023-09-05 00:16:52,643][01455] Fps is (10 sec: 2457.7, 60 sec: 3208.5, 300 sec: 3235.2). Total num frames: 4407296. Throughput: 0: 796.5. Samples: 576738. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:16:52,646][01455] Avg episode reward: [(0, '25.465')]
+[2023-09-05 00:16:57,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3249.1). Total num frames: 4427776. Throughput: 0: 807.2. Samples: 579076. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:16:57,651][01455] Avg episode reward: [(0, '27.068')]
+[2023-09-05 00:16:57,656][21231] Saving new best policy, reward=27.068!
+[2023-09-05 00:17:00,584][21244] Updated weights for policy 0, policy_version 1084 (0.0024)
+[2023-09-05 00:17:02,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4444160. Throughput: 0: 822.7. Samples: 584986. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:02,646][01455] Avg episode reward: [(0, '28.327')]
+[2023-09-05 00:17:02,658][21231] Saving new best policy, reward=28.327!
+[2023-09-05 00:17:07,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 4460544. Throughput: 0: 793.2. Samples: 589782. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:17:07,657][01455] Avg episode reward: [(0, '27.065')]
+[2023-09-05 00:17:12,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.2). Total num frames: 4472832. Throughput: 0: 793.6. Samples: 591704. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-09-05 00:17:12,647][01455] Avg episode reward: [(0, '28.517')]
+[2023-09-05 00:17:12,659][21231] Saving new best policy, reward=28.517!
+[2023-09-05 00:17:14,967][21244] Updated weights for policy 0, policy_version 1094 (0.0030)
+[2023-09-05 00:17:17,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4489216. Throughput: 0: 803.7. Samples: 595892. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-05 00:17:17,645][01455] Avg episode reward: [(0, '27.792')]
+[2023-09-05 00:17:22,643][01455] Fps is (10 sec: 3686.3, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4509696. Throughput: 0: 820.8. Samples: 601986. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:22,651][01455] Avg episode reward: [(0, '25.270')]
+[2023-09-05 00:17:25,229][21244] Updated weights for policy 0, policy_version 1104 (0.0014)
+[2023-09-05 00:17:27,643][01455] Fps is (10 sec: 3686.3, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 4526080. Throughput: 0: 817.3. Samples: 604824. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:17:27,652][01455] Avg episode reward: [(0, '25.377')]
+[2023-09-05 00:17:32,643][01455] Fps is (10 sec: 2867.3, 60 sec: 3276.9, 300 sec: 3235.1). Total num frames: 4538368. Throughput: 0: 796.9. Samples: 608722. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-09-05 00:17:32,649][01455] Avg episode reward: [(0, '24.273')]
+[2023-09-05 00:17:37,643][01455] Fps is (10 sec: 2867.3, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4554752. Throughput: 0: 805.5. Samples: 612986. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:37,645][01455] Avg episode reward: [(0, '23.519')]
+[2023-09-05 00:17:39,381][21244] Updated weights for policy 0, policy_version 1114 (0.0020)
+[2023-09-05 00:17:42,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.6, 300 sec: 3249.0). Total num frames: 4575232. Throughput: 0: 820.0. Samples: 615976. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:42,646][01455] Avg episode reward: [(0, '24.888')]
+[2023-09-05 00:17:47,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.2). Total num frames: 4591616. Throughput: 0: 820.9. Samples: 621926. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:47,648][01455] Avg episode reward: [(0, '25.323')]
+[2023-09-05 00:17:51,265][21244] Updated weights for policy 0, policy_version 1124 (0.0026)
+[2023-09-05 00:17:52,648][01455] Fps is (10 sec: 2865.9, 60 sec: 3276.6, 300 sec: 3221.2). Total num frames: 4603904. Throughput: 0: 799.7. Samples: 625774. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:17:52,650][01455] Avg episode reward: [(0, '26.045')]
+[2023-09-05 00:17:57,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4620288. Throughput: 0: 799.0. Samples: 627660. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:17:57,646][01455] Avg episode reward: [(0, '27.406')]
+[2023-09-05 00:18:02,643][01455] Fps is (10 sec: 3688.1, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4640768. Throughput: 0: 831.2. Samples: 633294. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:18:02,645][01455] Avg episode reward: [(0, '27.676')]
+[2023-09-05 00:18:03,581][21244] Updated weights for policy 0, policy_version 1134 (0.0023)
+[2023-09-05 00:18:07,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.2). Total num frames: 4657152. Throughput: 0: 820.5. Samples: 638906. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:07,646][01455] Avg episode reward: [(0, '27.992')]
+[2023-09-05 00:18:12,645][01455] Fps is (10 sec: 2866.5, 60 sec: 3276.7, 300 sec: 3235.1). Total num frames: 4669440. Throughput: 0: 799.5. Samples: 640804. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:18:12,649][01455] Avg episode reward: [(0, '28.667')]
+[2023-09-05 00:18:12,673][21231] Saving new best policy, reward=28.667!
+[2023-09-05 00:18:17,598][21244] Updated weights for policy 0, policy_version 1144 (0.0032)
+[2023-09-05 00:18:17,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3249.0). Total num frames: 4685824. Throughput: 0: 797.4. Samples: 644604. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:18:17,645][01455] Avg episode reward: [(0, '29.023')]
+[2023-09-05 00:18:17,653][21231] Saving new best policy, reward=29.023!
+[2023-09-05 00:18:22,643][01455] Fps is (10 sec: 3277.6, 60 sec: 3208.6, 300 sec: 3249.0). Total num frames: 4702208. Throughput: 0: 828.7. Samples: 650278. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:22,646][01455] Avg episode reward: [(0, '28.345')]
+[2023-09-05 00:18:22,664][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001148_4702208.pth...
+[2023-09-05 00:18:22,807][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000958_3923968.pth
+[2023-09-05 00:18:27,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3249.1). Total num frames: 4722688. Throughput: 0: 827.9. Samples: 653232. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-05 00:18:27,648][01455] Avg episode reward: [(0, '28.763')]
+[2023-09-05 00:18:28,619][21244] Updated weights for policy 0, policy_version 1154 (0.0016)
+[2023-09-05 00:18:32,644][01455] Fps is (10 sec: 3276.4, 60 sec: 3276.7, 300 sec: 3235.1). Total num frames: 4734976. Throughput: 0: 794.1. Samples: 657662. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:32,646][01455] Avg episode reward: [(0, '28.254')]
+[2023-09-05 00:18:37,644][01455] Fps is (10 sec: 2457.5, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4747264. Throughput: 0: 792.1. Samples: 661414. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:37,646][01455] Avg episode reward: [(0, '27.401')]
+[2023-09-05 00:18:42,242][21244] Updated weights for policy 0, policy_version 1164 (0.0027)
+[2023-09-05 00:18:42,643][01455] Fps is (10 sec: 3277.2, 60 sec: 3208.5, 300 sec: 3249.0). Total num frames: 4767744. Throughput: 0: 810.8. Samples: 664146. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:42,648][01455] Avg episode reward: [(0, '26.398')]
+[2023-09-05 00:18:47,647][01455] Fps is (10 sec: 4094.7, 60 sec: 3276.6, 300 sec: 3249.0). Total num frames: 4788224. Throughput: 0: 821.0. Samples: 670244. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:18:47,652][01455] Avg episode reward: [(0, '24.753')]
+[2023-09-05 00:18:52,643][01455] Fps is (10 sec: 3276.8, 60 sec: 3277.1, 300 sec: 3235.2). Total num frames: 4800512. Throughput: 0: 796.9. Samples: 674766. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:18:52,646][01455] Avg episode reward: [(0, '23.914')]
+[2023-09-05 00:18:54,913][21244] Updated weights for policy 0, policy_version 1174 (0.0013)
+[2023-09-05 00:18:57,643][01455] Fps is (10 sec: 2458.5, 60 sec: 3208.5, 300 sec: 3235.2). Total num frames: 4812800. Throughput: 0: 796.0. Samples: 676624. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:18:57,651][01455] Avg episode reward: [(0, '22.115')]
+[2023-09-05 00:19:02,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 3235.1). Total num frames: 4829184. Throughput: 0: 814.4. Samples: 681252. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+[2023-09-05 00:19:02,651][01455] Avg episode reward: [(0, '21.377')]
+[2023-09-05 00:19:06,920][21244] Updated weights for policy 0, policy_version 1184 (0.0027)
+[2023-09-05 00:19:07,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4849664. Throughput: 0: 820.3. Samples: 687190. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:07,648][01455] Avg episode reward: [(0, '21.602')]
+[2023-09-05 00:19:12,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.9, 300 sec: 3235.1). Total num frames: 4866048. Throughput: 0: 810.8. Samples: 689720. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:19:12,646][01455] Avg episode reward: [(0, '21.798')]
+[2023-09-05 00:19:17,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4878336. Throughput: 0: 797.5. Samples: 693550. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:17,649][01455] Avg episode reward: [(0, '22.291')]
+[2023-09-05 00:19:20,946][21244] Updated weights for policy 0, policy_version 1194 (0.0034)
+[2023-09-05 00:19:22,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4894720. Throughput: 0: 821.1. Samples: 698364. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:22,649][01455] Avg episode reward: [(0, '22.413')]
+[2023-09-05 00:19:27,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3249.1). Total num frames: 4915200. Throughput: 0: 826.5. Samples: 701340. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+[2023-09-05 00:19:27,646][01455] Avg episode reward: [(0, '21.777')]
+[2023-09-05 00:19:31,901][21244] Updated weights for policy 0, policy_version 1204 (0.0023)
+[2023-09-05 00:19:32,646][01455] Fps is (10 sec: 3685.2, 60 sec: 3276.7, 300 sec: 3235.1). Total num frames: 4931584. Throughput: 0: 814.3. Samples: 706886. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:32,653][01455] Avg episode reward: [(0, '21.654')]
+[2023-09-05 00:19:37,643][01455] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3221.4). Total num frames: 4943872. Throughput: 0: 797.7. Samples: 710662. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:37,650][01455] Avg episode reward: [(0, '22.148')]
+[2023-09-05 00:19:42,643][01455] Fps is (10 sec: 2868.1, 60 sec: 3208.5, 300 sec: 3235.1). Total num frames: 4960256. Throughput: 0: 799.3. Samples: 712592. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+[2023-09-05 00:19:42,646][01455] Avg episode reward: [(0, '22.363')]
+[2023-09-05 00:19:45,392][21244] Updated weights for policy 0, policy_version 1214 (0.0026)
+[2023-09-05 00:19:47,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3208.7, 300 sec: 3249.0). Total num frames: 4980736. Throughput: 0: 825.7. Samples: 718408. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2023-09-05 00:19:47,650][01455] Avg episode reward: [(0, '22.165')]
+[2023-09-05 00:19:52,643][01455] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3235.1). Total num frames: 4997120. Throughput: 0: 817.1. Samples: 723958. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+[2023-09-05 00:19:52,648][01455] Avg episode reward: [(0, '23.655')]
+[2023-09-05 00:19:54,846][21231] Stopping Batcher_0...
+[2023-09-05 00:19:54,847][21231] Loop batcher_evt_loop terminating...
+[2023-09-05 00:19:54,848][01455] Component Batcher_0 stopped!
+[2023-09-05 00:19:54,854][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2023-09-05 00:19:54,953][01455] Component RolloutWorker_w1 stopped!
+[2023-09-05 00:19:54,962][21247] Stopping RolloutWorker_w1...
+[2023-09-05 00:19:54,963][21247] Loop rollout_proc1_evt_loop terminating...
+[2023-09-05 00:19:54,974][01455] Component RolloutWorker_w7 stopped!
+[2023-09-05 00:19:54,985][21244] Weights refcount: 2 0
+[2023-09-05 00:19:54,973][21252] Stopping RolloutWorker_w7...
+[2023-09-05 00:19:54,994][01455] Component InferenceWorker_p0-w0 stopped!
+[2023-09-05 00:19:55,000][01455] Component RolloutWorker_w3 stopped!
+[2023-09-05 00:19:55,002][21248] Stopping RolloutWorker_w3...
+[2023-09-05 00:19:55,003][21248] Loop rollout_proc3_evt_loop terminating...
+[2023-09-05 00:19:55,004][21244] Stopping InferenceWorker_p0-w0...
+[2023-09-05 00:19:55,004][21244] Loop inference_proc0-0_evt_loop terminating...
+[2023-09-05 00:19:54,989][21252] Loop rollout_proc7_evt_loop terminating...
+[2023-09-05 00:19:55,019][21245] Stopping RolloutWorker_w0...
+[2023-09-05 00:19:55,019][21245] Loop rollout_proc0_evt_loop terminating...
+[2023-09-05 00:19:55,019][01455] Component RolloutWorker_w0 stopped!
+[2023-09-05 00:19:55,030][01455] Component RolloutWorker_w5 stopped!
+[2023-09-05 00:19:55,032][21249] Stopping RolloutWorker_w5...
+[2023-09-05 00:19:55,035][21249] Loop rollout_proc5_evt_loop terminating...
+[2023-09-05 00:19:55,072][21250] Stopping RolloutWorker_w4...
+[2023-09-05 00:19:55,074][01455] Component RolloutWorker_w4 stopped!
+[2023-09-05 00:19:55,076][21250] Loop rollout_proc4_evt_loop terminating...
+[2023-09-05 00:19:55,077][21231] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001054_4317184.pth
+[2023-09-05 00:19:55,087][01455] Component RolloutWorker_w2 stopped!
+[2023-09-05 00:19:55,089][21246] Stopping RolloutWorker_w2...
+[2023-09-05 00:19:55,098][21231] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2023-09-05 00:19:55,098][21246] Loop rollout_proc2_evt_loop terminating...
+[2023-09-05 00:19:55,151][01455] Component RolloutWorker_w6 stopped!
+[2023-09-05 00:19:55,156][21251] Stopping RolloutWorker_w6...
+[2023-09-05 00:19:55,157][21251] Loop rollout_proc6_evt_loop terminating...
+[2023-09-05 00:19:55,282][01455] Component LearnerWorker_p0 stopped!
+[2023-09-05 00:19:55,284][01455] Waiting for process learner_proc0 to stop...
+[2023-09-05 00:19:55,286][21231] Stopping LearnerWorker_p0...
+[2023-09-05 00:19:55,287][21231] Loop learner_proc0_evt_loop terminating...
+[2023-09-05 00:19:56,865][01455] Waiting for process inference_proc0-0 to join...
+[2023-09-05 00:19:57,426][01455] Waiting for process rollout_proc0 to join...
+[2023-09-05 00:19:59,744][01455] Waiting for process rollout_proc1 to join...
+[2023-09-05 00:20:00,140][01455] Waiting for process rollout_proc2 to join...
+[2023-09-05 00:20:00,142][01455] Waiting for process rollout_proc3 to join...
+[2023-09-05 00:20:00,145][01455] Waiting for process rollout_proc4 to join...
+[2023-09-05 00:20:00,150][01455] Waiting for process rollout_proc5 to join...
+[2023-09-05 00:20:00,152][01455] Waiting for process rollout_proc6 to join...
+[2023-09-05 00:20:00,154][01455] Waiting for process rollout_proc7 to join...
+[2023-09-05 00:20:00,158][01455] Batcher 0 profile tree view:
+batching: 21.3569, releasing_batches: 0.0177
+[2023-09-05 00:20:00,161][01455] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0000
+ wait_policy_total: 409.1899
+update_model: 6.5868
+ weight_update: 0.0019
+one_step: 0.0026
+ handle_policy_step: 461.5617
+ deserialize: 12.6187, stack: 2.3326, obs_to_device_normalize: 89.2713, forward: 251.7134, send_messages: 22.3127
+ prepare_outputs: 61.2108
+ to_cpu: 34.7263
+[2023-09-05 00:20:00,163][01455] Learner 0 profile tree view:
+misc: 0.0035, prepare_batch: 14.7406
+train: 56.1469
+ epoch_init: 0.0099, minibatch_init: 0.0062, losses_postprocess: 0.5078, kl_divergence: 0.4666, after_optimizer: 2.9281
+ calculate_losses: 19.3400
+ losses_init: 0.0028, forward_head: 1.1714, bptt_initial: 12.5733, tail: 0.8596, advantages_returns: 0.2281, losses: 2.7586
+ bptt: 1.4973
+ bptt_forward_core: 1.4179
+ update: 32.4588
+ clip: 23.5277
+[2023-09-05 00:20:00,165][01455] RolloutWorker_w0 profile tree view:
+wait_for_trajectories: 0.1987, enqueue_policy_requests: 115.9850, env_step: 676.3126, overhead: 19.0331, complete_rollouts: 5.5677
+save_policy_outputs: 16.8259
+ split_output_tensors: 7.9690
+[2023-09-05 00:20:00,168][01455] RolloutWorker_w7 profile tree view:
+wait_for_trajectories: 0.2649, enqueue_policy_requests: 124.0107, env_step: 671.4321, overhead: 19.3642, complete_rollouts: 5.5382
+save_policy_outputs: 16.6984
+ split_output_tensors: 7.8373
+[2023-09-05 00:20:00,170][01455] Loop Runner_EvtLoop terminating...
+[2023-09-05 00:20:00,172][01455] Runner profile tree view:
+main_loop: 934.4484
+[2023-09-05 00:20:00,173][01455] Collected {0: 5005312}, FPS: 3103.4
+[2023-09-05 00:20:00,208][01455] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-09-05 00:20:00,209][01455] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-09-05 00:20:00,211][01455] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-09-05 00:20:00,212][01455] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-09-05 00:20:00,215][01455] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-05 00:20:00,216][01455] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-09-05 00:20:00,217][01455] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-05 00:20:00,219][01455] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-09-05 00:20:00,220][01455] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2023-09-05 00:20:00,221][01455] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2023-09-05 00:20:00,222][01455] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-09-05 00:20:00,223][01455] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-09-05 00:20:00,225][01455] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-09-05 00:20:00,226][01455] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-09-05 00:20:00,227][01455] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-09-05 00:20:00,270][01455] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-05 00:20:00,274][01455] RunningMeanStd input shape: (1,)
+[2023-09-05 00:20:00,287][01455] ConvEncoder: input_channels=3
+[2023-09-05 00:20:00,325][01455] Conv encoder output size: 512
+[2023-09-05 00:20:00,327][01455] Policy head output size: 512
+[2023-09-05 00:20:00,347][01455] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2023-09-05 00:20:00,845][01455] Num frames 100...
+[2023-09-05 00:20:00,994][01455] Num frames 200...
+[2023-09-05 00:20:01,133][01455] Num frames 300...
+[2023-09-05 00:20:01,273][01455] Num frames 400...
+[2023-09-05 00:20:01,430][01455] Num frames 500...
+[2023-09-05 00:20:01,569][01455] Num frames 600...
+[2023-09-05 00:20:01,709][01455] Num frames 700...
+[2023-09-05 00:20:01,856][01455] Num frames 800...
+[2023-09-05 00:20:01,996][01455] Num frames 900...
+[2023-09-05 00:20:02,130][01455] Num frames 1000...
+[2023-09-05 00:20:02,269][01455] Num frames 1100...
+[2023-09-05 00:20:02,427][01455] Num frames 1200...
+[2023-09-05 00:20:02,564][01455] Num frames 1300...
+[2023-09-05 00:20:02,708][01455] Num frames 1400...
+[2023-09-05 00:20:02,850][01455] Num frames 1500...
+[2023-09-05 00:20:03,018][01455] Avg episode rewards: #0: 40.829, true rewards: #0: 15.830
+[2023-09-05 00:20:03,021][01455] Avg episode reward: 40.829, avg true_objective: 15.830
+[2023-09-05 00:20:03,048][01455] Num frames 1600...
+[2023-09-05 00:20:03,189][01455] Num frames 1700...
+[2023-09-05 00:20:03,331][01455] Num frames 1800...
+[2023-09-05 00:20:03,464][01455] Num frames 1900...
+[2023-09-05 00:20:03,609][01455] Num frames 2000...
+[2023-09-05 00:20:03,751][01455] Num frames 2100...
+[2023-09-05 00:20:03,894][01455] Num frames 2200...
+[2023-09-05 00:20:04,042][01455] Num frames 2300...
+[2023-09-05 00:20:04,221][01455] Avg episode rewards: #0: 28.915, true rewards: #0: 11.915
+[2023-09-05 00:20:04,222][01455] Avg episode reward: 28.915, avg true_objective: 11.915
+[2023-09-05 00:20:04,249][01455] Num frames 2400...
+[2023-09-05 00:20:04,396][01455] Num frames 2500...
+[2023-09-05 00:20:04,535][01455] Num frames 2600...
+[2023-09-05 00:20:04,677][01455] Num frames 2700...
+[2023-09-05 00:20:04,848][01455] Num frames 2800...
+[2023-09-05 00:20:05,006][01455] Num frames 2900...
+[2023-09-05 00:20:05,138][01455] Num frames 3000...
+[2023-09-05 00:20:05,276][01455] Num frames 3100...
+[2023-09-05 00:20:05,416][01455] Num frames 3200...
+[2023-09-05 00:20:05,552][01455] Num frames 3300...
+[2023-09-05 00:20:05,693][01455] Num frames 3400...
+[2023-09-05 00:20:05,828][01455] Num frames 3500...
+[2023-09-05 00:20:05,963][01455] Num frames 3600...
+[2023-09-05 00:20:06,106][01455] Num frames 3700...
+[2023-09-05 00:20:06,240][01455] Num frames 3800...
+[2023-09-05 00:20:06,378][01455] Num frames 3900...
+[2023-09-05 00:20:06,518][01455] Num frames 4000...
+[2023-09-05 00:20:06,688][01455] Avg episode rewards: #0: 34.613, true rewards: #0: 13.613
+[2023-09-05 00:20:06,690][01455] Avg episode reward: 34.613, avg true_objective: 13.613
+[2023-09-05 00:20:06,714][01455] Num frames 4100...
+[2023-09-05 00:20:06,852][01455] Num frames 4200...
+[2023-09-05 00:20:06,987][01455] Num frames 4300...
+[2023-09-05 00:20:07,117][01455] Num frames 4400...
+[2023-09-05 00:20:07,255][01455] Num frames 4500...
+[2023-09-05 00:20:07,389][01455] Num frames 4600...
+[2023-09-05 00:20:07,529][01455] Num frames 4700...
+[2023-09-05 00:20:07,669][01455] Num frames 4800...
+[2023-09-05 00:20:07,800][01455] Num frames 4900...
+[2023-09-05 00:20:07,935][01455] Num frames 5000...
+[2023-09-05 00:20:08,076][01455] Num frames 5100...
+[2023-09-05 00:20:08,211][01455] Num frames 5200...
+[2023-09-05 00:20:08,350][01455] Num frames 5300...
+[2023-09-05 00:20:08,409][01455] Avg episode rewards: #0: 32.502, true rewards: #0: 13.252
+[2023-09-05 00:20:08,411][01455] Avg episode reward: 32.502, avg true_objective: 13.252
+[2023-09-05 00:20:08,554][01455] Num frames 5400...
+[2023-09-05 00:20:08,709][01455] Num frames 5500...
+[2023-09-05 00:20:08,842][01455] Num frames 5600...
+[2023-09-05 00:20:08,986][01455] Num frames 5700...
+[2023-09-05 00:20:09,070][01455] Avg episode rewards: #0: 27.234, true rewards: #0: 11.434
+[2023-09-05 00:20:09,071][01455] Avg episode reward: 27.234, avg true_objective: 11.434
+[2023-09-05 00:20:09,193][01455] Num frames 5800...
+[2023-09-05 00:20:09,333][01455] Num frames 5900...
+[2023-09-05 00:20:09,477][01455] Num frames 6000...
+[2023-09-05 00:20:09,690][01455] Num frames 6100...
+[2023-09-05 00:20:09,890][01455] Num frames 6200...
+[2023-09-05 00:20:10,008][01455] Avg episode rewards: #0: 23.882, true rewards: #0: 10.382
+[2023-09-05 00:20:10,015][01455] Avg episode reward: 23.882, avg true_objective: 10.382
+[2023-09-05 00:20:10,150][01455] Num frames 6300...
+[2023-09-05 00:20:10,341][01455] Num frames 6400...
+[2023-09-05 00:20:10,561][01455] Num frames 6500...
+[2023-09-05 00:20:10,762][01455] Num frames 6600...
+[2023-09-05 00:20:10,956][01455] Num frames 6700...
+[2023-09-05 00:20:11,154][01455] Num frames 6800...
+[2023-09-05 00:20:11,348][01455] Num frames 6900...
+[2023-09-05 00:20:11,412][01455] Avg episode rewards: #0: 22.430, true rewards: #0: 9.859
+[2023-09-05 00:20:11,415][01455] Avg episode reward: 22.430, avg true_objective: 9.859
+[2023-09-05 00:20:11,612][01455] Num frames 7000...
+[2023-09-05 00:20:11,812][01455] Num frames 7100...
+[2023-09-05 00:20:12,008][01455] Num frames 7200...
+[2023-09-05 00:20:12,206][01455] Num frames 7300...
+[2023-09-05 00:20:12,399][01455] Num frames 7400...
+[2023-09-05 00:20:12,607][01455] Num frames 7500...
+[2023-09-05 00:20:12,803][01455] Num frames 7600...
+[2023-09-05 00:20:13,005][01455] Num frames 7700...
+[2023-09-05 00:20:13,196][01455] Num frames 7800...
+[2023-09-05 00:20:13,392][01455] Num frames 7900...
+[2023-09-05 00:20:13,599][01455] Num frames 8000...
+[2023-09-05 00:20:13,752][01455] Num frames 8100...
+[2023-09-05 00:20:13,892][01455] Num frames 8200...
+[2023-09-05 00:20:14,038][01455] Num frames 8300...
+[2023-09-05 00:20:14,182][01455] Num frames 8400...
+[2023-09-05 00:20:14,316][01455] Num frames 8500...
+[2023-09-05 00:20:14,461][01455] Num frames 8600...
+[2023-09-05 00:20:14,611][01455] Num frames 8700...
+[2023-09-05 00:20:14,765][01455] Avg episode rewards: #0: 26.454, true rewards: #0: 10.954
+[2023-09-05 00:20:14,766][01455] Avg episode reward: 26.454, avg true_objective: 10.954
+[2023-09-05 00:20:14,819][01455] Num frames 8800...
+[2023-09-05 00:20:14,956][01455] Num frames 8900...
+[2023-09-05 00:20:15,094][01455] Num frames 9000...
+[2023-09-05 00:20:15,237][01455] Num frames 9100...
+[2023-09-05 00:20:15,372][01455] Num frames 9200...
+[2023-09-05 00:20:15,509][01455] Num frames 9300...
+[2023-09-05 00:20:15,649][01455] Num frames 9400...
+[2023-09-05 00:20:15,798][01455] Num frames 9500...
+[2023-09-05 00:20:15,932][01455] Num frames 9600...
+[2023-09-05 00:20:16,073][01455] Num frames 9700...
+[2023-09-05 00:20:16,213][01455] Num frames 9800...
+[2023-09-05 00:20:16,343][01455] Num frames 9900...
+[2023-09-05 00:20:16,486][01455] Num frames 10000...
+[2023-09-05 00:20:16,627][01455] Num frames 10100...
+[2023-09-05 00:20:16,774][01455] Num frames 10200...
+[2023-09-05 00:20:16,913][01455] Num frames 10300...
+[2023-09-05 00:20:17,059][01455] Num frames 10400...
+[2023-09-05 00:20:17,195][01455] Num frames 10500...
+[2023-09-05 00:20:17,336][01455] Num frames 10600...
+[2023-09-05 00:20:17,511][01455] Avg episode rewards: #0: 29.425, true rewards: #0: 11.870
+[2023-09-05 00:20:17,513][01455] Avg episode reward: 29.425, avg true_objective: 11.870
+[2023-09-05 00:20:17,540][01455] Num frames 10700...
+[2023-09-05 00:20:17,686][01455] Num frames 10800...
+[2023-09-05 00:20:17,826][01455] Num frames 10900...
+[2023-09-05 00:20:17,963][01455] Num frames 11000...
+[2023-09-05 00:20:18,094][01455] Num frames 11100...
+[2023-09-05 00:20:18,232][01455] Num frames 11200...
+[2023-09-05 00:20:18,370][01455] Num frames 11300...
+[2023-09-05 00:20:18,503][01455] Num frames 11400...
+[2023-09-05 00:20:18,646][01455] Num frames 11500...
+[2023-09-05 00:20:18,794][01455] Num frames 11600...
+[2023-09-05 00:20:18,928][01455] Num frames 11700...
+[2023-09-05 00:20:19,072][01455] Num frames 11800...
+[2023-09-05 00:20:19,258][01455] Avg episode rewards: #0: 28.999, true rewards: #0: 11.899
+[2023-09-05 00:20:19,260][01455] Avg episode reward: 28.999, avg true_objective: 11.899
+[2023-09-05 00:20:19,265][01455] Num frames 11900...
+[2023-09-05 00:21:35,923][01455] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2023-09-05 00:21:36,528][01455] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2023-09-05 00:21:36,531][01455] Overriding arg 'num_workers' with value 1 passed from command line
+[2023-09-05 00:21:36,533][01455] Adding new argument 'no_render'=True that is not in the saved config file!
+[2023-09-05 00:21:36,535][01455] Adding new argument 'save_video'=True that is not in the saved config file!
+[2023-09-05 00:21:36,537][01455] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2023-09-05 00:21:36,539][01455] Adding new argument 'video_name'=None that is not in the saved config file!
+[2023-09-05 00:21:36,540][01455] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2023-09-05 00:21:36,541][01455] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2023-09-05 00:21:36,543][01455] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2023-09-05 00:21:36,544][01455] Adding new argument 'hf_repository'='dimitarrskv/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2023-09-05 00:21:36,545][01455] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2023-09-05 00:21:36,546][01455] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2023-09-05 00:21:36,547][01455] Adding new argument 'train_script'=None that is not in the saved config file!
+[2023-09-05 00:21:36,548][01455] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2023-09-05 00:21:36,549][01455] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2023-09-05 00:21:36,590][01455] RunningMeanStd input shape: (3, 72, 128)
+[2023-09-05 00:21:36,592][01455] RunningMeanStd input shape: (1,)
+[2023-09-05 00:21:36,610][01455] ConvEncoder: input_channels=3
+[2023-09-05 00:21:36,666][01455] Conv encoder output size: 512
+[2023-09-05 00:21:36,668][01455] Policy head output size: 512
+[2023-09-05 00:21:36,706][01455] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001222_5005312.pth...
+[2023-09-05 00:21:37,558][01455] Num frames 100...
+[2023-09-05 00:21:37,798][01455] Num frames 200...
+[2023-09-05 00:21:37,998][01455] Num frames 300...
+[2023-09-05 00:21:38,184][01455] Num frames 400...
+[2023-09-05 00:21:38,391][01455] Num frames 500...
+[2023-09-05 00:21:38,602][01455] Num frames 600...
+[2023-09-05 00:21:38,793][01455] Num frames 700...
+[2023-09-05 00:21:38,981][01455] Num frames 800...
+[2023-09-05 00:21:39,168][01455] Num frames 900...
+[2023-09-05 00:21:39,378][01455] Num frames 1000...
+[2023-09-05 00:21:39,479][01455] Avg episode rewards: #0: 22.170, true rewards: #0: 10.170
+[2023-09-05 00:21:39,482][01455] Avg episode reward: 22.170, avg true_objective: 10.170
+[2023-09-05 00:21:39,654][01455] Num frames 1100...
+[2023-09-05 00:21:39,867][01455] Num frames 1200...
+[2023-09-05 00:21:40,120][01455] Num frames 1300...
+[2023-09-05 00:21:40,317][01455] Num frames 1400...
+[2023-09-05 00:21:40,550][01455] Num frames 1500...
+[2023-09-05 00:21:40,773][01455] Num frames 1600...
+[2023-09-05 00:21:41,016][01455] Num frames 1700...
+[2023-09-05 00:21:41,249][01455] Num frames 1800...
+[2023-09-05 00:21:41,488][01455] Num frames 1900...
+[2023-09-05 00:21:41,744][01455] Num frames 2000...
+[2023-09-05 00:21:41,989][01455] Num frames 2100...
+[2023-09-05 00:21:42,228][01455] Num frames 2200...
+[2023-09-05 00:21:42,502][01455] Avg episode rewards: #0: 25.485, true rewards: #0: 11.485
+[2023-09-05 00:21:42,505][01455] Avg episode reward: 25.485, avg true_objective: 11.485
+[2023-09-05 00:21:42,514][01455] Num frames 2300...
+[2023-09-05 00:21:42,750][01455] Num frames 2400...
+[2023-09-05 00:21:42,996][01455] Num frames 2500...
+[2023-09-05 00:21:43,239][01455] Num frames 2600...
+[2023-09-05 00:21:43,453][01455] Num frames 2700...
+[2023-09-05 00:21:43,651][01455] Num frames 2800...
+[2023-09-05 00:21:43,854][01455] Num frames 2900...
+[2023-09-05 00:21:44,058][01455] Num frames 3000...
+[2023-09-05 00:21:44,244][01455] Num frames 3100...
+[2023-09-05 00:21:44,434][01455] Num frames 3200...
+[2023-09-05 00:21:44,666][01455] Num frames 3300...
+[2023-09-05 00:21:44,894][01455] Num frames 3400...
+[2023-09-05 00:21:45,112][01455] Num frames 3500...
+[2023-09-05 00:21:45,351][01455] Num frames 3600...
+[2023-09-05 00:21:45,546][01455] Num frames 3700...
+[2023-09-05 00:21:45,743][01455] Num frames 3800...
+[2023-09-05 00:21:45,936][01455] Num frames 3900...
+[2023-09-05 00:21:46,125][01455] Num frames 4000...
+[2023-09-05 00:21:46,314][01455] Num frames 4100...
+[2023-09-05 00:21:46,507][01455] Num frames 4200...
+[2023-09-05 00:21:46,629][01455] Avg episode rewards: #0: 36.450, true rewards: #0: 14.117
+[2023-09-05 00:21:46,632][01455] Avg episode reward: 36.450, avg true_objective: 14.117
+[2023-09-05 00:21:46,758][01455] Num frames 4300...
+[2023-09-05 00:21:46,960][01455] Num frames 4400...
+[2023-09-05 00:21:47,140][01455] Num frames 4500...
+[2023-09-05 00:21:47,271][01455] Num frames 4600...
+[2023-09-05 00:21:47,400][01455] Num frames 4700...
+[2023-09-05 00:21:47,534][01455] Num frames 4800...
+[2023-09-05 00:21:47,670][01455] Num frames 4900...
+[2023-09-05 00:21:47,814][01455] Num frames 5000...
+[2023-09-05 00:21:47,952][01455] Num frames 5100...
+[2023-09-05 00:21:48,091][01455] Num frames 5200...
+[2023-09-05 00:21:48,230][01455] Num frames 5300...
+[2023-09-05 00:21:48,362][01455] Num frames 5400...
+[2023-09-05 00:21:48,505][01455] Num frames 5500...
+[2023-09-05 00:21:48,644][01455] Num frames 5600...
+[2023-09-05 00:21:48,783][01455] Num frames 5700...
+[2023-09-05 00:21:48,929][01455] Num frames 5800...
+[2023-09-05 00:21:49,070][01455] Num frames 5900...
+[2023-09-05 00:21:49,212][01455] Num frames 6000...
+[2023-09-05 00:21:49,352][01455] Num frames 6100...
+[2023-09-05 00:21:49,498][01455] Num frames 6200...
+[2023-09-05 00:21:49,637][01455] Num frames 6300...
+[2023-09-05 00:21:49,745][01455] Avg episode rewards: #0: 42.337, true rewards: #0: 15.838
+[2023-09-05 00:21:49,747][01455] Avg episode reward: 42.337, avg true_objective: 15.838
+[2023-09-05 00:21:49,837][01455] Num frames 6400...
+[2023-09-05 00:21:49,981][01455] Num frames 6500...
+[2023-09-05 00:21:50,121][01455] Num frames 6600...
+[2023-09-05 00:21:50,257][01455] Num frames 6700...
+[2023-09-05 00:21:50,396][01455] Num frames 6800...
+[2023-09-05 00:21:50,547][01455] Num frames 6900...
+[2023-09-05 00:21:50,684][01455] Num frames 7000...
+[2023-09-05 00:21:50,754][01455] Avg episode rewards: #0: 36.214, true rewards: #0: 14.014
+[2023-09-05 00:21:50,755][01455] Avg episode reward: 36.214, avg true_objective: 14.014
+[2023-09-05 00:21:50,886][01455] Num frames 7100...
+[2023-09-05 00:21:51,025][01455] Num frames 7200...
+[2023-09-05 00:21:51,165][01455] Num frames 7300...
+[2023-09-05 00:21:51,300][01455] Num frames 7400...
+[2023-09-05 00:21:51,433][01455] Num frames 7500...
+[2023-09-05 00:21:51,577][01455] Num frames 7600...
+[2023-09-05 00:21:51,716][01455] Num frames 7700...
+[2023-09-05 00:21:51,852][01455] Num frames 7800...
+[2023-09-05 00:21:51,988][01455] Num frames 7900...
+[2023-09-05 00:21:52,128][01455] Num frames 8000...
+[2023-09-05 00:21:52,266][01455] Num frames 8100...
+[2023-09-05 00:21:52,364][01455] Avg episode rewards: #0: 34.216, true rewards: #0: 13.550
+[2023-09-05 00:21:52,366][01455] Avg episode reward: 34.216, avg true_objective: 13.550
+[2023-09-05 00:21:52,464][01455] Num frames 8200...
+[2023-09-05 00:21:52,602][01455] Num frames 8300...
+[2023-09-05 00:21:52,736][01455] Num frames 8400...
+[2023-09-05 00:21:52,875][01455] Num frames 8500...
+[2023-09-05 00:21:53,019][01455] Num frames 8600...
+[2023-09-05 00:21:53,156][01455] Num frames 8700...
+[2023-09-05 00:21:53,291][01455] Num frames 8800...
+[2023-09-05 00:21:53,401][01455] Avg episode rewards: #0: 31.334, true rewards: #0: 12.620
+[2023-09-05 00:21:53,403][01455] Avg episode reward: 31.334, avg true_objective: 12.620
+[2023-09-05 00:21:53,548][01455] Num frames 8900...
+[2023-09-05 00:21:53,746][01455] Num frames 9000...
+[2023-09-05 00:21:53,960][01455] Num frames 9100...
+[2023-09-05 00:21:54,184][01455] Num frames 9200...
+[2023-09-05 00:21:54,345][01455] Avg episode rewards: #0: 28.695, true rewards: #0: 11.570
+[2023-09-05 00:21:54,347][01455] Avg episode reward: 28.695, avg true_objective: 11.570
+[2023-09-05 00:21:54,442][01455] Num frames 9300...
+[2023-09-05 00:21:54,652][01455] Num frames 9400...
+[2023-09-05 00:21:54,852][01455] Num frames 9500...
+[2023-09-05 00:21:55,067][01455] Num frames 9600...
+[2023-09-05 00:21:55,266][01455] Num frames 9700...
+[2023-09-05 00:21:55,480][01455] Num frames 9800...
+[2023-09-05 00:21:55,703][01455] Avg episode rewards: #0: 26.979, true rewards: #0: 10.979
+[2023-09-05 00:21:55,706][01455] Avg episode reward: 26.979, avg true_objective: 10.979
+[2023-09-05 00:21:55,748][01455] Num frames 9900...
+[2023-09-05 00:21:55,957][01455] Num frames 10000...
+[2023-09-05 00:21:56,169][01455] Num frames 10100...
+[2023-09-05 00:21:56,364][01455] Num frames 10200...
+[2023-09-05 00:21:56,574][01455] Num frames 10300...
+[2023-09-05 00:21:56,772][01455] Num frames 10400...
+[2023-09-05 00:21:56,977][01455] Num frames 10500...
+[2023-09-05 00:21:57,201][01455] Num frames 10600...
+[2023-09-05 00:21:57,408][01455] Num frames 10700...
+[2023-09-05 00:21:57,619][01455] Num frames 10800...
+[2023-09-05 00:21:57,822][01455] Num frames 10900...
+[2023-09-05 00:21:57,979][01455] Avg episode rewards: #0: 26.856, true rewards: #0: 10.956
+[2023-09-05 00:21:57,981][01455] Avg episode reward: 26.856, avg true_objective: 10.956
+[2023-09-05 00:23:09,303][01455] Replay video saved to /content/train_dir/default_experiment/replay.mp4!