diff --git "a/sf_log.txt" "b/sf_log.txt" new file mode 100644--- /dev/null +++ "b/sf_log.txt" @@ -0,0 +1,1970 @@ +[2023-07-23 05:41:22,746][00397] Saving configuration to /content/train_dir/default_experiment/config.json... +[2023-07-23 05:41:22,754][00397] Rollout worker 0 uses device cpu +[2023-07-23 05:41:22,758][00397] Rollout worker 1 uses device cpu +[2023-07-23 05:41:22,761][00397] Rollout worker 2 uses device cpu +[2023-07-23 05:41:22,765][00397] Rollout worker 3 uses device cpu +[2023-07-23 05:41:22,769][00397] Rollout worker 4 uses device cpu +[2023-07-23 05:41:22,772][00397] Rollout worker 5 uses device cpu +[2023-07-23 05:41:22,774][00397] Rollout worker 6 uses device cpu +[2023-07-23 05:41:22,777][00397] Rollout worker 7 uses device cpu +[2023-07-23 05:41:23,278][00397] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-23 05:41:23,283][00397] InferenceWorker_p0-w0: min num requests: 2 +[2023-07-23 05:41:23,368][00397] Starting all processes... +[2023-07-23 05:41:23,370][00397] Starting process learner_proc0 +[2023-07-23 05:41:23,531][00397] Starting all processes... +[2023-07-23 05:41:23,570][00397] Starting process inference_proc0-0 +[2023-07-23 05:41:23,571][00397] Starting process rollout_proc0 +[2023-07-23 05:41:23,571][00397] Starting process rollout_proc1 +[2023-07-23 05:41:23,581][00397] Starting process rollout_proc3 +[2023-07-23 05:41:23,581][00397] Starting process rollout_proc4 +[2023-07-23 05:41:23,581][00397] Starting process rollout_proc5 +[2023-07-23 05:41:23,581][00397] Starting process rollout_proc6 +[2023-07-23 05:41:23,582][00397] Starting process rollout_proc7 +[2023-07-23 05:41:23,581][00397] Starting process rollout_proc2 +[2023-07-23 05:41:41,831][07592] Worker 6 uses CPU cores [0] +[2023-07-23 05:41:42,032][07585] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-23 05:41:42,035][07571] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-23 05:41:42,036][07585] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0 +[2023-07-23 05:41:42,037][07571] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0 +[2023-07-23 05:41:42,065][07588] Worker 4 uses CPU cores [0] +[2023-07-23 05:41:42,085][07571] Num visible devices: 1 +[2023-07-23 05:41:42,125][07585] Num visible devices: 1 +[2023-07-23 05:41:42,136][07571] Starting seed is not provided +[2023-07-23 05:41:42,137][07571] Using GPUs [0] for process 0 (actually maps to GPUs [0]) +[2023-07-23 05:41:42,137][07571] Initializing actor-critic model on device cuda:0 +[2023-07-23 05:41:42,138][07571] RunningMeanStd input shape: (3, 72, 128) +[2023-07-23 05:41:42,142][07571] RunningMeanStd input shape: (1,) +[2023-07-23 05:41:42,172][07589] Worker 5 uses CPU cores [1] +[2023-07-23 05:41:42,225][07571] ConvEncoder: input_channels=3 +[2023-07-23 05:41:42,267][07591] Worker 2 uses CPU cores [0] +[2023-07-23 05:41:42,303][07586] Worker 1 uses CPU cores [1] +[2023-07-23 05:41:42,319][07584] Worker 0 uses CPU cores [0] +[2023-07-23 05:41:42,371][07590] Worker 7 uses CPU cores [1] +[2023-07-23 05:41:42,383][07587] Worker 3 uses CPU cores [1] +[2023-07-23 05:41:42,613][07571] Conv encoder output size: 512 +[2023-07-23 05:41:42,614][07571] Policy head output size: 512 +[2023-07-23 05:41:42,661][07571] Created Actor Critic model with architecture: +[2023-07-23 05:41:42,661][07571] ActorCriticSharedWeights( + (obs_normalizer): ObservationNormalizer( + (running_mean_std): RunningMeanStdDictInPlace( + 
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2023-07-23 05:41:43,269][00397] Heartbeat connected on Batcher_0
+[2023-07-23 05:41:43,278][00397] Heartbeat connected on InferenceWorker_p0-w0
+[2023-07-23 05:41:43,294][00397] Heartbeat connected on RolloutWorker_w0
+[2023-07-23 05:41:43,310][00397] Heartbeat connected on RolloutWorker_w1
+[2023-07-23 05:41:43,330][00397] Heartbeat connected on RolloutWorker_w2
+[2023-07-23 05:41:43,346][00397] Heartbeat connected on RolloutWorker_w3
+[2023-07-23 05:41:43,358][00397] Heartbeat connected on RolloutWorker_w5
+[2023-07-23 05:41:43,363][00397] Heartbeat connected on RolloutWorker_w4
+[2023-07-23 05:41:43,368][00397] Heartbeat connected on RolloutWorker_w6
+[2023-07-23 05:41:43,370][00397] Heartbeat connected on RolloutWorker_w7
+[2023-07-23 05:41:51,437][07571] Using optimizer
+[2023-07-23 05:41:51,439][07571] No checkpoints found
+[2023-07-23 05:41:51,439][07571] Did not load from checkpoint, starting from scratch!
+[2023-07-23 05:41:51,439][07571] Initialized policy 0 weights for model version 0
+[2023-07-23 05:41:51,442][07571] LearnerWorker_p0 finished initialization!
+[2023-07-23 05:41:51,443][00397] Heartbeat connected on LearnerWorker_p0
+[2023-07-23 05:41:51,450][07571] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2023-07-23 05:41:51,655][07585] RunningMeanStd input shape: (3, 72, 128)
+[2023-07-23 05:41:51,656][07585] RunningMeanStd input shape: (1,)
+[2023-07-23 05:41:51,668][07585] ConvEncoder: input_channels=3
+[2023-07-23 05:41:51,775][07585] Conv encoder output size: 512
+[2023-07-23 05:41:51,775][07585] Policy head output size: 512
+[2023-07-23 05:41:51,892][00397] Inference worker 0-0 is ready!
+[2023-07-23 05:41:51,894][00397] All inference workers are ready! Signal rollout workers to start!
+[2023-07-23 05:41:52,125][07590] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,133][07587] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,151][07586] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,176][07588] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,183][07592] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,184][07584] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,186][07591] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:52,190][07589] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 05:41:54,486][07592] Decorrelating experience for 0 frames... +[2023-07-23 05:41:54,486][07586] Decorrelating experience for 0 frames... +[2023-07-23 05:41:54,486][07584] Decorrelating experience for 0 frames... +[2023-07-23 05:41:54,487][07589] Decorrelating experience for 0 frames... +[2023-07-23 05:41:54,759][00397] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:41:55,327][07591] Decorrelating experience for 0 frames... +[2023-07-23 05:41:55,338][07584] Decorrelating experience for 32 frames... +[2023-07-23 05:41:55,832][07586] Decorrelating experience for 32 frames... +[2023-07-23 05:41:55,842][07589] Decorrelating experience for 32 frames... +[2023-07-23 05:41:55,875][07590] Decorrelating experience for 0 frames... +[2023-07-23 05:41:56,494][07588] Decorrelating experience for 0 frames... +[2023-07-23 05:41:56,542][07584] Decorrelating experience for 64 frames... +[2023-07-23 05:41:56,543][07591] Decorrelating experience for 32 frames... +[2023-07-23 05:41:56,857][07590] Decorrelating experience for 32 frames... +[2023-07-23 05:41:56,887][07586] Decorrelating experience for 64 frames... +[2023-07-23 05:41:57,359][07588] Decorrelating experience for 32 frames... +[2023-07-23 05:41:57,446][07591] Decorrelating experience for 64 frames... +[2023-07-23 05:41:58,081][07587] Decorrelating experience for 0 frames... +[2023-07-23 05:41:58,159][07589] Decorrelating experience for 64 frames... +[2023-07-23 05:41:58,338][07584] Decorrelating experience for 96 frames... +[2023-07-23 05:41:58,344][07588] Decorrelating experience for 64 frames... +[2023-07-23 05:41:58,397][07590] Decorrelating experience for 64 frames... +[2023-07-23 05:41:58,465][07586] Decorrelating experience for 96 frames... +[2023-07-23 05:41:59,315][07590] Decorrelating experience for 96 frames... +[2023-07-23 05:41:59,438][07586] Decorrelating experience for 128 frames... +[2023-07-23 05:41:59,659][07592] Decorrelating experience for 32 frames... +[2023-07-23 05:41:59,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:00,021][07591] Decorrelating experience for 96 frames... +[2023-07-23 05:42:00,089][07588] Decorrelating experience for 96 frames... +[2023-07-23 05:42:00,157][07584] Decorrelating experience for 128 frames... +[2023-07-23 05:42:00,206][07586] Decorrelating experience for 160 frames... +[2023-07-23 05:42:01,097][07587] Decorrelating experience for 32 frames... +[2023-07-23 05:42:01,281][07590] Decorrelating experience for 128 frames... +[2023-07-23 05:42:01,527][07591] Decorrelating experience for 128 frames... 
+[2023-07-23 05:42:01,576][07588] Decorrelating experience for 128 frames... +[2023-07-23 05:42:01,906][07584] Decorrelating experience for 160 frames... +[2023-07-23 05:42:02,699][07586] Decorrelating experience for 192 frames... +[2023-07-23 05:42:02,866][07588] Decorrelating experience for 160 frames... +[2023-07-23 05:42:03,209][07587] Decorrelating experience for 64 frames... +[2023-07-23 05:42:03,452][07590] Decorrelating experience for 160 frames... +[2023-07-23 05:42:04,399][07584] Decorrelating experience for 192 frames... +[2023-07-23 05:42:04,624][07589] Decorrelating experience for 96 frames... +[2023-07-23 05:42:04,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:05,146][07586] Decorrelating experience for 224 frames... +[2023-07-23 05:42:05,361][07587] Decorrelating experience for 96 frames... +[2023-07-23 05:42:05,778][07590] Decorrelating experience for 192 frames... +[2023-07-23 05:42:06,757][07591] Decorrelating experience for 160 frames... +[2023-07-23 05:42:06,811][07589] Decorrelating experience for 128 frames... +[2023-07-23 05:42:06,901][07584] Decorrelating experience for 224 frames... +[2023-07-23 05:42:07,343][07587] Decorrelating experience for 128 frames... +[2023-07-23 05:42:08,394][07589] Decorrelating experience for 160 frames... +[2023-07-23 05:42:08,974][07590] Decorrelating experience for 224 frames... +[2023-07-23 05:42:09,202][07592] Decorrelating experience for 64 frames... +[2023-07-23 05:42:09,229][07587] Decorrelating experience for 160 frames... +[2023-07-23 05:42:09,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:10,080][07586] Decorrelating experience for 256 frames... +[2023-07-23 05:42:10,569][07588] Decorrelating experience for 192 frames... +[2023-07-23 05:42:10,625][07592] Decorrelating experience for 96 frames... +[2023-07-23 05:42:11,955][07589] Decorrelating experience for 192 frames... +[2023-07-23 05:42:12,383][07587] Decorrelating experience for 192 frames... +[2023-07-23 05:42:12,737][07592] Decorrelating experience for 128 frames... +[2023-07-23 05:42:12,893][07588] Decorrelating experience for 224 frames... +[2023-07-23 05:42:12,993][07586] Decorrelating experience for 288 frames... +[2023-07-23 05:42:13,133][07590] Decorrelating experience for 256 frames... +[2023-07-23 05:42:13,877][07584] Decorrelating experience for 256 frames... +[2023-07-23 05:42:14,223][07591] Decorrelating experience for 192 frames... +[2023-07-23 05:42:14,333][07589] Decorrelating experience for 224 frames... +[2023-07-23 05:42:14,600][07587] Decorrelating experience for 224 frames... +[2023-07-23 05:42:14,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:14,915][07592] Decorrelating experience for 160 frames... +[2023-07-23 05:42:15,621][07590] Decorrelating experience for 288 frames... +[2023-07-23 05:42:16,006][07584] Decorrelating experience for 288 frames... +[2023-07-23 05:42:16,593][07588] Decorrelating experience for 256 frames... +[2023-07-23 05:42:16,839][07592] Decorrelating experience for 192 frames... +[2023-07-23 05:42:17,209][07589] Decorrelating experience for 256 frames... +[2023-07-23 05:42:17,382][07590] Decorrelating experience for 320 frames... 
+[2023-07-23 05:42:17,485][07591] Decorrelating experience for 224 frames... +[2023-07-23 05:42:17,555][07587] Decorrelating experience for 256 frames... +[2023-07-23 05:42:18,692][07584] Decorrelating experience for 320 frames... +[2023-07-23 05:42:19,192][07588] Decorrelating experience for 288 frames... +[2023-07-23 05:42:19,211][07586] Decorrelating experience for 320 frames... +[2023-07-23 05:42:19,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:20,268][07590] Decorrelating experience for 352 frames... +[2023-07-23 05:42:20,801][07592] Decorrelating experience for 224 frames... +[2023-07-23 05:42:22,009][07584] Decorrelating experience for 352 frames... +[2023-07-23 05:42:22,290][07591] Decorrelating experience for 256 frames... +[2023-07-23 05:42:22,655][07588] Decorrelating experience for 320 frames... +[2023-07-23 05:42:22,656][07589] Decorrelating experience for 288 frames... +[2023-07-23 05:42:24,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:24,781][07586] Decorrelating experience for 352 frames... +[2023-07-23 05:42:25,794][07587] Decorrelating experience for 288 frames... +[2023-07-23 05:42:25,800][07590] Decorrelating experience for 384 frames... +[2023-07-23 05:42:26,608][07591] Decorrelating experience for 288 frames... +[2023-07-23 05:42:26,721][07584] Decorrelating experience for 384 frames... +[2023-07-23 05:42:27,242][07589] Decorrelating experience for 320 frames... +[2023-07-23 05:42:28,903][07586] Decorrelating experience for 384 frames... +[2023-07-23 05:42:29,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:30,224][07592] Decorrelating experience for 256 frames... +[2023-07-23 05:42:30,658][07588] Decorrelating experience for 352 frames... +[2023-07-23 05:42:30,877][07589] Decorrelating experience for 352 frames... +[2023-07-23 05:42:31,437][07591] Decorrelating experience for 320 frames... +[2023-07-23 05:42:31,891][07590] Decorrelating experience for 416 frames... +[2023-07-23 05:42:32,171][07584] Decorrelating experience for 416 frames... +[2023-07-23 05:42:34,361][07586] Decorrelating experience for 416 frames... +[2023-07-23 05:42:34,690][07592] Decorrelating experience for 288 frames... +[2023-07-23 05:42:34,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:35,432][07588] Decorrelating experience for 384 frames... +[2023-07-23 05:42:35,481][07587] Decorrelating experience for 320 frames... +[2023-07-23 05:42:36,529][07591] Decorrelating experience for 352 frames... +[2023-07-23 05:42:39,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:39,805][07590] Decorrelating experience for 448 frames... +[2023-07-23 05:42:40,936][07589] Decorrelating experience for 384 frames... +[2023-07-23 05:42:41,665][07584] Decorrelating experience for 448 frames... +[2023-07-23 05:42:42,035][07592] Decorrelating experience for 320 frames... +[2023-07-23 05:42:42,858][07587] Decorrelating experience for 352 frames... 
+[2023-07-23 05:42:43,017][07588] Decorrelating experience for 416 frames... +[2023-07-23 05:42:44,361][07586] Decorrelating experience for 448 frames... +[2023-07-23 05:42:44,382][07590] Decorrelating experience for 480 frames... +[2023-07-23 05:42:44,708][07589] Decorrelating experience for 416 frames... +[2023-07-23 05:42:44,760][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:45,244][07584] Decorrelating experience for 480 frames... +[2023-07-23 05:42:46,009][07588] Decorrelating experience for 448 frames... +[2023-07-23 05:42:46,240][07591] Decorrelating experience for 384 frames... +[2023-07-23 05:42:47,441][07587] Decorrelating experience for 384 frames... +[2023-07-23 05:42:47,927][07592] Decorrelating experience for 352 frames... +[2023-07-23 05:42:48,702][07586] Decorrelating experience for 480 frames... +[2023-07-23 05:42:48,888][07589] Decorrelating experience for 448 frames... +[2023-07-23 05:42:49,077][07591] Decorrelating experience for 416 frames... +[2023-07-23 05:42:49,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 11.7. Samples: 528. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:49,761][00397] Avg episode reward: [(0, '0.747')] +[2023-07-23 05:42:50,104][07588] Decorrelating experience for 480 frames... +[2023-07-23 05:42:51,384][07592] Decorrelating experience for 384 frames... +[2023-07-23 05:42:51,672][07587] Decorrelating experience for 416 frames... +[2023-07-23 05:42:53,749][07571] Signal inference workers to stop experience collection... +[2023-07-23 05:42:53,772][07585] InferenceWorker_p0-w0: stopping experience collection +[2023-07-23 05:42:53,783][07589] Decorrelating experience for 480 frames... +[2023-07-23 05:42:53,977][07591] Decorrelating experience for 448 frames... +[2023-07-23 05:42:54,759][00397] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 40.9. Samples: 1840. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0) +[2023-07-23 05:42:54,762][00397] Avg episode reward: [(0, '1.628')] +[2023-07-23 05:42:54,882][07587] Decorrelating experience for 448 frames... +[2023-07-23 05:42:55,515][07592] Decorrelating experience for 416 frames... +[2023-07-23 05:42:56,616][07587] Decorrelating experience for 480 frames... +[2023-07-23 05:42:57,489][07591] Decorrelating experience for 480 frames... +[2023-07-23 05:42:58,988][07571] Signal inference workers to resume experience collection... +[2023-07-23 05:42:58,989][07585] InferenceWorker_p0-w0: resuming experience collection +[2023-07-23 05:42:59,703][07592] Decorrelating experience for 448 frames... +[2023-07-23 05:42:59,759][00397] Fps is (10 sec: 409.6, 60 sec: 68.3, 300 sec: 63.0). Total num frames: 4096. Throughput: 0: 63.3. Samples: 2848. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0) +[2023-07-23 05:42:59,769][00397] Avg episode reward: [(0, '1.628')] +[2023-07-23 05:43:04,763][00397] Fps is (10 sec: 1228.3, 60 sec: 204.8, 300 sec: 175.5). Total num frames: 12288. Throughput: 0: 112.3. Samples: 5056. Policy #0 lag: (min: 1.0, avg: 1.0, max: 1.0) +[2023-07-23 05:43:04,765][00397] Avg episode reward: [(0, '1.594')] +[2023-07-23 05:43:07,215][07592] Decorrelating experience for 480 frames... +[2023-07-23 05:43:09,759][00397] Fps is (10 sec: 2457.6, 60 sec: 477.9, 300 sec: 382.3). Total num frames: 28672. Throughput: 0: 159.1. Samples: 7160. 
Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:43:09,764][00397] Avg episode reward: [(0, '2.075')] +[2023-07-23 05:43:13,562][07585] Updated weights for policy 0, policy_version 10 (0.0015) +[2023-07-23 05:43:14,759][00397] Fps is (10 sec: 2868.4, 60 sec: 682.7, 300 sec: 512.0). Total num frames: 40960. Throughput: 0: 257.2. Samples: 11576. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:43:14,761][00397] Avg episode reward: [(0, '2.419')] +[2023-07-23 05:43:14,772][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000010_40960.pth... +[2023-07-23 05:43:19,760][00397] Fps is (10 sec: 2866.8, 60 sec: 955.7, 300 sec: 674.6). Total num frames: 57344. Throughput: 0: 370.7. Samples: 16680. Policy #0 lag: (min: 0.0, avg: 2.6, max: 6.0) +[2023-07-23 05:43:19,762][00397] Avg episode reward: [(0, '3.542')] +[2023-07-23 05:43:24,524][07585] Updated weights for policy 0, policy_version 20 (0.0015) +[2023-07-23 05:43:24,759][00397] Fps is (10 sec: 4505.7, 60 sec: 1433.6, 300 sec: 955.7). Total num frames: 86016. Throughput: 0: 429.9. Samples: 19344. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:43:24,761][00397] Avg episode reward: [(0, '4.302')] +[2023-07-23 05:43:29,761][00397] Fps is (10 sec: 4914.7, 60 sec: 1774.9, 300 sec: 1121.0). Total num frames: 106496. Throughput: 0: 596.1. Samples: 26824. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:43:29,764][00397] Avg episode reward: [(0, '4.283')] +[2023-07-23 05:43:29,769][07571] Saving new best policy, reward=4.283! +[2023-07-23 05:43:33,460][07585] Updated weights for policy 0, policy_version 30 (0.0012) +[2023-07-23 05:43:34,761][00397] Fps is (10 sec: 3685.5, 60 sec: 2047.9, 300 sec: 1228.8). Total num frames: 122880. Throughput: 0: 708.4. Samples: 32408. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:43:34,764][00397] Avg episode reward: [(0, '4.319')] +[2023-07-23 05:43:34,775][07571] Saving new best policy, reward=4.319! +[2023-07-23 05:43:39,759][00397] Fps is (10 sec: 3277.6, 60 sec: 2321.1, 300 sec: 1326.3). Total num frames: 139264. Throughput: 0: 735.5. Samples: 34936. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 05:43:39,761][00397] Avg episode reward: [(0, '4.342')] +[2023-07-23 05:43:39,772][07571] Saving new best policy, reward=4.342! +[2023-07-23 05:43:44,759][00397] Fps is (10 sec: 3277.6, 60 sec: 2594.2, 300 sec: 1415.0). Total num frames: 155648. Throughput: 0: 823.3. Samples: 39896. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:43:44,765][00397] Avg episode reward: [(0, '4.520')] +[2023-07-23 05:43:44,777][07571] Saving new best policy, reward=4.520! +[2023-07-23 05:43:47,958][07585] Updated weights for policy 0, policy_version 40 (0.0013) +[2023-07-23 05:43:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 2867.2, 300 sec: 1495.9). Total num frames: 172032. Throughput: 0: 871.7. Samples: 44280. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:43:49,765][00397] Avg episode reward: [(0, '4.441')] +[2023-07-23 05:43:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 1570.1). Total num frames: 188416. Throughput: 0: 883.0. Samples: 46896. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:43:54,766][00397] Avg episode reward: [(0, '4.365')] +[2023-07-23 05:43:57,205][07585] Updated weights for policy 0, policy_version 50 (0.0013) +[2023-07-23 05:43:59,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3549.9, 300 sec: 1736.7). Total num frames: 217088. 
Throughput: 0: 943.5. Samples: 54032. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:43:59,766][00397] Avg episode reward: [(0, '4.363')] +[2023-07-23 05:44:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3686.7, 300 sec: 1795.9). Total num frames: 233472. Throughput: 0: 977.1. Samples: 60648. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:44:04,765][00397] Avg episode reward: [(0, '4.436')] +[2023-07-23 05:44:07,477][07585] Updated weights for policy 0, policy_version 60 (0.0013) +[2023-07-23 05:44:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 1881.1). Total num frames: 253952. Throughput: 0: 975.3. Samples: 63232. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 05:44:09,761][00397] Avg episode reward: [(0, '4.401')] +[2023-07-23 05:44:14,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3754.5, 300 sec: 1901.7). Total num frames: 266240. Throughput: 0: 920.4. Samples: 68240. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:44:14,764][00397] Avg episode reward: [(0, '4.289')] +[2023-07-23 05:44:19,391][07585] Updated weights for policy 0, policy_version 70 (0.0013) +[2023-07-23 05:44:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3823.0, 300 sec: 1977.4). Total num frames: 286720. Throughput: 0: 907.1. Samples: 73224. Policy #0 lag: (min: 0.0, avg: 2.3, max: 6.0) +[2023-07-23 05:44:19,761][00397] Avg episode reward: [(0, '4.465')] +[2023-07-23 05:44:24,759][00397] Fps is (10 sec: 3277.6, 60 sec: 3549.9, 300 sec: 1993.4). Total num frames: 299008. Throughput: 0: 905.4. Samples: 75680. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:44:24,761][00397] Avg episode reward: [(0, '4.615')] +[2023-07-23 05:44:24,770][07571] Saving new best policy, reward=4.615! +[2023-07-23 05:44:29,362][07585] Updated weights for policy 0, policy_version 80 (0.0012) +[2023-07-23 05:44:29,761][00397] Fps is (10 sec: 4095.2, 60 sec: 3686.4, 300 sec: 2114.0). Total num frames: 327680. Throughput: 0: 933.5. Samples: 81904. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 05:44:29,763][00397] Avg episode reward: [(0, '4.594')] +[2023-07-23 05:44:34,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3754.8, 300 sec: 2176.0). Total num frames: 348160. Throughput: 0: 998.0. Samples: 89192. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:44:34,761][00397] Avg episode reward: [(0, '4.415')] +[2023-07-23 05:44:39,759][00397] Fps is (10 sec: 3687.1, 60 sec: 3754.7, 300 sec: 2209.4). Total num frames: 364544. Throughput: 0: 1003.4. Samples: 92048. Policy #0 lag: (min: 0.0, avg: 2.6, max: 5.0) +[2023-07-23 05:44:39,764][00397] Avg episode reward: [(0, '4.506')] +[2023-07-23 05:44:41,139][07585] Updated weights for policy 0, policy_version 90 (0.0013) +[2023-07-23 05:44:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 2240.8). Total num frames: 380928. Throughput: 0: 931.7. Samples: 95960. Policy #0 lag: (min: 0.0, avg: 2.6, max: 4.0) +[2023-07-23 05:44:44,761][00397] Avg episode reward: [(0, '4.519')] +[2023-07-23 05:44:49,760][00397] Fps is (10 sec: 2866.9, 60 sec: 3686.3, 300 sec: 2246.9). Total num frames: 393216. Throughput: 0: 872.2. Samples: 99896. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:44:49,762][00397] Avg episode reward: [(0, '4.583')] +[2023-07-23 05:45:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 2418.6). Total num frames: 507904. Throughput: 0: 789.0. Samples: 127552. 
Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:45:24,764][00397] Avg episode reward: [(0, '4.757')] +[2023-07-23 05:45:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.9, 300 sec: 2438.5). Total num frames: 524288. Throughput: 0: 813.5. Samples: 132568. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:45:29,761][00397] Avg episode reward: [(0, '4.887')] +[2023-07-23 05:45:29,763][07571] Saving new best policy, reward=4.887! +[2023-07-23 05:45:31,229][07585] Updated weights for policy 0, policy_version 130 (0.0022) +[2023-07-23 05:45:34,759][00397] Fps is (10 sec: 3276.6, 60 sec: 3208.5, 300 sec: 2457.6). Total num frames: 540672. Throughput: 0: 835.6. Samples: 137496. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:45:34,763][00397] Avg episode reward: [(0, '4.959')] +[2023-07-23 05:45:34,774][07571] Saving new best policy, reward=4.959! +[2023-07-23 05:45:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 2494.0). Total num frames: 561152. Throughput: 0: 846.4. Samples: 139904. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:45:39,762][00397] Avg episode reward: [(0, '4.978')] +[2023-07-23 05:45:39,770][07571] Saving new best policy, reward=4.978! +[2023-07-23 05:45:42,104][07585] Updated weights for policy 0, policy_version 140 (0.0014) +[2023-07-23 05:45:44,759][00397] Fps is (10 sec: 4505.9, 60 sec: 3413.3, 300 sec: 2546.6). Total num frames: 585728. Throughput: 0: 917.7. Samples: 146912. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:45:44,762][00397] Avg episode reward: [(0, '4.762')] +[2023-07-23 05:45:49,760][00397] Fps is (10 sec: 4505.1, 60 sec: 3549.9, 300 sec: 2579.6). Total num frames: 606208. Throughput: 0: 991.4. Samples: 154016. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:45:49,764][00397] Avg episode reward: [(0, '4.633')] +[2023-07-23 05:45:50,617][07585] Updated weights for policy 0, policy_version 150 (0.0012) +[2023-07-23 05:45:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 2594.1). Total num frames: 622592. Throughput: 0: 995.9. Samples: 156472. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 05:45:54,764][00397] Avg episode reward: [(0, '4.605')] +[2023-07-23 05:45:59,759][00397] Fps is (10 sec: 3686.8, 60 sec: 3822.9, 300 sec: 2624.8). Total num frames: 643072. Throughput: 0: 943.5. Samples: 161368. Policy #0 lag: (min: 0.0, avg: 2.0, max: 6.0) +[2023-07-23 05:45:59,765][00397] Avg episode reward: [(0, '4.750')] +[2023-07-23 05:46:03,264][07585] Updated weights for policy 0, policy_version 160 (0.0012) +[2023-07-23 05:46:04,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 2637.8). Total num frames: 659456. Throughput: 0: 917.0. Samples: 166272. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 05:46:04,761][00397] Avg episode reward: [(0, '4.786')] +[2023-07-23 05:46:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 2650.4). Total num frames: 675840. Throughput: 0: 916.3. Samples: 168784. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:46:09,762][00397] Avg episode reward: [(0, '4.816')] +[2023-07-23 05:46:13,681][07585] Updated weights for policy 0, policy_version 170 (0.0023) +[2023-07-23 05:46:14,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 2678.2). Total num frames: 696320. Throughput: 0: 938.5. Samples: 174800. 
Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:46:14,765][00397] Avg episode reward: [(0, '4.989')] +[2023-07-23 05:46:14,872][07571] Saving new best policy, reward=4.989! +[2023-07-23 05:46:19,759][00397] Fps is (10 sec: 4915.3, 60 sec: 3891.2, 300 sec: 2735.8). Total num frames: 724992. Throughput: 0: 993.6. Samples: 182208. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 05:46:19,761][00397] Avg episode reward: [(0, '4.840')] +[2023-07-23 05:46:22,671][07585] Updated weights for policy 0, policy_version 180 (0.0012) +[2023-07-23 05:46:24,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 2745.8). Total num frames: 741376. Throughput: 0: 1011.7. Samples: 185432. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:46:24,761][00397] Avg episode reward: [(0, '4.727')] +[2023-07-23 05:46:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 2755.5). Total num frames: 757760. Throughput: 0: 969.4. Samples: 190536. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:46:29,766][00397] Avg episode reward: [(0, '4.795')] +[2023-07-23 05:46:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 2764.8). Total num frames: 774144. Throughput: 0: 921.6. Samples: 195488. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 05:46:34,761][00397] Avg episode reward: [(0, '4.893')] +[2023-07-23 05:46:35,170][07585] Updated weights for policy 0, policy_version 190 (0.0013) +[2023-07-23 05:46:39,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 2773.8). Total num frames: 790528. Throughput: 0: 921.4. Samples: 197936. Policy #0 lag: (min: 0.0, avg: 1.9, max: 6.0) +[2023-07-23 05:46:39,763][00397] Avg episode reward: [(0, '5.056')] +[2023-07-23 05:46:39,816][07571] Saving new best policy, reward=5.056! +[2023-07-23 05:46:44,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 2796.6). Total num frames: 811008. Throughput: 0: 927.1. Samples: 203088. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 05:46:44,761][00397] Avg episode reward: [(0, '5.079')] +[2023-07-23 05:46:44,770][07571] Saving new best policy, reward=5.079! +[2023-07-23 05:46:45,623][07585] Updated weights for policy 0, policy_version 200 (0.0022) +[2023-07-23 05:46:49,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3823.0, 300 sec: 2832.5). Total num frames: 835584. Throughput: 0: 980.1. Samples: 210376. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:46:49,761][00397] Avg episode reward: [(0, '5.365')] +[2023-07-23 05:46:49,763][07571] Saving new best policy, reward=5.365! +[2023-07-23 05:46:54,612][07585] Updated weights for policy 0, policy_version 210 (0.0012) +[2023-07-23 05:46:54,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3959.5, 300 sec: 2915.8). Total num frames: 860160. Throughput: 0: 1007.3. Samples: 214112. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:46:54,763][00397] Avg episode reward: [(0, '5.582')] +[2023-07-23 05:46:54,779][07571] Saving new best policy, reward=5.582! +[2023-07-23 05:46:59,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 2957.5). Total num frames: 872448. Throughput: 0: 992.0. Samples: 219440. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:46:59,761][00397] Avg episode reward: [(0, '5.331')] +[2023-07-23 05:47:04,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3754.7, 300 sec: 2999.1). Total num frames: 884736. Throughput: 0: 912.0. Samples: 223248. 
Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:47:04,761][00397] Avg episode reward: [(0, '5.381')] +[2023-07-23 05:47:09,604][07585] Updated weights for policy 0, policy_version 220 (0.0013) +[2023-07-23 05:47:09,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3754.7, 300 sec: 3054.6). Total num frames: 901120. Throughput: 0: 883.0. Samples: 225168. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:47:09,765][00397] Avg episode reward: [(0, '5.274')] +[2023-07-23 05:47:14,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3549.9, 300 sec: 3082.4). Total num frames: 909312. Throughput: 0: 856.2. Samples: 229064. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 05:47:14,764][00397] Avg episode reward: [(0, '5.391')] +[2023-07-23 05:47:14,774][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000222_909312.pth... +[2023-07-23 05:47:14,939][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000010_40960.pth +[2023-07-23 05:47:19,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3345.1, 300 sec: 3138.0). Total num frames: 925696. Throughput: 0: 832.2. Samples: 232936. Policy #0 lag: (min: 0.0, avg: 2.1, max: 6.0) +[2023-07-23 05:47:19,761][00397] Avg episode reward: [(0, '5.246')] +[2023-07-23 05:47:24,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3276.8, 300 sec: 3179.6). Total num frames: 937984. Throughput: 0: 821.2. Samples: 234888. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 05:47:24,769][00397] Avg episode reward: [(0, '5.367')] +[2023-07-23 05:47:25,107][07585] Updated weights for policy 0, policy_version 230 (0.0021) +[2023-07-23 05:47:29,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3345.0, 300 sec: 3249.0). Total num frames: 958464. Throughput: 0: 819.2. Samples: 239952. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:47:29,762][00397] Avg episode reward: [(0, '5.669')] +[2023-07-23 05:47:29,770][07571] Saving new best policy, reward=5.669! +[2023-07-23 05:47:34,186][07585] Updated weights for policy 0, policy_version 240 (0.0012) +[2023-07-23 05:47:34,767][00397] Fps is (10 sec: 4502.0, 60 sec: 3481.1, 300 sec: 3332.2). Total num frames: 983040. Throughput: 0: 814.4. Samples: 247032. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:47:34,771][00397] Avg episode reward: [(0, '5.947')] +[2023-07-23 05:47:34,790][07571] Saving new best policy, reward=5.947! +[2023-07-23 05:47:39,764][00397] Fps is (10 sec: 4093.9, 60 sec: 3481.3, 300 sec: 3387.8). Total num frames: 999424. Throughput: 0: 784.8. Samples: 249432. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:47:39,766][00397] Avg episode reward: [(0, '5.888')] +[2023-07-23 05:47:44,759][00397] Fps is (10 sec: 3279.5, 60 sec: 3413.3, 300 sec: 3443.4). Total num frames: 1015808. Throughput: 0: 779.6. Samples: 254520. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:47:44,764][00397] Avg episode reward: [(0, '6.020')] +[2023-07-23 05:47:44,774][07571] Saving new best policy, reward=6.020! +[2023-07-23 05:47:47,705][07585] Updated weights for policy 0, policy_version 250 (0.0012) +[2023-07-23 05:47:49,759][00397] Fps is (10 sec: 3278.6, 60 sec: 3276.8, 300 sec: 3499.0). Total num frames: 1032192. Throughput: 0: 804.6. Samples: 259456. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:47:49,761][00397] Avg episode reward: [(0, '6.018')] +[2023-07-23 05:47:54,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3072.0, 300 sec: 3526.7). Total num frames: 1044480. Throughput: 0: 817.2. 
Samples: 261944. Policy #0 lag: (min: 0.0, avg: 1.0, max: 3.0) +[2023-07-23 05:47:54,762][00397] Avg episode reward: [(0, '6.380')] +[2023-07-23 05:47:54,774][07571] Saving new best policy, reward=6.380! +[2023-07-23 05:47:58,621][07585] Updated weights for policy 0, policy_version 260 (0.0017) +[2023-07-23 05:47:59,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3345.1, 300 sec: 3596.2). Total num frames: 1073152. Throughput: 0: 862.8. Samples: 267888. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:47:59,767][00397] Avg episode reward: [(0, '6.843')] +[2023-07-23 05:47:59,775][07571] Saving new best policy, reward=6.843! +[2023-07-23 05:48:04,759][00397] Fps is (10 sec: 4915.4, 60 sec: 3481.6, 300 sec: 3610.0). Total num frames: 1093632. Throughput: 0: 933.7. Samples: 274952. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:48:04,764][00397] Avg episode reward: [(0, '6.991')] +[2023-07-23 05:48:04,776][07571] Saving new best policy, reward=6.991! +[2023-07-23 05:48:07,450][07585] Updated weights for policy 0, policy_version 270 (0.0013) +[2023-07-23 05:48:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3623.9). Total num frames: 1110016. Throughput: 0: 954.7. Samples: 277848. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 05:48:09,764][00397] Avg episode reward: [(0, '7.060')] +[2023-07-23 05:48:09,767][07571] Saving new best policy, reward=7.060! +[2023-07-23 05:48:14,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 1126400. Throughput: 0: 953.3. Samples: 282848. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:48:14,763][00397] Avg episode reward: [(0, '7.217')] +[2023-07-23 05:48:14,777][07571] Saving new best policy, reward=7.217! +[2023-07-23 05:48:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1142784. Throughput: 0: 905.4. Samples: 287768. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 05:48:19,762][00397] Avg episode reward: [(0, '7.760')] +[2023-07-23 05:48:19,764][07571] Saving new best policy, reward=7.760! +[2023-07-23 05:48:20,698][07585] Updated weights for policy 0, policy_version 280 (0.0020) +[2023-07-23 05:48:24,762][00397] Fps is (10 sec: 3275.7, 60 sec: 3686.2, 300 sec: 3568.4). Total num frames: 1159168. Throughput: 0: 907.4. Samples: 290264. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 05:48:24,765][00397] Avg episode reward: [(0, '8.105')] +[2023-07-23 05:48:24,775][07571] Saving new best policy, reward=8.105! +[2023-07-23 05:48:29,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3596.2). Total num frames: 1183744. Throughput: 0: 910.0. Samples: 295472. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:48:29,761][00397] Avg episode reward: [(0, '8.152')] +[2023-07-23 05:48:29,768][07571] Saving new best policy, reward=8.152! +[2023-07-23 05:48:30,594][07585] Updated weights for policy 0, policy_version 290 (0.0013) +[2023-07-23 05:48:34,759][00397] Fps is (10 sec: 4916.8, 60 sec: 3755.2, 300 sec: 3623.9). Total num frames: 1208320. Throughput: 0: 962.5. Samples: 302768. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:48:34,761][00397] Avg episode reward: [(0, '8.399')] +[2023-07-23 05:48:34,769][07571] Saving new best policy, reward=8.399! +[2023-07-23 05:48:39,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3755.0, 300 sec: 3623.9). Total num frames: 1224704. Throughput: 0: 987.6. Samples: 306384. 
Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:48:39,764][00397] Avg episode reward: [(0, '8.344')] +[2023-07-23 05:48:39,983][07585] Updated weights for policy 0, policy_version 300 (0.0012) +[2023-07-23 05:48:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 1241088. Throughput: 0: 969.1. Samples: 311496. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:48:44,761][00397] Avg episode reward: [(0, '8.371')] +[2023-07-23 05:48:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 1257472. Throughput: 0: 925.9. Samples: 316616. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:48:49,763][00397] Avg episode reward: [(0, '8.538')] +[2023-07-23 05:48:49,765][07571] Saving new best policy, reward=8.538! +[2023-07-23 05:48:51,946][07585] Updated weights for policy 0, policy_version 310 (0.0012) +[2023-07-23 05:48:54,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 1273856. Throughput: 0: 916.3. Samples: 319080. Policy #0 lag: (min: 0.0, avg: 2.6, max: 5.0) +[2023-07-23 05:48:54,761][00397] Avg episode reward: [(0, '8.610')] +[2023-07-23 05:48:54,773][07571] Saving new best policy, reward=8.610! +[2023-07-23 05:48:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1290240. Throughput: 0: 914.8. Samples: 324016. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:48:59,763][00397] Avg episode reward: [(0, '8.377')] +[2023-07-23 05:49:03,341][07585] Updated weights for policy 0, policy_version 320 (0.0012) +[2023-07-23 05:49:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 1318912. Throughput: 0: 953.9. Samples: 330696. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:49:04,768][00397] Avg episode reward: [(0, '8.310')] +[2023-07-23 05:49:09,759][00397] Fps is (10 sec: 4915.0, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 1339392. Throughput: 0: 981.8. Samples: 334440. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:49:09,768][00397] Avg episode reward: [(0, '8.815')] +[2023-07-23 05:49:09,774][07571] Saving new best policy, reward=8.815! +[2023-07-23 05:49:11,874][07585] Updated weights for policy 0, policy_version 330 (0.0012) +[2023-07-23 05:49:14,759][00397] Fps is (10 sec: 3686.5, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 1355776. Throughput: 0: 1000.5. Samples: 340496. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:49:14,766][00397] Avg episode reward: [(0, '8.994')] +[2023-07-23 05:49:14,775][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000331_1355776.pth... +[2023-07-23 05:49:14,930][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000116_475136.pth +[2023-07-23 05:49:14,941][07571] Saving new best policy, reward=8.994! +[2023-07-23 05:49:19,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 1372160. Throughput: 0: 946.3. Samples: 345352. Policy #0 lag: (min: 0.0, avg: 2.8, max: 5.0) +[2023-07-23 05:49:19,761][00397] Avg episode reward: [(0, '8.828')] +[2023-07-23 05:49:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3823.1, 300 sec: 3596.2). Total num frames: 1388544. Throughput: 0: 909.2. Samples: 347296. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 05:49:24,769][00397] Avg episode reward: [(0, '9.340')] +[2023-07-23 05:49:24,793][07571] Saving new best policy, reward=9.340! +[2023-07-23 05:49:26,559][07585] Updated weights for policy 0, policy_version 340 (0.0020) +[2023-07-23 05:49:29,760][00397] Fps is (10 sec: 2866.8, 60 sec: 3618.0, 300 sec: 3568.4). Total num frames: 1400832. Throughput: 0: 880.7. Samples: 351128. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 05:49:29,763][00397] Avg episode reward: [(0, '9.914')] +[2023-07-23 05:49:29,765][07571] Saving new best policy, reward=9.914! +[2023-07-23 05:49:34,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3413.3, 300 sec: 3554.5). Total num frames: 1413120. Throughput: 0: 853.2. Samples: 355008. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:49:34,764][00397] Avg episode reward: [(0, '9.773')] +[2023-07-23 05:49:39,759][00397] Fps is (10 sec: 2457.9, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 1425408. Throughput: 0: 841.2. Samples: 356936. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 05:49:39,761][00397] Avg episode reward: [(0, '10.414')] +[2023-07-23 05:49:39,772][07571] Saving new best policy, reward=10.414! +[2023-07-23 05:49:40,673][07585] Updated weights for policy 0, policy_version 350 (0.0035) +[2023-07-23 05:49:44,760][00397] Fps is (10 sec: 2866.8, 60 sec: 3345.0, 300 sec: 3554.5). Total num frames: 1441792. Throughput: 0: 840.9. Samples: 361856. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:49:44,763][00397] Avg episode reward: [(0, '10.033')] +[2023-07-23 05:49:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3596.1). Total num frames: 1462272. Throughput: 0: 812.3. Samples: 367248. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:49:49,766][00397] Avg episode reward: [(0, '9.999')] +[2023-07-23 05:49:53,427][07585] Updated weights for policy 0, policy_version 360 (0.0018) +[2023-07-23 05:49:54,762][00397] Fps is (10 sec: 3685.7, 60 sec: 3413.2, 300 sec: 3610.0). Total num frames: 1478656. Throughput: 0: 783.9. Samples: 369720. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:49:54,765][00397] Avg episode reward: [(0, '10.012')] +[2023-07-23 05:49:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 1495040. Throughput: 0: 762.0. Samples: 374784. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:49:59,765][00397] Avg episode reward: [(0, '10.097')] +[2023-07-23 05:50:04,760][00397] Fps is (10 sec: 3277.4, 60 sec: 3208.5, 300 sec: 3596.1). Total num frames: 1511424. Throughput: 0: 765.5. Samples: 379800. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:50:04,770][00397] Avg episode reward: [(0, '11.413')] +[2023-07-23 05:50:04,811][07571] Saving new best policy, reward=11.413! +[2023-07-23 05:50:04,818][07585] Updated weights for policy 0, policy_version 370 (0.0015) +[2023-07-23 05:50:09,760][00397] Fps is (10 sec: 3276.5, 60 sec: 3140.2, 300 sec: 3568.4). Total num frames: 1527808. Throughput: 0: 774.9. Samples: 382168. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:50:09,764][00397] Avg episode reward: [(0, '11.757')] +[2023-07-23 05:50:09,772][07571] Saving new best policy, reward=11.757! +[2023-07-23 05:50:14,759][00397] Fps is (10 sec: 4096.6, 60 sec: 3276.8, 300 sec: 3596.1). Total num frames: 1552384. Throughput: 0: 825.8. Samples: 388288. 
Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 05:50:14,763][00397] Avg episode reward: [(0, '11.885')] +[2023-07-23 05:50:14,853][07571] Saving new best policy, reward=11.885! +[2023-07-23 05:50:14,865][07585] Updated weights for policy 0, policy_version 380 (0.0019) +[2023-07-23 05:50:19,759][00397] Fps is (10 sec: 4915.6, 60 sec: 3413.3, 300 sec: 3623.9). Total num frames: 1576960. Throughput: 0: 905.6. Samples: 395760. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 05:50:19,767][00397] Avg episode reward: [(0, '12.028')] +[2023-07-23 05:50:19,772][07571] Saving new best policy, reward=12.028! +[2023-07-23 05:50:24,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3623.9). Total num frames: 1593344. Throughput: 0: 925.7. Samples: 398592. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:50:24,762][00397] Avg episode reward: [(0, '12.144')] +[2023-07-23 05:50:24,775][07571] Saving new best policy, reward=12.144! +[2023-07-23 05:50:25,317][07585] Updated weights for policy 0, policy_version 390 (0.0012) +[2023-07-23 05:50:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 1613824. Throughput: 0: 925.7. Samples: 403512. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:50:29,771][00397] Avg episode reward: [(0, '12.352')] +[2023-07-23 05:50:29,774][07571] Saving new best policy, reward=12.352! +[2023-07-23 05:50:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 1626112. Throughput: 0: 916.6. Samples: 408496. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:50:34,762][00397] Avg episode reward: [(0, '12.821')] +[2023-07-23 05:50:34,772][07571] Saving new best policy, reward=12.821! +[2023-07-23 05:50:38,247][07585] Updated weights for policy 0, policy_version 400 (0.0021) +[2023-07-23 05:50:39,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 1642496. Throughput: 0: 914.2. Samples: 410856. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:50:39,764][00397] Avg episode reward: [(0, '12.066')] +[2023-07-23 05:50:44,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.8, 300 sec: 3596.2). Total num frames: 1667072. Throughput: 0: 918.8. Samples: 416128. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:50:44,761][00397] Avg episode reward: [(0, '12.609')] +[2023-07-23 05:50:47,540][07585] Updated weights for policy 0, policy_version 410 (0.0012) +[2023-07-23 05:50:49,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 1687552. Throughput: 0: 971.6. Samples: 423520. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 05:50:49,761][00397] Avg episode reward: [(0, '12.745')] +[2023-07-23 05:50:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3891.4, 300 sec: 3623.9). Total num frames: 1712128. Throughput: 0: 1002.7. Samples: 427288. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:50:54,761][00397] Avg episode reward: [(0, '12.956')] +[2023-07-23 05:50:54,773][07571] Saving new best policy, reward=12.956! +[2023-07-23 05:50:57,964][07585] Updated weights for policy 0, policy_version 420 (0.0024) +[2023-07-23 05:50:59,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3623.9). Total num frames: 1728512. Throughput: 0: 981.2. Samples: 432440. Policy #0 lag: (min: 0.0, avg: 1.6, max: 5.0) +[2023-07-23 05:50:59,762][00397] Avg episode reward: [(0, '13.127')] +[2023-07-23 05:50:59,764][07571] Saving new best policy, reward=13.127! 
+[2023-07-23 05:51:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.3, 300 sec: 3623.9). Total num frames: 1744896. Throughput: 0: 924.4. Samples: 437360. Policy #0 lag: (min: 0.0, avg: 1.2, max: 3.0) +[2023-07-23 05:51:04,766][00397] Avg episode reward: [(0, '12.639')] +[2023-07-23 05:51:09,272][07585] Updated weights for policy 0, policy_version 430 (0.0012) +[2023-07-23 05:51:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.3, 300 sec: 3610.0). Total num frames: 1761280. Throughput: 0: 914.7. Samples: 439752. Policy #0 lag: (min: 1.0, avg: 2.5, max: 5.0) +[2023-07-23 05:51:09,761][00397] Avg episode reward: [(0, '12.705')] +[2023-07-23 05:51:14,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 1773568. Throughput: 0: 915.2. Samples: 444696. Policy #0 lag: (min: 0.0, avg: 2.4, max: 6.0) +[2023-07-23 05:51:14,761][00397] Avg episode reward: [(0, '13.223')] +[2023-07-23 05:51:14,770][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000433_1773568.pth... +[2023-07-23 05:51:14,881][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000222_909312.pth +[2023-07-23 05:51:14,892][07571] Saving new best policy, reward=13.223! +[2023-07-23 05:51:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 1798144. Throughput: 0: 949.9. Samples: 451240. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:51:19,762][00397] Avg episode reward: [(0, '14.124')] +[2023-07-23 05:51:19,764][07571] Saving new best policy, reward=14.124! +[2023-07-23 05:51:20,530][07585] Updated weights for policy 0, policy_version 440 (0.0013) +[2023-07-23 05:51:24,759][00397] Fps is (10 sec: 5324.8, 60 sec: 3891.2, 300 sec: 3623.9). Total num frames: 1826816. Throughput: 0: 978.1. Samples: 454872. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:51:24,766][00397] Avg episode reward: [(0, '13.873')] +[2023-07-23 05:51:29,378][07585] Updated weights for policy 0, policy_version 450 (0.0012) +[2023-07-23 05:51:29,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 1843200. Throughput: 0: 998.0. Samples: 461040. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 05:51:29,763][00397] Avg episode reward: [(0, '13.419')] +[2023-07-23 05:51:34,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 1855488. Throughput: 0: 943.8. Samples: 465992. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:51:34,764][00397] Avg episode reward: [(0, '13.308')] +[2023-07-23 05:51:39,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3823.0, 300 sec: 3596.1). Total num frames: 1871872. Throughput: 0: 917.5. Samples: 468576. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:51:39,761][00397] Avg episode reward: [(0, '13.614')] +[2023-07-23 05:51:42,020][07585] Updated weights for policy 0, policy_version 460 (0.0022) +[2023-07-23 05:51:44,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 1884160. Throughput: 0: 904.9. Samples: 473160. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:51:44,761][00397] Avg episode reward: [(0, '13.045')] +[2023-07-23 05:51:49,761][00397] Fps is (10 sec: 2866.5, 60 sec: 3549.7, 300 sec: 3526.7). Total num frames: 1900544. Throughput: 0: 884.8. Samples: 477176. 
Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 05:51:49,763][00397] Avg episode reward: [(0, '14.554')] +[2023-07-23 05:51:49,769][07571] Saving new best policy, reward=14.554! +[2023-07-23 05:51:54,760][00397] Fps is (10 sec: 3276.4, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 1916928. Throughput: 0: 883.9. Samples: 479528. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:51:54,765][00397] Avg episode reward: [(0, '14.901')] +[2023-07-23 05:51:54,774][07571] Saving new best policy, reward=14.901! +[2023-07-23 05:51:57,048][07585] Updated weights for policy 0, policy_version 470 (0.0021) +[2023-07-23 05:51:59,761][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.2, 300 sec: 3554.5). Total num frames: 1933312. Throughput: 0: 881.7. Samples: 484376. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 05:51:59,764][00397] Avg episode reward: [(0, '16.028')] +[2023-07-23 05:51:59,769][07571] Saving new best policy, reward=16.028! +[2023-07-23 05:52:04,759][00397] Fps is (10 sec: 3277.2, 60 sec: 3413.3, 300 sec: 3554.5). Total num frames: 1949696. Throughput: 0: 829.9. Samples: 488584. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:52:04,764][00397] Avg episode reward: [(0, '15.230')] +[2023-07-23 05:52:09,759][00397] Fps is (10 sec: 2867.9, 60 sec: 3345.1, 300 sec: 3568.4). Total num frames: 1961984. Throughput: 0: 794.7. Samples: 490632. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:52:09,766][00397] Avg episode reward: [(0, '16.051')] +[2023-07-23 05:52:09,772][07571] Saving new best policy, reward=16.051! +[2023-07-23 05:52:10,152][07585] Updated weights for policy 0, policy_version 480 (0.0016) +[2023-07-23 05:52:14,760][00397] Fps is (10 sec: 2866.9, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 1978368. Throughput: 0: 763.0. Samples: 495376. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:52:14,763][00397] Avg episode reward: [(0, '14.833')] +[2023-07-23 05:52:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3596.2). Total num frames: 1998848. Throughput: 0: 765.9. Samples: 500456. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:52:19,761][00397] Avg episode reward: [(0, '14.112')] +[2023-07-23 05:52:23,444][07585] Updated weights for policy 0, policy_version 490 (0.0014) +[2023-07-23 05:52:24,759][00397] Fps is (10 sec: 3277.2, 60 sec: 3072.0, 300 sec: 3568.4). Total num frames: 2011136. Throughput: 0: 763.7. Samples: 502944. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:52:24,766][00397] Avg episode reward: [(0, '13.380')] +[2023-07-23 05:52:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3208.5, 300 sec: 3568.5). Total num frames: 2035712. Throughput: 0: 801.1. Samples: 509208. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:52:29,762][00397] Avg episode reward: [(0, '14.113')] +[2023-07-23 05:52:31,480][07585] Updated weights for policy 0, policy_version 500 (0.0012) +[2023-07-23 05:52:34,759][00397] Fps is (10 sec: 5324.9, 60 sec: 3481.6, 300 sec: 3610.1). Total num frames: 2064384. Throughput: 0: 875.1. Samples: 516552. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:52:34,761][00397] Avg episode reward: [(0, '14.484')] +[2023-07-23 05:52:39,759][00397] Fps is (10 sec: 4095.9, 60 sec: 3413.3, 300 sec: 3596.1). Total num frames: 2076672. Throughput: 0: 886.2. Samples: 519408. 
Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:52:39,765][00397] Avg episode reward: [(0, '14.920')] +[2023-07-23 05:52:43,258][07585] Updated weights for policy 0, policy_version 510 (0.0012) +[2023-07-23 05:52:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 2097152. Throughput: 0: 890.0. Samples: 524424. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:52:44,770][00397] Avg episode reward: [(0, '16.746')] +[2023-07-23 05:52:44,782][07571] Saving new best policy, reward=16.746! +[2023-07-23 05:52:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3550.0, 300 sec: 3623.9). Total num frames: 2113536. Throughput: 0: 906.7. Samples: 529384. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 05:52:49,761][00397] Avg episode reward: [(0, '17.764')] +[2023-07-23 05:52:49,765][07571] Saving new best policy, reward=17.764! +[2023-07-23 05:52:54,242][07585] Updated weights for policy 0, policy_version 520 (0.0030) +[2023-07-23 05:52:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2129920. Throughput: 0: 917.3. Samples: 531912. Policy #0 lag: (min: 0.0, avg: 2.2, max: 6.0) +[2023-07-23 05:52:54,761][00397] Avg episode reward: [(0, '17.303')] +[2023-07-23 05:52:59,759][00397] Fps is (10 sec: 3686.6, 60 sec: 3618.3, 300 sec: 3582.3). Total num frames: 2150400. Throughput: 0: 928.0. Samples: 537136. Policy #0 lag: (min: 0.0, avg: 2.3, max: 6.0) +[2023-07-23 05:52:59,763][00397] Avg episode reward: [(0, '18.922')] +[2023-07-23 05:52:59,772][07571] Saving new best policy, reward=18.922! +[2023-07-23 05:53:04,003][07585] Updated weights for policy 0, policy_version 530 (0.0012) +[2023-07-23 05:53:04,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 2170880. Throughput: 0: 976.5. Samples: 544400. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:53:04,762][00397] Avg episode reward: [(0, '19.373')] +[2023-07-23 05:53:04,777][07571] Saving new best policy, reward=19.373! +[2023-07-23 05:53:09,759][00397] Fps is (10 sec: 4095.9, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 2191360. Throughput: 0: 999.8. Samples: 547936. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:53:09,766][00397] Avg episode reward: [(0, '18.742')] +[2023-07-23 05:53:14,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 2203648. Throughput: 0: 975.6. Samples: 553112. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:53:14,769][00397] Avg episode reward: [(0, '19.254')] +[2023-07-23 05:53:14,856][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000539_2207744.pth... +[2023-07-23 05:53:14,993][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000331_1355776.pth +[2023-07-23 05:53:15,335][07585] Updated weights for policy 0, policy_version 540 (0.0012) +[2023-07-23 05:53:19,766][00397] Fps is (10 sec: 3274.5, 60 sec: 3754.2, 300 sec: 3610.0). Total num frames: 2224128. Throughput: 0: 924.5. Samples: 558160. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 05:53:19,769][00397] Avg episode reward: [(0, '19.316')] +[2023-07-23 05:53:24,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 2240512. Throughput: 0: 915.2. Samples: 560592. 
Policy #0 lag: (min: 0.0, avg: 2.8, max: 6.0) +[2023-07-23 05:53:24,766][00397] Avg episode reward: [(0, '19.241')] +[2023-07-23 05:53:26,898][07585] Updated weights for policy 0, policy_version 550 (0.0012) +[2023-07-23 05:53:29,759][00397] Fps is (10 sec: 3279.2, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 2256896. Throughput: 0: 917.9. Samples: 565728. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:53:29,761][00397] Avg episode reward: [(0, '19.970')] +[2023-07-23 05:53:29,768][07571] Saving new best policy, reward=19.970! +[2023-07-23 05:53:34,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 2285568. Throughput: 0: 955.4. Samples: 572376. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:53:34,765][00397] Avg episode reward: [(0, '19.135')] +[2023-07-23 05:53:36,930][07585] Updated weights for policy 0, policy_version 560 (0.0018) +[2023-07-23 05:53:39,762][00397] Fps is (10 sec: 4913.6, 60 sec: 3822.7, 300 sec: 3610.0). Total num frames: 2306048. Throughput: 0: 980.7. Samples: 576048. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 05:53:39,765][00397] Avg episode reward: [(0, '18.295')] +[2023-07-23 05:53:44,759][00397] Fps is (10 sec: 4095.9, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 2326528. Throughput: 0: 1001.4. Samples: 582200. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:53:44,762][00397] Avg episode reward: [(0, '18.717')] +[2023-07-23 05:53:47,153][07585] Updated weights for policy 0, policy_version 570 (0.0016) +[2023-07-23 05:53:49,759][00397] Fps is (10 sec: 3687.6, 60 sec: 3823.0, 300 sec: 3623.9). Total num frames: 2342912. Throughput: 0: 952.2. Samples: 587248. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 05:53:49,761][00397] Avg episode reward: [(0, '18.228')] +[2023-07-23 05:53:54,764][00397] Fps is (10 sec: 3275.1, 60 sec: 3822.6, 300 sec: 3623.9). Total num frames: 2359296. Throughput: 0: 930.7. Samples: 589824. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 05:53:54,767][00397] Avg episode reward: [(0, '18.102')] +[2023-07-23 05:53:58,393][07585] Updated weights for policy 0, policy_version 580 (0.0026) +[2023-07-23 05:53:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 2375680. Throughput: 0: 925.5. Samples: 594760. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:53:59,764][00397] Avg episode reward: [(0, '18.078')] +[2023-07-23 05:54:04,760][00397] Fps is (10 sec: 3278.1, 60 sec: 3686.3, 300 sec: 3568.4). Total num frames: 2392064. Throughput: 0: 922.1. Samples: 599648. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:54:04,763][00397] Avg episode reward: [(0, '18.772')] +[2023-07-23 05:54:09,762][00397] Fps is (10 sec: 3275.8, 60 sec: 3617.9, 300 sec: 3568.3). Total num frames: 2408448. Throughput: 0: 923.7. Samples: 602160. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 05:54:09,766][00397] Avg episode reward: [(0, '18.851')] +[2023-07-23 05:54:11,938][07585] Updated weights for policy 0, policy_version 590 (0.0023) +[2023-07-23 05:54:14,759][00397] Fps is (10 sec: 3277.1, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 2424832. Throughput: 0: 919.1. Samples: 607088. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:54:14,762][00397] Avg episode reward: [(0, '18.731')] +[2023-07-23 05:54:19,760][00397] Fps is (10 sec: 2867.8, 60 sec: 3550.2, 300 sec: 3554.5). Total num frames: 2437120. Throughput: 0: 856.7. Samples: 610928. 
Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:54:19,764][00397] Avg episode reward: [(0, '18.189')] +[2023-07-23 05:54:24,764][00397] Fps is (10 sec: 2456.4, 60 sec: 3481.3, 300 sec: 3554.4). Total num frames: 2449408. Throughput: 0: 820.6. Samples: 612976. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:54:24,766][00397] Avg episode reward: [(0, '18.526')] +[2023-07-23 05:54:26,815][07585] Updated weights for policy 0, policy_version 600 (0.0015) +[2023-07-23 05:54:29,759][00397] Fps is (10 sec: 2048.3, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 2457600. Throughput: 0: 771.9. Samples: 616936. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:54:29,762][00397] Avg episode reward: [(0, '18.726')] +[2023-07-23 05:54:34,761][00397] Fps is (10 sec: 2458.3, 60 sec: 3140.1, 300 sec: 3554.5). Total num frames: 2473984. Throughput: 0: 767.1. Samples: 621768. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:54:34,764][00397] Avg episode reward: [(0, '18.791')] +[2023-07-23 05:54:39,760][00397] Fps is (10 sec: 3686.1, 60 sec: 3140.4, 300 sec: 3568.4). Total num frames: 2494464. Throughput: 0: 764.3. Samples: 624216. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:54:39,766][00397] Avg episode reward: [(0, '18.609')] +[2023-07-23 05:54:40,123][07585] Updated weights for policy 0, policy_version 610 (0.0013) +[2023-07-23 05:54:44,759][00397] Fps is (10 sec: 4506.6, 60 sec: 3208.5, 300 sec: 3582.3). Total num frames: 2519040. Throughput: 0: 793.2. Samples: 630456. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:54:44,766][00397] Avg episode reward: [(0, '19.502')] +[2023-07-23 05:54:48,030][07585] Updated weights for policy 0, policy_version 620 (0.0015) +[2023-07-23 05:54:49,759][00397] Fps is (10 sec: 4915.5, 60 sec: 3345.1, 300 sec: 3610.1). Total num frames: 2543616. Throughput: 0: 849.8. Samples: 637888. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 05:54:49,766][00397] Avg episode reward: [(0, '20.082')] +[2023-07-23 05:54:49,770][07571] Saving new best policy, reward=20.082! +[2023-07-23 05:54:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3413.6, 300 sec: 3623.9). Total num frames: 2564096. Throughput: 0: 857.7. Samples: 640752. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:54:54,763][00397] Avg episode reward: [(0, '20.179')] +[2023-07-23 05:54:54,778][07571] Saving new best policy, reward=20.179! +[2023-07-23 05:54:59,656][07585] Updated weights for policy 0, policy_version 630 (0.0012) +[2023-07-23 05:54:59,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3623.9). Total num frames: 2580480. Throughput: 0: 858.7. Samples: 645728. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:54:59,765][00397] Avg episode reward: [(0, '19.584')] +[2023-07-23 05:55:04,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3413.4, 300 sec: 3623.9). Total num frames: 2596864. Throughput: 0: 883.4. Samples: 650680. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:55:04,761][00397] Avg episode reward: [(0, '19.154')] +[2023-07-23 05:55:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.5, 300 sec: 3596.1). Total num frames: 2613248. Throughput: 0: 892.7. Samples: 653144. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 05:55:09,766][00397] Avg episode reward: [(0, '20.428')] +[2023-07-23 05:55:09,770][07571] Saving new best policy, reward=20.428! 
+[2023-07-23 05:55:11,633][07585] Updated weights for policy 0, policy_version 640 (0.0017) +[2023-07-23 05:55:14,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 2633728. Throughput: 0: 918.4. Samples: 658264. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 05:55:14,765][00397] Avg episode reward: [(0, '20.910')] +[2023-07-23 05:55:14,781][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000643_2633728.pth... +[2023-07-23 05:55:14,907][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000433_1773568.pth +[2023-07-23 05:55:14,914][07571] Saving new best policy, reward=20.910! +[2023-07-23 05:55:19,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3596.1). Total num frames: 2654208. Throughput: 0: 967.9. Samples: 665320. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 05:55:19,764][00397] Avg episode reward: [(0, '21.042')] +[2023-07-23 05:55:19,768][07571] Saving new best policy, reward=21.042! +[2023-07-23 05:55:21,177][07585] Updated weights for policy 0, policy_version 650 (0.0013) +[2023-07-23 05:55:24,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3755.0, 300 sec: 3596.1). Total num frames: 2674688. Throughput: 0: 994.5. Samples: 668968. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 05:55:24,761][00397] Avg episode reward: [(0, '21.184')] +[2023-07-23 05:55:24,775][07571] Saving new best policy, reward=21.184! +[2023-07-23 05:55:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 2691072. Throughput: 0: 970.7. Samples: 674136. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 05:55:29,761][00397] Avg episode reward: [(0, '20.297')] +[2023-07-23 05:55:31,483][07585] Updated weights for policy 0, policy_version 660 (0.0012) +[2023-07-23 05:55:34,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 2707456. Throughput: 0: 918.2. Samples: 679208. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 05:55:34,764][00397] Avg episode reward: [(0, '19.890')] +[2023-07-23 05:55:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.3, 300 sec: 3596.1). Total num frames: 2727936. Throughput: 0: 909.7. Samples: 681688. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:55:39,761][00397] Avg episode reward: [(0, '19.163')] +[2023-07-23 05:55:43,825][07585] Updated weights for policy 0, policy_version 670 (0.0015) +[2023-07-23 05:55:44,759][00397] Fps is (10 sec: 3687.2, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 2744320. Throughput: 0: 910.0. Samples: 686680. Policy #0 lag: (min: 0.0, avg: 1.5, max: 5.0) +[2023-07-23 05:55:44,762][00397] Avg episode reward: [(0, '19.105')] +[2023-07-23 05:55:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 2764800. Throughput: 0: 948.8. Samples: 693376. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 05:55:49,765][00397] Avg episode reward: [(0, '19.442')] +[2023-07-23 05:55:53,332][07585] Updated weights for policy 0, policy_version 680 (0.0016) +[2023-07-23 05:55:54,759][00397] Fps is (10 sec: 4915.3, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 2793472. Throughput: 0: 975.5. Samples: 697040. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 05:55:54,762][00397] Avg episode reward: [(0, '19.095')] +[2023-07-23 05:55:59,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 2809856. Throughput: 0: 998.2. 
Samples: 703184. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:55:59,769][00397] Avg episode reward: [(0, '18.765')] +[2023-07-23 05:56:03,890][07585] Updated weights for policy 0, policy_version 690 (0.0012) +[2023-07-23 05:56:04,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3822.8, 300 sec: 3610.0). Total num frames: 2826240. Throughput: 0: 950.2. Samples: 708080. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:56:04,764][00397] Avg episode reward: [(0, '20.427')] +[2023-07-23 05:56:09,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 2842624. Throughput: 0: 926.9. Samples: 710680. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:56:09,762][00397] Avg episode reward: [(0, '19.524')] +[2023-07-23 05:56:14,759][00397] Fps is (10 sec: 3277.6, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 2859008. Throughput: 0: 921.4. Samples: 715600. Policy #0 lag: (min: 0.0, avg: 2.6, max: 6.0) +[2023-07-23 05:56:14,761][00397] Avg episode reward: [(0, '19.423')] +[2023-07-23 05:56:17,586][07585] Updated weights for policy 0, policy_version 700 (0.0018) +[2023-07-23 05:56:19,759][00397] Fps is (10 sec: 3686.6, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 2879488. Throughput: 0: 935.9. Samples: 721320. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:56:19,761][00397] Avg episode reward: [(0, '20.665')] +[2023-07-23 05:56:24,759][00397] Fps is (10 sec: 4505.5, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 2904064. Throughput: 0: 962.3. Samples: 724992. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:56:24,761][00397] Avg episode reward: [(0, '21.872')] +[2023-07-23 05:56:24,790][07571] Saving new best policy, reward=21.872! +[2023-07-23 05:56:24,830][07585] Updated weights for policy 0, policy_version 710 (0.0012) +[2023-07-23 05:56:29,759][00397] Fps is (10 sec: 3276.6, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 2912256. Throughput: 0: 977.6. Samples: 730672. Policy #0 lag: (min: 0.0, avg: 2.6, max: 5.0) +[2023-07-23 05:56:29,763][00397] Avg episode reward: [(0, '22.064')] +[2023-07-23 05:56:29,799][07571] Saving new best policy, reward=22.064! +[2023-07-23 05:56:34,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3686.5, 300 sec: 3582.3). Total num frames: 2928640. Throughput: 0: 913.2. Samples: 734472. Policy #0 lag: (min: 0.0, avg: 2.6, max: 6.0) +[2023-07-23 05:56:34,764][00397] Avg episode reward: [(0, '21.690')] +[2023-07-23 05:56:39,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 2945024. Throughput: 0: 873.2. Samples: 736336. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 05:56:39,767][00397] Avg episode reward: [(0, '23.004')] +[2023-07-23 05:56:39,781][07571] Saving new best policy, reward=23.004! +[2023-07-23 05:56:42,731][07585] Updated weights for policy 0, policy_version 720 (0.0015) +[2023-07-23 05:56:44,759][00397] Fps is (10 sec: 2867.3, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2957312. Throughput: 0: 823.5. Samples: 740240. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:56:44,764][00397] Avg episode reward: [(0, '22.988')] +[2023-07-23 05:56:49,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 2973696. Throughput: 0: 802.7. Samples: 744200. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 05:56:49,766][00397] Avg episode reward: [(0, '23.373')] +[2023-07-23 05:56:49,768][07571] Saving new best policy, reward=23.373! 
+[2023-07-23 05:56:54,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3140.3, 300 sec: 3554.5). Total num frames: 2981888. Throughput: 0: 786.1. Samples: 746056. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:56:54,763][00397] Avg episode reward: [(0, '22.149')] +[2023-07-23 05:56:55,630][07585] Updated weights for policy 0, policy_version 730 (0.0025) +[2023-07-23 05:56:59,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3568.4). Total num frames: 3002368. Throughput: 0: 784.2. Samples: 750888. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 05:56:59,767][00397] Avg episode reward: [(0, '21.729')] +[2023-07-23 05:57:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3345.2, 300 sec: 3610.0). Total num frames: 3026944. Throughput: 0: 819.9. Samples: 758216. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:57:04,761][00397] Avg episode reward: [(0, '20.243')] +[2023-07-23 05:57:05,706][07585] Updated weights for policy 0, policy_version 740 (0.0018) +[2023-07-23 05:57:09,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3413.4, 300 sec: 3623.9). Total num frames: 3047424. Throughput: 0: 819.0. Samples: 761848. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 05:57:09,761][00397] Avg episode reward: [(0, '19.341')] +[2023-07-23 05:57:14,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 3063808. Throughput: 0: 810.7. Samples: 767152. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:57:14,763][00397] Avg episode reward: [(0, '18.193')] +[2023-07-23 05:57:14,773][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000748_3063808.pth... +[2023-07-23 05:57:14,903][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000539_2207744.pth +[2023-07-23 05:57:15,631][07585] Updated weights for policy 0, policy_version 750 (0.0012) +[2023-07-23 05:57:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 3084288. Throughput: 0: 836.1. Samples: 772096. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 05:57:19,763][00397] Avg episode reward: [(0, '19.207')] +[2023-07-23 05:57:24,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3276.8, 300 sec: 3610.0). Total num frames: 3100672. Throughput: 0: 849.8. Samples: 774576. Policy #0 lag: (min: 0.0, avg: 2.5, max: 6.0) +[2023-07-23 05:57:24,761][00397] Avg episode reward: [(0, '18.915')] +[2023-07-23 05:57:28,081][07585] Updated weights for policy 0, policy_version 760 (0.0031) +[2023-07-23 05:57:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.4, 300 sec: 3568.4). Total num frames: 3117056. Throughput: 0: 874.3. Samples: 779584. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 05:57:29,765][00397] Avg episode reward: [(0, '19.180')] +[2023-07-23 05:57:34,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3596.2). Total num frames: 3137536. Throughput: 0: 938.0. Samples: 786408. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:57:34,765][00397] Avg episode reward: [(0, '20.523')] +[2023-07-23 05:57:37,301][07585] Updated weights for policy 0, policy_version 770 (0.0017) +[2023-07-23 05:57:39,760][00397] Fps is (10 sec: 4914.7, 60 sec: 3686.3, 300 sec: 3623.9). Total num frames: 3166208. Throughput: 0: 978.5. Samples: 790088. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:57:39,766][00397] Avg episode reward: [(0, '20.650')] +[2023-07-23 05:57:44,759][00397] Fps is (10 sec: 4095.9, 60 sec: 3686.4, 300 sec: 3610.0). 
Total num frames: 3178496. Throughput: 0: 1006.4. Samples: 796176. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:57:44,767][00397] Avg episode reward: [(0, '23.577')] +[2023-07-23 05:57:44,780][07571] Saving new best policy, reward=23.577! +[2023-07-23 05:57:48,397][07585] Updated weights for policy 0, policy_version 780 (0.0017) +[2023-07-23 05:57:49,759][00397] Fps is (10 sec: 3277.2, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 3198976. Throughput: 0: 952.0. Samples: 801056. Policy #0 lag: (min: 0.0, avg: 2.7, max: 6.0) +[2023-07-23 05:57:49,763][00397] Avg episode reward: [(0, '23.946')] +[2023-07-23 05:57:49,768][07571] Saving new best policy, reward=23.946! +[2023-07-23 05:57:54,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 3211264. Throughput: 0: 924.1. Samples: 803432. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:57:54,761][00397] Avg episode reward: [(0, '23.388')] +[2023-07-23 05:57:59,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 3231744. Throughput: 0: 916.8. Samples: 808408. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 05:57:59,761][00397] Avg episode reward: [(0, '24.025')] +[2023-07-23 05:57:59,763][07571] Saving new best policy, reward=24.025! +[2023-07-23 05:58:00,869][07585] Updated weights for policy 0, policy_version 790 (0.0020) +[2023-07-23 05:58:04,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3596.2). Total num frames: 3252224. Throughput: 0: 933.9. Samples: 814120. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:58:04,764][00397] Avg episode reward: [(0, '25.061')] +[2023-07-23 05:58:04,772][07571] Saving new best policy, reward=25.061! +[2023-07-23 05:58:09,759][00397] Fps is (10 sec: 4096.1, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 3272704. Throughput: 0: 957.7. Samples: 817672. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:58:09,761][00397] Avg episode reward: [(0, '22.740')] +[2023-07-23 05:58:09,937][07585] Updated weights for policy 0, policy_version 800 (0.0016) +[2023-07-23 05:58:14,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3624.0). Total num frames: 3293184. Throughput: 0: 997.5. Samples: 824472. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 05:58:14,761][00397] Avg episode reward: [(0, '21.724')] +[2023-07-23 05:58:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3623.9). Total num frames: 3309568. Throughput: 0: 959.1. Samples: 829568. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:58:19,761][00397] Avg episode reward: [(0, '21.670')] +[2023-07-23 05:58:20,671][07585] Updated weights for policy 0, policy_version 810 (0.0016) +[2023-07-23 05:58:24,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 3330048. Throughput: 0: 933.5. Samples: 832096. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 05:58:24,761][00397] Avg episode reward: [(0, '19.544')] +[2023-07-23 05:58:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 3346432. Throughput: 0: 911.3. Samples: 837184. Policy #0 lag: (min: 0.0, avg: 0.7, max: 4.0) +[2023-07-23 05:58:29,765][00397] Avg episode reward: [(0, '19.336')] +[2023-07-23 05:58:33,882][07585] Updated weights for policy 0, policy_version 820 (0.0012) +[2023-07-23 05:58:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 3362816. Throughput: 0: 913.4. 
Samples: 842160. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 05:58:34,761][00397] Avg episode reward: [(0, '18.236')] +[2023-07-23 05:58:39,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.5, 300 sec: 3596.2). Total num frames: 3387392. Throughput: 0: 942.8. Samples: 845856. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:58:39,761][00397] Avg episode reward: [(0, '19.934')] +[2023-07-23 05:58:41,524][07585] Updated weights for policy 0, policy_version 830 (0.0012) +[2023-07-23 05:58:44,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3891.2, 300 sec: 3623.9). Total num frames: 3411968. Throughput: 0: 996.1. Samples: 853232. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:58:44,761][00397] Avg episode reward: [(0, '20.573')] +[2023-07-23 05:58:49,760][00397] Fps is (10 sec: 3685.9, 60 sec: 3754.6, 300 sec: 3610.1). Total num frames: 3424256. Throughput: 0: 981.5. Samples: 858288. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:58:49,769][00397] Avg episode reward: [(0, '20.615')] +[2023-07-23 05:58:54,761][00397] Fps is (10 sec: 2457.0, 60 sec: 3754.5, 300 sec: 3596.1). Total num frames: 3436544. Throughput: 0: 944.1. Samples: 860160. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:58:54,772][00397] Avg episode reward: [(0, '22.036')] +[2023-07-23 05:58:55,322][07585] Updated weights for policy 0, policy_version 840 (0.0016) +[2023-07-23 05:58:59,760][00397] Fps is (10 sec: 2867.2, 60 sec: 3686.3, 300 sec: 3596.1). Total num frames: 3452928. Throughput: 0: 880.5. Samples: 864096. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:58:59,762][00397] Avg episode reward: [(0, '23.182')] +[2023-07-23 05:59:04,760][00397] Fps is (10 sec: 2867.7, 60 sec: 3549.8, 300 sec: 3582.3). Total num frames: 3465216. Throughput: 0: 853.3. Samples: 867968. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 05:59:04,763][00397] Avg episode reward: [(0, '23.272')] +[2023-07-23 05:59:09,352][07585] Updated weights for policy 0, policy_version 850 (0.0028) +[2023-07-23 05:59:09,760][00397] Fps is (10 sec: 2867.2, 60 sec: 3481.5, 300 sec: 3582.3). Total num frames: 3481600. Throughput: 0: 842.1. Samples: 869992. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 05:59:09,764][00397] Avg episode reward: [(0, '23.013')] +[2023-07-23 05:59:14,759][00397] Fps is (10 sec: 2457.7, 60 sec: 3276.8, 300 sec: 3568.4). Total num frames: 3489792. Throughput: 0: 815.8. Samples: 873896. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:59:14,762][00397] Avg episode reward: [(0, '22.850')] +[2023-07-23 05:59:14,781][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000852_3489792.pth... +[2023-07-23 05:59:14,981][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000643_2633728.pth +[2023-07-23 05:59:19,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3413.3, 300 sec: 3610.1). Total num frames: 3514368. Throughput: 0: 823.8. Samples: 879232. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 05:59:19,762][00397] Avg episode reward: [(0, '23.051')] +[2023-07-23 05:59:21,864][07585] Updated weights for policy 0, policy_version 860 (0.0013) +[2023-07-23 05:59:24,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3413.3, 300 sec: 3651.7). Total num frames: 3534848. Throughput: 0: 822.2. Samples: 882856. 
Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:59:24,764][00397] Avg episode reward: [(0, '21.010')] +[2023-07-23 05:59:29,761][00397] Fps is (10 sec: 4095.1, 60 sec: 3481.5, 300 sec: 3665.6). Total num frames: 3555328. Throughput: 0: 801.2. Samples: 889288. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 05:59:29,763][00397] Avg episode reward: [(0, '19.815')] +[2023-07-23 05:59:31,759][07585] Updated weights for policy 0, policy_version 870 (0.0015) +[2023-07-23 05:59:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3637.8). Total num frames: 3567616. Throughput: 0: 802.5. Samples: 894400. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 05:59:34,766][00397] Avg episode reward: [(0, '19.748')] +[2023-07-23 05:59:39,763][00397] Fps is (10 sec: 3276.1, 60 sec: 3344.8, 300 sec: 3623.9). Total num frames: 3588096. Throughput: 0: 817.2. Samples: 896936. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 05:59:39,766][00397] Avg episode reward: [(0, '19.825')] +[2023-07-23 05:59:44,760][00397] Fps is (10 sec: 3276.4, 60 sec: 3140.2, 300 sec: 3582.2). Total num frames: 3600384. Throughput: 0: 842.1. Samples: 901992. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 05:59:44,767][00397] Avg episode reward: [(0, '19.224')] +[2023-07-23 05:59:44,963][07585] Updated weights for policy 0, policy_version 880 (0.0012) +[2023-07-23 05:59:49,759][00397] Fps is (10 sec: 4097.8, 60 sec: 3413.4, 300 sec: 3610.0). Total num frames: 3629056. Throughput: 0: 879.3. Samples: 907536. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 05:59:49,767][00397] Avg episode reward: [(0, '19.618')] +[2023-07-23 05:59:53,161][07585] Updated weights for policy 0, policy_version 890 (0.0019) +[2023-07-23 05:59:54,759][00397] Fps is (10 sec: 4915.8, 60 sec: 3550.0, 300 sec: 3623.9). Total num frames: 3649536. Throughput: 0: 916.7. Samples: 911240. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 05:59:54,765][00397] Avg episode reward: [(0, '20.300')] +[2023-07-23 05:59:59,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3637.8). Total num frames: 3670016. Throughput: 0: 994.0. Samples: 918624. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 05:59:59,761][00397] Avg episode reward: [(0, '21.045')] +[2023-07-23 06:00:03,779][07585] Updated weights for policy 0, policy_version 900 (0.0012) +[2023-07-23 06:00:04,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 3686400. Throughput: 0: 986.0. Samples: 923600. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:00:04,761][00397] Avg episode reward: [(0, '21.786')] +[2023-07-23 06:00:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.5, 300 sec: 3623.9). Total num frames: 3702784. Throughput: 0: 961.4. Samples: 926120. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:00:09,771][00397] Avg episode reward: [(0, '22.509')] +[2023-07-23 06:00:14,765][00397] Fps is (10 sec: 3274.7, 60 sec: 3822.5, 300 sec: 3610.0). Total num frames: 3719168. Throughput: 0: 927.7. Samples: 931040. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:00:14,768][00397] Avg episode reward: [(0, '22.141')] +[2023-07-23 06:00:16,097][07585] Updated weights for policy 0, policy_version 910 (0.0013) +[2023-07-23 06:00:19,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3754.6, 300 sec: 3610.0). Total num frames: 3739648. Throughput: 0: 926.9. Samples: 936112. 
Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:00:19,762][00397] Avg episode reward: [(0, '20.769')] +[2023-07-23 06:00:24,759][00397] Fps is (10 sec: 4508.5, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 3764224. Throughput: 0: 941.1. Samples: 939280. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:00:24,766][00397] Avg episode reward: [(0, '20.725')] +[2023-07-23 06:00:26,063][07585] Updated weights for policy 0, policy_version 920 (0.0018) +[2023-07-23 06:00:29,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3823.1, 300 sec: 3651.7). Total num frames: 3784704. Throughput: 0: 994.3. Samples: 946736. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:00:29,760][00397] Avg episode reward: [(0, '20.110')] +[2023-07-23 06:00:34,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3959.5, 300 sec: 3651.7). Total num frames: 3805184. Throughput: 0: 1001.2. Samples: 952592. Policy #0 lag: (min: 0.0, avg: 0.7, max: 3.0) +[2023-07-23 06:00:34,761][00397] Avg episode reward: [(0, '20.489')] +[2023-07-23 06:00:35,754][07585] Updated weights for policy 0, policy_version 930 (0.0013) +[2023-07-23 06:00:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.5, 300 sec: 3651.7). Total num frames: 3821568. Throughput: 0: 975.5. Samples: 955136. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:00:39,761][00397] Avg episode reward: [(0, '22.539')] +[2023-07-23 06:00:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3959.6, 300 sec: 3637.8). Total num frames: 3837952. Throughput: 0: 922.3. Samples: 960128. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:00:44,762][00397] Avg episode reward: [(0, '22.893')] +[2023-07-23 06:00:48,549][07585] Updated weights for policy 0, policy_version 940 (0.0012) +[2023-07-23 06:00:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 3854336. Throughput: 0: 922.3. Samples: 965104. Policy #0 lag: (min: 0.0, avg: 1.5, max: 5.0) +[2023-07-23 06:00:49,761][00397] Avg episode reward: [(0, '22.685')] +[2023-07-23 06:00:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 3874816. Throughput: 0: 922.5. Samples: 967632. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:00:54,761][00397] Avg episode reward: [(0, '22.059')] +[2023-07-23 06:00:57,514][07585] Updated weights for policy 0, policy_version 950 (0.0035) +[2023-07-23 06:00:59,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 3899392. Throughput: 0: 972.1. Samples: 974776. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:00:59,761][00397] Avg episode reward: [(0, '22.523')] +[2023-07-23 06:01:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3651.7). Total num frames: 3919872. Throughput: 0: 1008.7. Samples: 981504. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:01:04,766][00397] Avg episode reward: [(0, '20.602')] +[2023-07-23 06:01:08,873][07585] Updated weights for policy 0, policy_version 960 (0.0013) +[2023-07-23 06:01:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3651.7). Total num frames: 3936256. Throughput: 0: 992.2. Samples: 983928. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:01:09,763][00397] Avg episode reward: [(0, '21.805')] +[2023-07-23 06:01:14,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3823.3, 300 sec: 3623.9). Total num frames: 3948544. Throughput: 0: 924.3. Samples: 988328. 
Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:01:14,765][00397] Avg episode reward: [(0, '22.396')] +[2023-07-23 06:01:14,783][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000964_3948544.pth... +[2023-07-23 06:01:14,981][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000748_3063808.pth +[2023-07-23 06:01:19,759][00397] Fps is (10 sec: 2457.5, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 3960832. Throughput: 0: 879.6. Samples: 992176. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:01:19,762][00397] Avg episode reward: [(0, '23.333')] +[2023-07-23 06:01:23,561][07585] Updated weights for policy 0, policy_version 970 (0.0015) +[2023-07-23 06:01:24,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3596.2). Total num frames: 3973120. Throughput: 0: 864.9. Samples: 994056. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:01:24,765][00397] Avg episode reward: [(0, '23.935')] +[2023-07-23 06:01:29,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3596.2). Total num frames: 3989504. Throughput: 0: 843.2. Samples: 998072. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:01:29,763][00397] Avg episode reward: [(0, '24.714')] +[2023-07-23 06:01:34,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 4001792. Throughput: 0: 831.1. Samples: 1002504. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:01:34,764][00397] Avg episode reward: [(0, '24.820')] +[2023-07-23 06:01:37,931][07585] Updated weights for policy 0, policy_version 980 (0.0015) +[2023-07-23 06:01:39,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 4022272. Throughput: 0: 832.4. Samples: 1005088. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:01:39,761][00397] Avg episode reward: [(0, '25.549')] +[2023-07-23 06:01:39,772][07571] Saving new best policy, reward=25.549! +[2023-07-23 06:01:44,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 4038656. Throughput: 0: 812.4. Samples: 1011336. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:01:44,761][00397] Avg episode reward: [(0, '24.317')] +[2023-07-23 06:01:47,856][07585] Updated weights for policy 0, policy_version 990 (0.0014) +[2023-07-23 06:01:49,759][00397] Fps is (10 sec: 3686.5, 60 sec: 3413.3, 300 sec: 3651.7). Total num frames: 4059136. Throughput: 0: 774.0. Samples: 1016336. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:01:49,765][00397] Avg episode reward: [(0, '23.511')] +[2023-07-23 06:01:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3637.8). Total num frames: 4075520. Throughput: 0: 774.6. Samples: 1018784. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:01:54,766][00397] Avg episode reward: [(0, '22.647')] +[2023-07-23 06:01:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3610.0). Total num frames: 4091904. Throughput: 0: 787.0. Samples: 1023744. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:01:59,761][00397] Avg episode reward: [(0, '22.884')] +[2023-07-23 06:02:01,112][07585] Updated weights for policy 0, policy_version 1000 (0.0013) +[2023-07-23 06:02:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 3596.1). Total num frames: 4108288. Throughput: 0: 809.6. Samples: 1028608. 
Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:02:04,767][00397] Avg episode reward: [(0, '22.847')] +[2023-07-23 06:02:09,684][07585] Updated weights for policy 0, policy_version 1010 (0.0017) +[2023-07-23 06:02:09,761][00397] Fps is (10 sec: 4504.6, 60 sec: 3344.9, 300 sec: 3637.8). Total num frames: 4136960. Throughput: 0: 846.2. Samples: 1032136. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:02:09,763][00397] Avg episode reward: [(0, '23.910')] +[2023-07-23 06:02:14,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3481.6, 300 sec: 3637.8). Total num frames: 4157440. Throughput: 0: 918.9. Samples: 1039424. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:02:14,764][00397] Avg episode reward: [(0, '24.272')] +[2023-07-23 06:02:19,759][00397] Fps is (10 sec: 3687.2, 60 sec: 3549.9, 300 sec: 3637.8). Total num frames: 4173824. Throughput: 0: 944.5. Samples: 1045008. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:02:19,766][00397] Avg episode reward: [(0, '24.250')] +[2023-07-23 06:02:21,323][07585] Updated weights for policy 0, policy_version 1020 (0.0016) +[2023-07-23 06:02:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 4190208. Throughput: 0: 941.5. Samples: 1047456. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:02:24,768][00397] Avg episode reward: [(0, '22.622')] +[2023-07-23 06:02:29,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 4202496. Throughput: 0: 914.5. Samples: 1052488. Policy #0 lag: (min: 0.0, avg: 2.0, max: 6.0) +[2023-07-23 06:02:29,765][00397] Avg episode reward: [(0, '22.333')] +[2023-07-23 06:02:33,070][07585] Updated weights for policy 0, policy_version 1030 (0.0012) +[2023-07-23 06:02:34,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 4222976. Throughput: 0: 912.9. Samples: 1057416. Policy #0 lag: (min: 0.0, avg: 1.3, max: 5.0) +[2023-07-23 06:02:34,768][00397] Avg episode reward: [(0, '22.017')] +[2023-07-23 06:02:39,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 4243456. Throughput: 0: 918.4. Samples: 1060112. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:02:39,763][00397] Avg episode reward: [(0, '23.345')] +[2023-07-23 06:02:42,100][07585] Updated weights for policy 0, policy_version 1040 (0.0013) +[2023-07-23 06:02:44,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 4268032. Throughput: 0: 971.9. Samples: 1067480. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:02:44,766][00397] Avg episode reward: [(0, '23.379')] +[2023-07-23 06:02:49,759][00397] Fps is (10 sec: 4505.5, 60 sec: 3822.9, 300 sec: 3651.7). Total num frames: 4288512. Throughput: 0: 1007.6. Samples: 1073952. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:02:49,764][00397] Avg episode reward: [(0, '24.196')] +[2023-07-23 06:02:52,791][07585] Updated weights for policy 0, policy_version 1050 (0.0026) +[2023-07-23 06:02:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 4304896. Throughput: 0: 986.2. Samples: 1076512. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:02:54,767][00397] Avg episode reward: [(0, '24.616')] +[2023-07-23 06:02:59,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 4321280. Throughput: 0: 934.0. Samples: 1081456. 
Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:02:59,766][00397] Avg episode reward: [(0, '24.037')] +[2023-07-23 06:03:04,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3822.8, 300 sec: 3610.0). Total num frames: 4337664. Throughput: 0: 920.1. Samples: 1086416. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:03:04,765][00397] Avg episode reward: [(0, '24.805')] +[2023-07-23 06:03:05,520][07585] Updated weights for policy 0, policy_version 1060 (0.0012) +[2023-07-23 06:03:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3610.0). Total num frames: 4358144. Throughput: 0: 919.8. Samples: 1088848. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:03:09,761][00397] Avg episode reward: [(0, '24.394')] +[2023-07-23 06:03:14,759][00397] Fps is (10 sec: 4097.0, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 4378624. Throughput: 0: 957.2. Samples: 1095560. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:03:14,761][00397] Avg episode reward: [(0, '24.344')] +[2023-07-23 06:03:14,770][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001069_4378624.pth... +[2023-07-23 06:03:14,890][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000852_3489792.pth +[2023-07-23 06:03:14,992][07585] Updated weights for policy 0, policy_version 1070 (0.0015) +[2023-07-23 06:03:19,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 4403200. Throughput: 0: 1006.8. Samples: 1102720. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:03:19,766][00397] Avg episode reward: [(0, '23.750')] +[2023-07-23 06:03:24,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3637.8). Total num frames: 4419584. Throughput: 0: 1000.2. Samples: 1105120. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:03:24,765][00397] Avg episode reward: [(0, '22.471')] +[2023-07-23 06:03:25,236][07585] Updated weights for policy 0, policy_version 1080 (0.0012) +[2023-07-23 06:03:29,763][00397] Fps is (10 sec: 3275.3, 60 sec: 3890.9, 300 sec: 3637.7). Total num frames: 4435968. Throughput: 0: 945.7. Samples: 1110040. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:03:29,769][00397] Avg episode reward: [(0, '21.387')] +[2023-07-23 06:03:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 4452352. Throughput: 0: 906.7. Samples: 1114752. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:03:34,767][00397] Avg episode reward: [(0, '22.154')] +[2023-07-23 06:03:39,765][00397] Fps is (10 sec: 2457.1, 60 sec: 3617.7, 300 sec: 3554.4). Total num frames: 4460544. Throughput: 0: 892.3. Samples: 1116672. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 06:03:39,769][00397] Avg episode reward: [(0, '21.889')] +[2023-07-23 06:03:39,975][07585] Updated weights for policy 0, policy_version 1090 (0.0019) +[2023-07-23 06:03:44,759][00397] Fps is (10 sec: 2457.5, 60 sec: 3481.6, 300 sec: 3568.4). Total num frames: 4476928. Throughput: 0: 869.3. Samples: 1120576. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 06:03:44,766][00397] Avg episode reward: [(0, '23.092')] +[2023-07-23 06:03:49,759][00397] Fps is (10 sec: 3279.0, 60 sec: 3413.3, 300 sec: 3582.3). Total num frames: 4493312. Throughput: 0: 865.5. Samples: 1125360. 
Policy #0 lag: (min: 0.0, avg: 1.1, max: 3.0) +[2023-07-23 06:03:49,764][00397] Avg episode reward: [(0, '23.882')] +[2023-07-23 06:03:52,269][07585] Updated weights for policy 0, policy_version 1100 (0.0012) +[2023-07-23 06:03:54,760][00397] Fps is (10 sec: 3276.4, 60 sec: 3413.2, 300 sec: 3582.3). Total num frames: 4509696. Throughput: 0: 867.0. Samples: 1127864. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:03:54,763][00397] Avg episode reward: [(0, '24.227')] +[2023-07-23 06:03:59,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 4521984. Throughput: 0: 813.0. Samples: 1132144. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:03:59,762][00397] Avg episode reward: [(0, '24.053')] +[2023-07-23 06:04:04,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3413.5, 300 sec: 3596.2). Total num frames: 4542464. Throughput: 0: 757.7. Samples: 1136816. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:04:04,761][00397] Avg episode reward: [(0, '24.104')] +[2023-07-23 06:04:06,040][07585] Updated weights for policy 0, policy_version 1110 (0.0020) +[2023-07-23 06:04:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3610.0). Total num frames: 4554752. Throughput: 0: 758.9. Samples: 1139272. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 06:04:09,766][00397] Avg episode reward: [(0, '23.376')] +[2023-07-23 06:04:14,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3208.5, 300 sec: 3582.3). Total num frames: 4571136. Throughput: 0: 760.1. Samples: 1144240. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:04:14,761][00397] Avg episode reward: [(0, '22.067')] +[2023-07-23 06:04:18,610][07585] Updated weights for policy 0, policy_version 1120 (0.0015) +[2023-07-23 06:04:19,772][00397] Fps is (10 sec: 3681.5, 60 sec: 3139.6, 300 sec: 3582.1). Total num frames: 4591616. Throughput: 0: 774.5. Samples: 1149616. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:04:19,775][00397] Avg episode reward: [(0, '19.473')] +[2023-07-23 06:04:24,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3276.8, 300 sec: 3596.2). Total num frames: 4616192. Throughput: 0: 815.1. Samples: 1153344. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:04:24,762][00397] Avg episode reward: [(0, '21.008')] +[2023-07-23 06:04:26,447][07585] Updated weights for policy 0, policy_version 1130 (0.0013) +[2023-07-23 06:04:29,759][00397] Fps is (10 sec: 4921.8, 60 sec: 3413.6, 300 sec: 3637.8). Total num frames: 4640768. Throughput: 0: 893.0. Samples: 1160760. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:04:29,763][00397] Avg episode reward: [(0, '20.752')] +[2023-07-23 06:04:34,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3624.0). Total num frames: 4657152. Throughput: 0: 899.7. Samples: 1165848. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:04:34,762][00397] Avg episode reward: [(0, '21.399')] +[2023-07-23 06:04:38,772][07585] Updated weights for policy 0, policy_version 1140 (0.0015) +[2023-07-23 06:04:39,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3550.3, 300 sec: 3637.8). Total num frames: 4673536. Throughput: 0: 898.7. Samples: 1168304. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:04:39,765][00397] Avg episode reward: [(0, '21.551')] +[2023-07-23 06:04:44,760][00397] Fps is (10 sec: 3276.4, 60 sec: 3549.8, 300 sec: 3596.1). Total num frames: 4689920. Throughput: 0: 913.4. Samples: 1173248. 
Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:04:44,764][00397] Avg episode reward: [(0, '22.015')] +[2023-07-23 06:04:49,762][00397] Fps is (10 sec: 3685.2, 60 sec: 3617.9, 300 sec: 3596.1). Total num frames: 4710400. Throughput: 0: 921.5. Samples: 1178288. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:04:49,772][00397] Avg episode reward: [(0, '22.840')] +[2023-07-23 06:04:49,775][07585] Updated weights for policy 0, policy_version 1150 (0.0018) +[2023-07-23 06:04:54,759][00397] Fps is (10 sec: 3686.9, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 4726784. Throughput: 0: 936.9. Samples: 1181432. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:04:54,761][00397] Avg episode reward: [(0, '23.205')] +[2023-07-23 06:04:58,633][07585] Updated weights for policy 0, policy_version 1160 (0.0012) +[2023-07-23 06:04:59,759][00397] Fps is (10 sec: 4507.0, 60 sec: 3891.2, 300 sec: 3623.9). Total num frames: 4755456. Throughput: 0: 988.6. Samples: 1188728. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:04:59,761][00397] Avg episode reward: [(0, '24.743')] +[2023-07-23 06:05:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3623.9). Total num frames: 4771840. Throughput: 0: 1001.7. Samples: 1194680. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:05:04,761][00397] Avg episode reward: [(0, '26.324')] +[2023-07-23 06:05:04,767][07571] Saving new best policy, reward=26.324! +[2023-07-23 06:05:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3624.0). Total num frames: 4788224. Throughput: 0: 973.9. Samples: 1197168. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:05:09,764][00397] Avg episode reward: [(0, '26.495')] +[2023-07-23 06:05:09,767][07571] Saving new best policy, reward=26.495! +[2023-07-23 06:05:11,278][07585] Updated weights for policy 0, policy_version 1170 (0.0012) +[2023-07-23 06:05:14,760][00397] Fps is (10 sec: 3276.5, 60 sec: 3891.1, 300 sec: 3610.0). Total num frames: 4804608. Throughput: 0: 917.0. Samples: 1202024. Policy #0 lag: (min: 0.0, avg: 2.6, max: 6.0) +[2023-07-23 06:05:14,768][00397] Avg episode reward: [(0, '26.467')] +[2023-07-23 06:05:14,784][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001173_4804608.pth... +[2023-07-23 06:05:14,921][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000964_3948544.pth +[2023-07-23 06:05:19,766][00397] Fps is (10 sec: 3274.5, 60 sec: 3823.3, 300 sec: 3582.2). Total num frames: 4820992. Throughput: 0: 914.0. Samples: 1206984. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:05:19,768][00397] Avg episode reward: [(0, '25.758')] +[2023-07-23 06:05:22,287][07585] Updated weights for policy 0, policy_version 1180 (0.0013) +[2023-07-23 06:05:24,759][00397] Fps is (10 sec: 3277.1, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 4837376. Throughput: 0: 915.7. Samples: 1209512. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:05:24,761][00397] Avg episode reward: [(0, '26.557')] +[2023-07-23 06:05:24,803][07571] Saving new best policy, reward=26.557! +[2023-07-23 06:05:29,759][00397] Fps is (10 sec: 4508.7, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 4866048. Throughput: 0: 964.1. Samples: 1216632. 
Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:05:29,762][00397] Avg episode reward: [(0, '26.020')] +[2023-07-23 06:05:31,451][07585] Updated weights for policy 0, policy_version 1190 (0.0012) +[2023-07-23 06:05:34,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 4886528. Throughput: 0: 1005.9. Samples: 1223552. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:05:34,761][00397] Avg episode reward: [(0, '26.008')] +[2023-07-23 06:05:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 4902912. Throughput: 0: 990.9. Samples: 1226024. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:05:39,769][00397] Avg episode reward: [(0, '25.526')] +[2023-07-23 06:05:42,479][07585] Updated weights for policy 0, policy_version 1200 (0.0012) +[2023-07-23 06:05:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3823.0, 300 sec: 3610.0). Total num frames: 4919296. Throughput: 0: 939.2. Samples: 1230992. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:05:44,762][00397] Avg episode reward: [(0, '25.552')] +[2023-07-23 06:05:49,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3686.6, 300 sec: 3582.3). Total num frames: 4931584. Throughput: 0: 903.5. Samples: 1235336. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:05:49,761][00397] Avg episode reward: [(0, '25.123')] +[2023-07-23 06:05:54,762][00397] Fps is (10 sec: 2866.2, 60 sec: 3686.2, 300 sec: 3554.5). Total num frames: 4947968. Throughput: 0: 893.3. Samples: 1237368. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:05:54,768][00397] Avg episode reward: [(0, '24.977')] +[2023-07-23 06:05:56,331][07585] Updated weights for policy 0, policy_version 1210 (0.0022) +[2023-07-23 06:05:59,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 4964352. Throughput: 0: 900.5. Samples: 1242544. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:05:59,762][00397] Avg episode reward: [(0, '23.802')] +[2023-07-23 06:06:04,759][00397] Fps is (10 sec: 3277.9, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 4980736. Throughput: 0: 899.0. Samples: 1247432. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:06:04,762][00397] Avg episode reward: [(0, '23.312')] +[2023-07-23 06:06:09,759][00397] Fps is (10 sec: 2867.3, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 4993024. Throughput: 0: 887.8. Samples: 1249464. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 06:06:09,762][00397] Avg episode reward: [(0, '23.039')] +[2023-07-23 06:06:10,071][07585] Updated weights for policy 0, policy_version 1220 (0.0012) +[2023-07-23 06:06:14,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3413.4, 300 sec: 3554.5). Total num frames: 5009408. Throughput: 0: 815.3. Samples: 1253320. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:06:14,761][00397] Avg episode reward: [(0, '23.597')] +[2023-07-23 06:06:19,763][00397] Fps is (10 sec: 3275.4, 60 sec: 3413.5, 300 sec: 3568.3). Total num frames: 5025792. Throughput: 0: 745.5. Samples: 1257104. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 06:06:19,767][00397] Avg episode reward: [(0, '23.810')] +[2023-07-23 06:06:24,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3276.8, 300 sec: 3540.6). Total num frames: 5033984. Throughput: 0: 734.4. Samples: 1259072. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:06:24,762][00397] Avg episode reward: [(0, '24.053')] +[2023-07-23 06:06:25,433][07585] Updated weights for policy 0, policy_version 1230 (0.0012) +[2023-07-23 06:06:29,759][00397] Fps is (10 sec: 2458.6, 60 sec: 3072.0, 300 sec: 3554.5). Total num frames: 5050368. Throughput: 0: 728.5. Samples: 1263776. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:06:29,765][00397] Avg episode reward: [(0, '24.023')] +[2023-07-23 06:06:34,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3072.0, 300 sec: 3554.5). Total num frames: 5070848. Throughput: 0: 751.1. Samples: 1269136. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:06:34,761][00397] Avg episode reward: [(0, '25.800')] +[2023-07-23 06:06:36,114][07585] Updated weights for policy 0, policy_version 1240 (0.0015) +[2023-07-23 06:06:39,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3208.5, 300 sec: 3582.3). Total num frames: 5095424. Throughput: 0: 786.5. Samples: 1272760. Policy #0 lag: (min: 0.0, avg: 2.4, max: 6.0) +[2023-07-23 06:06:39,761][00397] Avg episode reward: [(0, '26.243')] +[2023-07-23 06:06:44,763][00397] Fps is (10 sec: 4094.2, 60 sec: 3208.3, 300 sec: 3568.3). Total num frames: 5111808. Throughput: 0: 832.6. Samples: 1280016. Policy #0 lag: (min: 0.0, avg: 2.4, max: 6.0) +[2023-07-23 06:06:44,766][00397] Avg episode reward: [(0, '26.697')] +[2023-07-23 06:06:44,773][07571] Saving new best policy, reward=26.697! +[2023-07-23 06:06:46,041][07585] Updated weights for policy 0, policy_version 1250 (0.0014) +[2023-07-23 06:06:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 5132288. Throughput: 0: 829.5. Samples: 1284760. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:06:49,766][00397] Avg episode reward: [(0, '26.902')] +[2023-07-23 06:06:49,769][07571] Saving new best policy, reward=26.902! +[2023-07-23 06:06:54,759][00397] Fps is (10 sec: 3688.0, 60 sec: 3345.3, 300 sec: 3582.3). Total num frames: 5148672. Throughput: 0: 839.6. Samples: 1287248. Policy #0 lag: (min: 0.0, avg: 1.5, max: 5.0) +[2023-07-23 06:06:54,761][00397] Avg episode reward: [(0, '27.048')] +[2023-07-23 06:06:54,776][07571] Saving new best policy, reward=27.048! +[2023-07-23 06:06:57,700][07585] Updated weights for policy 0, policy_version 1260 (0.0016) +[2023-07-23 06:06:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 5165056. Throughput: 0: 861.5. Samples: 1292088. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:06:59,763][00397] Avg episode reward: [(0, '27.476')] +[2023-07-23 06:06:59,774][07571] Saving new best policy, reward=27.476! +[2023-07-23 06:07:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 5181440. Throughput: 0: 887.0. Samples: 1297016. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:07:04,764][00397] Avg episode reward: [(0, '27.083')] +[2023-07-23 06:07:09,397][07585] Updated weights for policy 0, policy_version 1270 (0.0021) +[2023-07-23 06:07:09,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5206016. Throughput: 0: 916.4. Samples: 1300312. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:07:09,761][00397] Avg episode reward: [(0, '25.855')] +[2023-07-23 06:07:14,761][00397] Fps is (10 sec: 4914.2, 60 sec: 3686.3, 300 sec: 3582.2). Total num frames: 5230592. Throughput: 0: 978.1. Samples: 1307792. 
Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:07:14,764][00397] Avg episode reward: [(0, '25.954')] +[2023-07-23 06:07:14,776][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001277_5230592.pth... +[2023-07-23 06:07:14,931][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001069_4378624.pth +[2023-07-23 06:07:17,507][07585] Updated weights for policy 0, policy_version 1280 (0.0012) +[2023-07-23 06:07:19,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.7, 300 sec: 3582.3). Total num frames: 5246976. Throughput: 0: 982.0. Samples: 1313328. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:07:19,761][00397] Avg episode reward: [(0, '27.277')] +[2023-07-23 06:07:24,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 5263360. Throughput: 0: 958.2. Samples: 1315880. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:07:24,767][00397] Avg episode reward: [(0, '25.722')] +[2023-07-23 06:07:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 5279744. Throughput: 0: 906.0. Samples: 1320784. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:07:29,761][00397] Avg episode reward: [(0, '26.016')] +[2023-07-23 06:07:31,526][07585] Updated weights for policy 0, policy_version 1290 (0.0015) +[2023-07-23 06:07:34,759][00397] Fps is (10 sec: 3277.0, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 5296128. Throughput: 0: 911.1. Samples: 1325760. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:07:34,762][00397] Avg episode reward: [(0, '27.065')] +[2023-07-23 06:07:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 5316608. Throughput: 0: 911.8. Samples: 1328280. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:07:39,766][00397] Avg episode reward: [(0, '27.411')] +[2023-07-23 06:07:40,652][07585] Updated weights for policy 0, policy_version 1300 (0.0013) +[2023-07-23 06:07:44,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3823.2, 300 sec: 3568.4). Total num frames: 5341184. Throughput: 0: 968.5. Samples: 1335672. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:07:44,761][00397] Avg episode reward: [(0, '27.590')] +[2023-07-23 06:07:44,768][07571] Saving new best policy, reward=27.590! +[2023-07-23 06:07:49,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 5361664. Throughput: 0: 1001.4. Samples: 1342080. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 06:07:49,761][00397] Avg episode reward: [(0, '25.786')] +[2023-07-23 06:07:50,479][07585] Updated weights for policy 0, policy_version 1310 (0.0013) +[2023-07-23 06:07:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 5378048. Throughput: 0: 983.6. Samples: 1344576. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:07:54,765][00397] Avg episode reward: [(0, '25.806')] +[2023-07-23 06:07:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 5394432. Throughput: 0: 927.9. Samples: 1349544. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:07:59,761][00397] Avg episode reward: [(0, '26.183')] +[2023-07-23 06:08:02,826][07585] Updated weights for policy 0, policy_version 1320 (0.0014) +[2023-07-23 06:08:04,765][00397] Fps is (10 sec: 3274.7, 60 sec: 3822.5, 300 sec: 3568.3). Total num frames: 5410816. Throughput: 0: 913.1. Samples: 1354424. 
Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:08:04,773][00397] Avg episode reward: [(0, '25.977')] +[2023-07-23 06:08:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 5427200. Throughput: 0: 910.6. Samples: 1356856. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:08:09,763][00397] Avg episode reward: [(0, '26.306')] +[2023-07-23 06:08:13,778][07585] Updated weights for policy 0, policy_version 1330 (0.0015) +[2023-07-23 06:08:14,759][00397] Fps is (10 sec: 3688.8, 60 sec: 3618.3, 300 sec: 3540.6). Total num frames: 5447680. Throughput: 0: 941.2. Samples: 1363136. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:08:14,761][00397] Avg episode reward: [(0, '27.447')] +[2023-07-23 06:08:19,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 5476352. Throughput: 0: 992.2. Samples: 1370408. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:08:19,761][00397] Avg episode reward: [(0, '28.909')] +[2023-07-23 06:08:19,766][07571] Saving new best policy, reward=28.909! +[2023-07-23 06:08:23,445][07585] Updated weights for policy 0, policy_version 1340 (0.0012) +[2023-07-23 06:08:24,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3823.0, 300 sec: 3582.3). Total num frames: 5492736. Throughput: 0: 993.1. Samples: 1372968. Policy #0 lag: (min: 0.0, avg: 2.5, max: 4.0) +[2023-07-23 06:08:24,761][00397] Avg episode reward: [(0, '28.185')] +[2023-07-23 06:08:29,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 5500928. Throughput: 0: 912.7. Samples: 1376744. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:08:29,766][00397] Avg episode reward: [(0, '26.834')] +[2023-07-23 06:08:34,762][00397] Fps is (10 sec: 2047.3, 60 sec: 3617.9, 300 sec: 3568.4). Total num frames: 5513216. Throughput: 0: 857.7. Samples: 1380680. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:08:34,764][00397] Avg episode reward: [(0, '25.688')] +[2023-07-23 06:08:39,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3554.5). Total num frames: 5525504. Throughput: 0: 843.9. Samples: 1382552. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:08:39,761][00397] Avg episode reward: [(0, '25.236')] +[2023-07-23 06:08:40,885][07585] Updated weights for policy 0, policy_version 1350 (0.0019) +[2023-07-23 06:08:44,759][00397] Fps is (10 sec: 2868.2, 60 sec: 3345.1, 300 sec: 3554.5). Total num frames: 5541888. Throughput: 0: 821.5. Samples: 1386512. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:08:44,766][00397] Avg episode reward: [(0, '25.650')] +[2023-07-23 06:08:49,760][00397] Fps is (10 sec: 3276.5, 60 sec: 3276.8, 300 sec: 3554.5). Total num frames: 5558272. Throughput: 0: 800.5. Samples: 1390440. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:08:49,764][00397] Avg episode reward: [(0, '24.142')] +[2023-07-23 06:08:53,994][07585] Updated weights for policy 0, policy_version 1360 (0.0019) +[2023-07-23 06:08:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3568.4). Total num frames: 5574656. Throughput: 0: 787.6. Samples: 1392296. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:08:54,761][00397] Avg episode reward: [(0, '24.468')] +[2023-07-23 06:08:59,759][00397] Fps is (10 sec: 3686.7, 60 sec: 3345.1, 300 sec: 3568.4). Total num frames: 5595136. Throughput: 0: 804.1. Samples: 1399320. 
Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:08:59,761][00397] Avg episode reward: [(0, '23.873')] +[2023-07-23 06:09:02,921][07585] Updated weights for policy 0, policy_version 1370 (0.0012) +[2023-07-23 06:09:04,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3413.7, 300 sec: 3596.1). Total num frames: 5615616. Throughput: 0: 794.0. Samples: 1406136. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:09:04,761][00397] Avg episode reward: [(0, '24.030')] +[2023-07-23 06:09:09,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3481.6, 300 sec: 3610.0). Total num frames: 5636096. Throughput: 0: 792.0. Samples: 1408608. Policy #0 lag: (min: 0.0, avg: 2.3, max: 6.0) +[2023-07-23 06:09:09,761][00397] Avg episode reward: [(0, '24.843')] +[2023-07-23 06:09:14,578][07585] Updated weights for policy 0, policy_version 1380 (0.0012) +[2023-07-23 06:09:14,761][00397] Fps is (10 sec: 3685.5, 60 sec: 3413.2, 300 sec: 3596.3). Total num frames: 5652480. Throughput: 0: 818.1. Samples: 1413560. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:09:14,764][00397] Avg episode reward: [(0, '26.420')] +[2023-07-23 06:09:14,779][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001380_5652480.pth... +[2023-07-23 06:09:14,930][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001173_4804608.pth +[2023-07-23 06:09:19,760][00397] Fps is (10 sec: 2866.8, 60 sec: 3140.2, 300 sec: 3554.5). Total num frames: 5664768. Throughput: 0: 840.2. Samples: 1418488. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:09:19,762][00397] Avg episode reward: [(0, '26.030')] +[2023-07-23 06:09:24,759][00397] Fps is (10 sec: 2867.8, 60 sec: 3140.3, 300 sec: 3526.7). Total num frames: 5681152. Throughput: 0: 852.1. Samples: 1420896. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:09:24,766][00397] Avg episode reward: [(0, '26.004')] +[2023-07-23 06:09:26,487][07585] Updated weights for policy 0, policy_version 1390 (0.0019) +[2023-07-23 06:09:29,759][00397] Fps is (10 sec: 4096.5, 60 sec: 3413.3, 300 sec: 3554.5). Total num frames: 5705728. Throughput: 0: 900.1. Samples: 1427016. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:09:29,768][00397] Avg episode reward: [(0, '28.054')] +[2023-07-23 06:09:34,759][00397] Fps is (10 sec: 4915.3, 60 sec: 3618.3, 300 sec: 3582.3). Total num frames: 5730304. Throughput: 0: 974.8. Samples: 1434304. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:09:34,761][00397] Avg episode reward: [(0, '28.266')] +[2023-07-23 06:09:35,563][07585] Updated weights for policy 0, policy_version 1400 (0.0012) +[2023-07-23 06:09:39,759][00397] Fps is (10 sec: 4096.1, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 5746688. Throughput: 0: 1000.7. Samples: 1437328. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:09:39,763][00397] Avg episode reward: [(0, '28.090')] +[2023-07-23 06:09:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 5763072. Throughput: 0: 955.2. Samples: 1442304. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:09:44,764][00397] Avg episode reward: [(0, '27.319')] +[2023-07-23 06:09:46,769][07585] Updated weights for policy 0, policy_version 1410 (0.0015) +[2023-07-23 06:09:49,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3686.3, 300 sec: 3568.4). Total num frames: 5779456. Throughput: 0: 916.2. Samples: 1447368. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:09:49,769][00397] Avg episode reward: [(0, '27.846')] +[2023-07-23 06:09:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3540.6). Total num frames: 5799936. Throughput: 0: 915.0. Samples: 1449784. Policy #0 lag: (min: 0.0, avg: 2.3, max: 6.0) +[2023-07-23 06:09:54,773][00397] Avg episode reward: [(0, '26.321')] +[2023-07-23 06:09:58,784][07585] Updated weights for policy 0, policy_version 1420 (0.0021) +[2023-07-23 06:09:59,759][00397] Fps is (10 sec: 3687.3, 60 sec: 3686.4, 300 sec: 3540.6). Total num frames: 5816320. Throughput: 0: 918.8. Samples: 1454904. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:09:59,768][00397] Avg episode reward: [(0, '26.317')] +[2023-07-23 06:10:04,761][00397] Fps is (10 sec: 4504.5, 60 sec: 3822.8, 300 sec: 3582.2). Total num frames: 5844992. Throughput: 0: 971.2. Samples: 1462192. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:10:04,763][00397] Avg episode reward: [(0, '24.878')] +[2023-07-23 06:10:07,189][07585] Updated weights for policy 0, policy_version 1430 (0.0012) +[2023-07-23 06:10:09,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 5861376. Throughput: 0: 998.9. Samples: 1465848. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:10:09,761][00397] Avg episode reward: [(0, '24.035')] +[2023-07-23 06:10:14,759][00397] Fps is (10 sec: 3277.5, 60 sec: 3754.8, 300 sec: 3582.3). Total num frames: 5877760. Throughput: 0: 977.1. Samples: 1470984. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:10:14,761][00397] Avg episode reward: [(0, '25.280')] +[2023-07-23 06:10:19,583][07585] Updated weights for policy 0, policy_version 1440 (0.0017) +[2023-07-23 06:10:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.3, 300 sec: 3596.1). Total num frames: 5898240. Throughput: 0: 930.0. Samples: 1476152. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:10:19,761][00397] Avg episode reward: [(0, '25.889')] +[2023-07-23 06:10:24,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3554.5). Total num frames: 5914624. Throughput: 0: 917.7. Samples: 1478624. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:10:24,763][00397] Avg episode reward: [(0, '25.976')] +[2023-07-23 06:10:29,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3754.7, 300 sec: 3540.6). Total num frames: 5931008. Throughput: 0: 917.9. Samples: 1483608. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:10:29,769][00397] Avg episode reward: [(0, '27.753')] +[2023-07-23 06:10:31,251][07585] Updated weights for policy 0, policy_version 1450 (0.0014) +[2023-07-23 06:10:34,759][00397] Fps is (10 sec: 4096.1, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 5955584. Throughput: 0: 951.3. Samples: 1490176. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:10:34,761][00397] Avg episode reward: [(0, '27.235')] +[2023-07-23 06:10:39,466][07585] Updated weights for policy 0, policy_version 1460 (0.0020) +[2023-07-23 06:10:39,759][00397] Fps is (10 sec: 4915.4, 60 sec: 3891.2, 300 sec: 3596.1). Total num frames: 5980160. Throughput: 0: 979.4. Samples: 1493856. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:10:39,761][00397] Avg episode reward: [(0, '28.674')] +[2023-07-23 06:10:44,762][00397] Fps is (10 sec: 4094.6, 60 sec: 3891.0, 300 sec: 3610.0). Total num frames: 5996544. Throughput: 0: 1004.7. Samples: 1500120. 
Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:10:44,765][00397] Avg episode reward: [(0, '26.675')] +[2023-07-23 06:10:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.4, 300 sec: 3610.1). Total num frames: 6012928. Throughput: 0: 954.0. Samples: 1505120. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:10:49,766][00397] Avg episode reward: [(0, '25.774')] +[2023-07-23 06:10:52,172][07585] Updated weights for policy 0, policy_version 1470 (0.0020) +[2023-07-23 06:10:54,759][00397] Fps is (10 sec: 3277.9, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 6029312. Throughput: 0: 922.0. Samples: 1507336. Policy #0 lag: (min: 0.0, avg: 1.3, max: 3.0) +[2023-07-23 06:10:54,767][00397] Avg episode reward: [(0, '24.690')] +[2023-07-23 06:10:59,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 6041600. Throughput: 0: 893.2. Samples: 1511176. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:10:59,762][00397] Avg episode reward: [(0, '24.489')] +[2023-07-23 06:11:04,764][00397] Fps is (10 sec: 2046.9, 60 sec: 3413.2, 300 sec: 3582.2). Total num frames: 6049792. Throughput: 0: 860.3. Samples: 1514872. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:11:04,769][00397] Avg episode reward: [(0, '24.147')] +[2023-07-23 06:11:06,842][07585] Updated weights for policy 0, policy_version 1480 (0.0013) +[2023-07-23 06:11:09,761][00397] Fps is (10 sec: 2457.0, 60 sec: 3413.2, 300 sec: 3582.2). Total num frames: 6066176. Throughput: 0: 846.9. Samples: 1516736. Policy #0 lag: (min: 1.0, avg: 2.5, max: 5.0) +[2023-07-23 06:11:09,766][00397] Avg episode reward: [(0, '25.278')] +[2023-07-23 06:11:14,759][00397] Fps is (10 sec: 3278.4, 60 sec: 3413.3, 300 sec: 3582.3). Total num frames: 6082560. Throughput: 0: 843.7. Samples: 1521576. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:11:14,764][00397] Avg episode reward: [(0, '25.439')] +[2023-07-23 06:11:14,775][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001485_6082560.pth... +[2023-07-23 06:11:14,964][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001277_5230592.pth +[2023-07-23 06:11:19,759][00397] Fps is (10 sec: 3277.6, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6098944. Throughput: 0: 809.8. Samples: 1526616. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:11:19,761][00397] Avg episode reward: [(0, '26.057')] +[2023-07-23 06:11:19,956][07585] Updated weights for policy 0, policy_version 1490 (0.0016) +[2023-07-23 06:11:24,759][00397] Fps is (10 sec: 3277.0, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6115328. Throughput: 0: 781.9. Samples: 1529040. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:11:24,761][00397] Avg episode reward: [(0, '26.130')] +[2023-07-23 06:11:29,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3345.0, 300 sec: 3596.1). Total num frames: 6131712. Throughput: 0: 753.4. Samples: 1534024. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:11:29,770][00397] Avg episode reward: [(0, '26.556')] +[2023-07-23 06:11:32,992][07585] Updated weights for policy 0, policy_version 1500 (0.0012) +[2023-07-23 06:11:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3568.4). Total num frames: 6148096. Throughput: 0: 750.2. Samples: 1538880. 
Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:11:34,761][00397] Avg episode reward: [(0, '28.028')] +[2023-07-23 06:11:39,759][00397] Fps is (10 sec: 3277.6, 60 sec: 3072.0, 300 sec: 3568.4). Total num frames: 6164480. Throughput: 0: 758.0. Samples: 1541448. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:11:39,761][00397] Avg episode reward: [(0, '26.494')] +[2023-07-23 06:11:43,627][07585] Updated weights for policy 0, policy_version 1510 (0.0024) +[2023-07-23 06:11:44,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3208.7, 300 sec: 3582.3). Total num frames: 6189056. Throughput: 0: 806.9. Samples: 1547488. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:11:44,762][00397] Avg episode reward: [(0, '27.076')] +[2023-07-23 06:11:49,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6213632. Throughput: 0: 888.3. Samples: 1554840. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:11:49,761][00397] Avg episode reward: [(0, '25.540')] +[2023-07-23 06:11:52,988][07585] Updated weights for policy 0, policy_version 1520 (0.0014) +[2023-07-23 06:11:54,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6230016. Throughput: 0: 914.2. Samples: 1557872. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:11:54,761][00397] Avg episode reward: [(0, '26.673')] +[2023-07-23 06:11:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 6246400. Throughput: 0: 918.4. Samples: 1562904. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:11:59,762][00397] Avg episode reward: [(0, '27.266')] +[2023-07-23 06:12:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3550.2, 300 sec: 3582.3). Total num frames: 6262784. Throughput: 0: 916.1. Samples: 1567840. Policy #0 lag: (min: 0.0, avg: 1.6, max: 5.0) +[2023-07-23 06:12:04,766][00397] Avg episode reward: [(0, '27.823')] +[2023-07-23 06:12:04,942][07585] Updated weights for policy 0, policy_version 1530 (0.0012) +[2023-07-23 06:12:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3550.0, 300 sec: 3554.5). Total num frames: 6279168. Throughput: 0: 916.1. Samples: 1570264. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:12:09,765][00397] Avg episode reward: [(0, '27.258')] +[2023-07-23 06:12:14,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3568.4). Total num frames: 6299648. Throughput: 0: 917.7. Samples: 1575320. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:12:14,761][00397] Avg episode reward: [(0, '26.907')] +[2023-07-23 06:12:16,183][07585] Updated weights for policy 0, policy_version 1540 (0.0014) +[2023-07-23 06:12:19,759][00397] Fps is (10 sec: 4915.0, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 6328320. Throughput: 0: 973.0. Samples: 1582664. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:12:19,761][00397] Avg episode reward: [(0, '26.286')] +[2023-07-23 06:12:24,761][00397] Fps is (10 sec: 4504.5, 60 sec: 3822.8, 300 sec: 3610.0). Total num frames: 6344704. Throughput: 0: 998.3. Samples: 1586376. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:12:24,764][00397] Avg episode reward: [(0, '27.232')] +[2023-07-23 06:12:24,882][07585] Updated weights for policy 0, policy_version 1550 (0.0012) +[2023-07-23 06:12:29,760][00397] Fps is (10 sec: 3686.0, 60 sec: 3891.3, 300 sec: 3623.9). Total num frames: 6365184. Throughput: 0: 980.8. Samples: 1591624. 
Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:12:29,766][00397] Avg episode reward: [(0, '26.973')] +[2023-07-23 06:12:34,759][00397] Fps is (10 sec: 3687.3, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 6381568. Throughput: 0: 928.7. Samples: 1596632. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:12:34,762][00397] Avg episode reward: [(0, '25.866')] +[2023-07-23 06:12:38,039][07585] Updated weights for policy 0, policy_version 1560 (0.0013) +[2023-07-23 06:12:39,759][00397] Fps is (10 sec: 3277.2, 60 sec: 3891.2, 300 sec: 3582.3). Total num frames: 6397952. Throughput: 0: 915.6. Samples: 1599072. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:12:39,766][00397] Avg episode reward: [(0, '25.729')] +[2023-07-23 06:12:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 6414336. Throughput: 0: 913.8. Samples: 1604024. Policy #0 lag: (min: 0.0, avg: 2.2, max: 6.0) +[2023-07-23 06:12:44,765][00397] Avg episode reward: [(0, '25.459')] +[2023-07-23 06:12:47,872][07585] Updated weights for policy 0, policy_version 1570 (0.0026) +[2023-07-23 06:12:49,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 6434816. Throughput: 0: 948.8. Samples: 1610536. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:12:49,762][00397] Avg episode reward: [(0, '26.017')] +[2023-07-23 06:12:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 6459392. Throughput: 0: 974.4. Samples: 1614112. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:12:54,764][00397] Avg episode reward: [(0, '25.429')] +[2023-07-23 06:12:57,935][07585] Updated weights for policy 0, policy_version 1580 (0.0012) +[2023-07-23 06:12:59,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3891.2, 300 sec: 3624.0). Total num frames: 6479872. Throughput: 0: 1001.6. Samples: 1620392. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:12:59,763][00397] Avg episode reward: [(0, '25.578')] +[2023-07-23 06:13:04,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3623.9). Total num frames: 6496256. Throughput: 0: 947.2. Samples: 1625288. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:13:04,761][00397] Avg episode reward: [(0, '24.529')] +[2023-07-23 06:13:09,492][07585] Updated weights for policy 0, policy_version 1590 (0.0020) +[2023-07-23 06:13:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 6512640. Throughput: 0: 917.6. Samples: 1627664. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:13:09,763][00397] Avg episode reward: [(0, '25.201')] +[2023-07-23 06:13:14,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3568.4). Total num frames: 6529024. Throughput: 0: 911.7. Samples: 1632648. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:13:14,766][00397] Avg episode reward: [(0, '25.660')] +[2023-07-23 06:13:14,781][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001594_6529024.pth... +[2023-07-23 06:13:14,937][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001380_5652480.pth +[2023-07-23 06:13:19,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 6545408. Throughput: 0: 920.5. Samples: 1638056. 
Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:13:19,762][00397] Avg episode reward: [(0, '26.312')] +[2023-07-23 06:13:21,738][07585] Updated weights for policy 0, policy_version 1600 (0.0012) +[2023-07-23 06:13:24,759][00397] Fps is (10 sec: 3686.5, 60 sec: 3686.5, 300 sec: 3610.0). Total num frames: 6565888. Throughput: 0: 934.8. Samples: 1641136. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:13:24,763][00397] Avg episode reward: [(0, '26.094')] +[2023-07-23 06:13:29,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3610.1). Total num frames: 6578176. Throughput: 0: 931.7. Samples: 1645952. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:13:29,766][00397] Avg episode reward: [(0, '25.322')] +[2023-07-23 06:13:34,708][07585] Updated weights for policy 0, policy_version 1610 (0.0012) +[2023-07-23 06:13:34,764][00397] Fps is (10 sec: 2865.6, 60 sec: 3549.5, 300 sec: 3623.9). Total num frames: 6594560. Throughput: 0: 873.0. Samples: 1649824. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:13:34,767][00397] Avg episode reward: [(0, '25.704')] +[2023-07-23 06:13:39,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3413.3, 300 sec: 3596.1). Total num frames: 6602752. Throughput: 0: 836.4. Samples: 1651752. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:13:39,766][00397] Avg episode reward: [(0, '25.697')] +[2023-07-23 06:13:44,759][00397] Fps is (10 sec: 2049.1, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 6615040. Throughput: 0: 784.5. Samples: 1655696. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:13:44,761][00397] Avg episode reward: [(0, '25.977')] +[2023-07-23 06:13:49,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 6631424. Throughput: 0: 761.6. Samples: 1659560. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:13:49,761][00397] Avg episode reward: [(0, '27.111')] +[2023-07-23 06:13:51,598][07585] Updated weights for policy 0, policy_version 1620 (0.0012) +[2023-07-23 06:13:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 3568.4). Total num frames: 6647808. Throughput: 0: 761.6. Samples: 1661936. Policy #0 lag: (min: 0.0, avg: 1.6, max: 5.0) +[2023-07-23 06:13:54,761][00397] Avg episode reward: [(0, '26.825')] +[2023-07-23 06:13:59,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3140.3, 300 sec: 3568.4). Total num frames: 6668288. Throughput: 0: 773.0. Samples: 1667432. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:13:59,761][00397] Avg episode reward: [(0, '25.894')] +[2023-07-23 06:14:00,824][07585] Updated weights for policy 0, policy_version 1630 (0.0014) +[2023-07-23 06:14:04,760][00397] Fps is (10 sec: 4914.6, 60 sec: 3345.0, 300 sec: 3596.1). Total num frames: 6696960. Throughput: 0: 816.2. Samples: 1674784. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:14:04,769][00397] Avg episode reward: [(0, '25.362')] +[2023-07-23 06:14:09,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3345.1, 300 sec: 3596.2). Total num frames: 6713344. Throughput: 0: 826.7. Samples: 1678336. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:14:09,769][00397] Avg episode reward: [(0, '26.740')] +[2023-07-23 06:14:10,619][07585] Updated weights for policy 0, policy_version 1640 (0.0013) +[2023-07-23 06:14:14,759][00397] Fps is (10 sec: 3277.2, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6729728. Throughput: 0: 830.0. Samples: 1683304. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:14:14,761][00397] Avg episode reward: [(0, '27.132')] +[2023-07-23 06:14:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3610.0). Total num frames: 6746112. Throughput: 0: 853.3. Samples: 1688216. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:14:19,761][00397] Avg episode reward: [(0, '27.091')] +[2023-07-23 06:14:22,946][07585] Updated weights for policy 0, policy_version 1650 (0.0019) +[2023-07-23 06:14:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 6762496. Throughput: 0: 867.4. Samples: 1690784. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:14:24,768][00397] Avg episode reward: [(0, '27.088')] +[2023-07-23 06:14:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3554.5). Total num frames: 6778880. Throughput: 0: 888.9. Samples: 1695696. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:14:29,763][00397] Avg episode reward: [(0, '27.429')] +[2023-07-23 06:14:33,928][07585] Updated weights for policy 0, policy_version 1660 (0.0018) +[2023-07-23 06:14:34,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3481.9, 300 sec: 3582.3). Total num frames: 6803456. Throughput: 0: 954.7. Samples: 1702520. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:14:34,761][00397] Avg episode reward: [(0, '25.568')] +[2023-07-23 06:14:39,759][00397] Fps is (10 sec: 4915.1, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 6828032. Throughput: 0: 982.6. Samples: 1706152. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:14:39,761][00397] Avg episode reward: [(0, '24.012')] +[2023-07-23 06:14:42,871][07585] Updated weights for policy 0, policy_version 1670 (0.0012) +[2023-07-23 06:14:44,761][00397] Fps is (10 sec: 4095.3, 60 sec: 3822.8, 300 sec: 3610.0). Total num frames: 6844416. Throughput: 0: 990.7. Samples: 1712016. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:14:44,765][00397] Avg episode reward: [(0, '24.484')] +[2023-07-23 06:14:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 6860800. Throughput: 0: 937.8. Samples: 1716984. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:14:49,771][00397] Avg episode reward: [(0, '24.332')] +[2023-07-23 06:14:54,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 6877184. Throughput: 0: 913.4. Samples: 1719440. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:14:54,761][00397] Avg episode reward: [(0, '23.536')] +[2023-07-23 06:14:55,272][07585] Updated weights for policy 0, policy_version 1680 (0.0012) +[2023-07-23 06:14:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3554.5). Total num frames: 6893568. Throughput: 0: 916.1. Samples: 1724528. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:14:59,761][00397] Avg episode reward: [(0, '22.201')] +[2023-07-23 06:15:04,761][00397] Fps is (10 sec: 4095.0, 60 sec: 3686.3, 300 sec: 3582.2). Total num frames: 6918144. Throughput: 0: 934.0. Samples: 1730248. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:15:04,768][00397] Avg episode reward: [(0, '25.106')] +[2023-07-23 06:15:06,122][07585] Updated weights for policy 0, policy_version 1690 (0.0013) +[2023-07-23 06:15:09,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3596.2). Total num frames: 6938624. Throughput: 0: 956.4. Samples: 1733824. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:15:09,761][00397] Avg episode reward: [(0, '24.960')] +[2023-07-23 06:15:14,759][00397] Fps is (10 sec: 4097.0, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 6959104. Throughput: 0: 1004.3. Samples: 1740888. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:15:14,761][00397] Avg episode reward: [(0, '25.903')] +[2023-07-23 06:15:14,771][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001699_6959104.pth... +[2023-07-23 06:15:14,939][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001485_6082560.pth +[2023-07-23 06:15:15,280][07585] Updated weights for policy 0, policy_version 1700 (0.0013) +[2023-07-23 06:15:19,760][00397] Fps is (10 sec: 3686.1, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 6975488. Throughput: 0: 958.7. Samples: 1745664. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:15:19,766][00397] Avg episode reward: [(0, '25.987')] +[2023-07-23 06:15:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 6991872. Throughput: 0: 932.6. Samples: 1748120. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:15:24,765][00397] Avg episode reward: [(0, '26.724')] +[2023-07-23 06:15:27,787][07585] Updated weights for policy 0, policy_version 1710 (0.0013) +[2023-07-23 06:15:29,759][00397] Fps is (10 sec: 3277.1, 60 sec: 3822.9, 300 sec: 3568.4). Total num frames: 7008256. Throughput: 0: 912.9. Samples: 1753096. Policy #0 lag: (min: 0.0, avg: 2.5, max: 5.0) +[2023-07-23 06:15:29,767][00397] Avg episode reward: [(0, '27.627')] +[2023-07-23 06:15:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3540.6). Total num frames: 7024640. Throughput: 0: 911.8. Samples: 1758016. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:15:34,765][00397] Avg episode reward: [(0, '27.046')] +[2023-07-23 06:15:38,862][07585] Updated weights for policy 0, policy_version 1720 (0.0015) +[2023-07-23 06:15:39,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 7053312. Throughput: 0: 931.2. Samples: 1761344. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:15:39,764][00397] Avg episode reward: [(0, '26.097')] +[2023-07-23 06:15:44,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3823.0, 300 sec: 3596.1). Total num frames: 7073792. Throughput: 0: 981.0. Samples: 1768672. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:15:44,769][00397] Avg episode reward: [(0, '23.245')] +[2023-07-23 06:15:47,592][07585] Updated weights for policy 0, policy_version 1730 (0.0016) +[2023-07-23 06:15:49,760][00397] Fps is (10 sec: 3276.4, 60 sec: 3754.6, 300 sec: 3582.3). Total num frames: 7086080. Throughput: 0: 978.2. Samples: 1774264. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:15:49,763][00397] Avg episode reward: [(0, '23.618')] +[2023-07-23 06:15:54,763][00397] Fps is (10 sec: 2866.0, 60 sec: 3754.4, 300 sec: 3596.1). Total num frames: 7102464. Throughput: 0: 947.6. Samples: 1776472. Policy #0 lag: (min: 0.0, avg: 2.4, max: 5.0) +[2023-07-23 06:15:54,771][00397] Avg episode reward: [(0, '23.638')] +[2023-07-23 06:15:59,761][00397] Fps is (10 sec: 3276.3, 60 sec: 3754.5, 300 sec: 3624.0). Total num frames: 7118848. Throughput: 0: 876.2. Samples: 1780320. 
Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:15:59,764][00397] Avg episode reward: [(0, '21.986')] +[2023-07-23 06:16:04,051][07585] Updated weights for policy 0, policy_version 1740 (0.0015) +[2023-07-23 06:16:04,760][00397] Fps is (10 sec: 2868.0, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 7131136. Throughput: 0: 854.6. Samples: 1784120. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:16:04,763][00397] Avg episode reward: [(0, '22.277')] +[2023-07-23 06:16:09,759][00397] Fps is (10 sec: 2048.5, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 7139328. Throughput: 0: 843.7. Samples: 1786088. Policy #0 lag: (min: 0.0, avg: 2.4, max: 4.0) +[2023-07-23 06:16:09,761][00397] Avg episode reward: [(0, '23.398')] +[2023-07-23 06:16:14,761][00397] Fps is (10 sec: 2457.4, 60 sec: 3276.7, 300 sec: 3582.2). Total num frames: 7155712. Throughput: 0: 817.0. Samples: 1789864. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:16:14,763][00397] Avg episode reward: [(0, '23.468')] +[2023-07-23 06:16:17,652][07585] Updated weights for policy 0, policy_version 1750 (0.0015) +[2023-07-23 06:16:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 7172096. Throughput: 0: 812.3. Samples: 1794568. Policy #0 lag: (min: 0.0, avg: 2.2, max: 4.0) +[2023-07-23 06:16:19,765][00397] Avg episode reward: [(0, '25.733')] +[2023-07-23 06:16:24,759][00397] Fps is (10 sec: 3687.3, 60 sec: 3345.1, 300 sec: 3596.2). Total num frames: 7192576. Throughput: 0: 813.3. Samples: 1797944. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:16:24,761][00397] Avg episode reward: [(0, '26.880')] +[2023-07-23 06:16:27,393][07585] Updated weights for policy 0, policy_version 1760 (0.0012) +[2023-07-23 06:16:29,763][00397] Fps is (10 sec: 3684.7, 60 sec: 3344.8, 300 sec: 3596.1). Total num frames: 7208960. Throughput: 0: 780.7. Samples: 1803808. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:16:29,767][00397] Avg episode reward: [(0, '27.051')] +[2023-07-23 06:16:34,764][00397] Fps is (10 sec: 3684.4, 60 sec: 3413.0, 300 sec: 3610.0). Total num frames: 7229440. Throughput: 0: 769.9. Samples: 1808912. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:16:34,767][00397] Avg episode reward: [(0, '27.564')] +[2023-07-23 06:16:39,759][00397] Fps is (10 sec: 3688.1, 60 sec: 3208.5, 300 sec: 3582.3). Total num frames: 7245824. Throughput: 0: 776.1. Samples: 1811392. Policy #0 lag: (min: 0.0, avg: 2.2, max: 5.0) +[2023-07-23 06:16:39,761][00397] Avg episode reward: [(0, '28.173')] +[2023-07-23 06:16:40,468][07585] Updated weights for policy 0, policy_version 1770 (0.0012) +[2023-07-23 06:16:44,760][00397] Fps is (10 sec: 3278.6, 60 sec: 3140.3, 300 sec: 3554.5). Total num frames: 7262208. Throughput: 0: 800.8. Samples: 1816352. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:16:44,764][00397] Avg episode reward: [(0, '27.358')] +[2023-07-23 06:16:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3276.9, 300 sec: 3568.4). Total num frames: 7282688. Throughput: 0: 844.1. Samples: 1822104. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:16:49,763][00397] Avg episode reward: [(0, '27.728')] +[2023-07-23 06:16:50,788][07585] Updated weights for policy 0, policy_version 1780 (0.0016) +[2023-07-23 06:16:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3413.6, 300 sec: 3596.1). Total num frames: 7307264. Throughput: 0: 881.6. Samples: 1825760. 
Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:16:54,771][00397] Avg episode reward: [(0, '26.415')] +[2023-07-23 06:16:59,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3413.5, 300 sec: 3596.1). Total num frames: 7323648. Throughput: 0: 951.7. Samples: 1832688. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:16:59,766][00397] Avg episode reward: [(0, '26.690')] +[2023-07-23 06:17:01,048][07585] Updated weights for policy 0, policy_version 1790 (0.0019) +[2023-07-23 06:17:04,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3550.0, 300 sec: 3610.0). Total num frames: 7344128. Throughput: 0: 958.9. Samples: 1837720. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:17:04,768][00397] Avg episode reward: [(0, '27.560')] +[2023-07-23 06:17:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 7360512. Throughput: 0: 939.2. Samples: 1840208. Policy #0 lag: (min: 0.0, avg: 2.4, max: 6.0) +[2023-07-23 06:17:09,766][00397] Avg episode reward: [(0, '28.016')] +[2023-07-23 06:17:13,212][07585] Updated weights for policy 0, policy_version 1800 (0.0012) +[2023-07-23 06:17:14,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.5, 300 sec: 3554.5). Total num frames: 7376896. Throughput: 0: 919.9. Samples: 1845200. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:17:14,768][00397] Avg episode reward: [(0, '28.789')] +[2023-07-23 06:17:14,780][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001801_7376896.pth... +[2023-07-23 06:17:14,983][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001594_6529024.pth +[2023-07-23 06:17:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 7397376. Throughput: 0: 913.7. Samples: 1850024. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:17:19,761][00397] Avg episode reward: [(0, '28.176')] +[2023-07-23 06:17:23,191][07585] Updated weights for policy 0, policy_version 1810 (0.0018) +[2023-07-23 06:17:24,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 7417856. Throughput: 0: 933.9. Samples: 1853416. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:17:24,761][00397] Avg episode reward: [(0, '29.073')] +[2023-07-23 06:17:24,777][07571] Saving new best policy, reward=29.073! +[2023-07-23 06:17:29,762][00397] Fps is (10 sec: 4504.4, 60 sec: 3891.3, 300 sec: 3596.1). Total num frames: 7442432. Throughput: 0: 988.6. Samples: 1860840. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 06:17:29,764][00397] Avg episode reward: [(0, '28.125')] +[2023-07-23 06:17:32,679][07585] Updated weights for policy 0, policy_version 1820 (0.0013) +[2023-07-23 06:17:34,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3823.3, 300 sec: 3596.1). Total num frames: 7458816. Throughput: 0: 987.2. Samples: 1866528. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:17:34,765][00397] Avg episode reward: [(0, '26.556')] +[2023-07-23 06:17:39,759][00397] Fps is (10 sec: 3277.7, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 7475200. Throughput: 0: 960.7. Samples: 1868992. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:17:39,766][00397] Avg episode reward: [(0, '26.823')] +[2023-07-23 06:17:44,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 7491584. Throughput: 0: 919.1. Samples: 1874048. 
Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:17:44,762][00397] Avg episode reward: [(0, '26.029')] +[2023-07-23 06:17:45,346][07585] Updated weights for policy 0, policy_version 1830 (0.0017) +[2023-07-23 06:17:49,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3754.7, 300 sec: 3554.5). Total num frames: 7507968. Throughput: 0: 919.5. Samples: 1879096. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:17:49,765][00397] Avg episode reward: [(0, '25.811')] +[2023-07-23 06:17:54,759][00397] Fps is (10 sec: 4096.2, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 7532544. Throughput: 0: 916.3. Samples: 1881440. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:17:54,764][00397] Avg episode reward: [(0, '24.835')] +[2023-07-23 06:17:55,532][07585] Updated weights for policy 0, policy_version 1840 (0.0019) +[2023-07-23 06:17:59,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 7553024. Throughput: 0: 966.2. Samples: 1888680. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:17:59,763][00397] Avg episode reward: [(0, '25.642')] +[2023-07-23 06:18:04,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 7573504. Throughput: 0: 1008.7. Samples: 1895416. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:18:04,766][00397] Avg episode reward: [(0, '27.376')] +[2023-07-23 06:18:05,336][07585] Updated weights for policy 0, policy_version 1850 (0.0015) +[2023-07-23 06:18:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 7589888. Throughput: 0: 987.4. Samples: 1897848. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:18:09,761][00397] Avg episode reward: [(0, '28.204')] +[2023-07-23 06:18:14,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 7606272. Throughput: 0: 932.1. Samples: 1902784. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:18:14,766][00397] Avg episode reward: [(0, '29.241')] +[2023-07-23 06:18:14,778][07571] Saving new best policy, reward=29.241! +[2023-07-23 06:18:17,033][07585] Updated weights for policy 0, policy_version 1860 (0.0017) +[2023-07-23 06:18:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 7622656. Throughput: 0: 916.4. Samples: 1907768. Policy #0 lag: (min: 0.0, avg: 0.6, max: 3.0) +[2023-07-23 06:18:19,763][00397] Avg episode reward: [(0, '29.542')] +[2023-07-23 06:18:19,765][07571] Saving new best policy, reward=29.542! +[2023-07-23 06:18:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 7639040. Throughput: 0: 912.0. Samples: 1910032. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:18:24,764][00397] Avg episode reward: [(0, '29.281')] +[2023-07-23 06:18:29,762][00397] Fps is (10 sec: 3275.8, 60 sec: 3549.8, 300 sec: 3596.2). Total num frames: 7655424. Throughput: 0: 907.0. Samples: 1914864. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:18:29,764][00397] Avg episode reward: [(0, '29.097')] +[2023-07-23 06:18:30,320][07585] Updated weights for policy 0, policy_version 1870 (0.0023) +[2023-07-23 06:18:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3623.9). Total num frames: 7671808. Throughput: 0: 907.4. Samples: 1919928. 
Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:18:34,764][00397] Avg episode reward: [(0, '28.517')] +[2023-07-23 06:18:39,759][00397] Fps is (10 sec: 2868.0, 60 sec: 3481.6, 300 sec: 3623.9). Total num frames: 7684096. Throughput: 0: 904.0. Samples: 1922120. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:18:39,762][00397] Avg episode reward: [(0, '27.929')] +[2023-07-23 06:18:43,769][07585] Updated weights for policy 0, policy_version 1880 (0.0012) +[2023-07-23 06:18:44,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3481.6, 300 sec: 3623.9). Total num frames: 7700480. Throughput: 0: 828.4. Samples: 1925960. Policy #0 lag: (min: 0.0, avg: 2.3, max: 4.0) +[2023-07-23 06:18:44,765][00397] Avg episode reward: [(0, '27.692')] +[2023-07-23 06:18:49,763][00397] Fps is (10 sec: 3275.6, 60 sec: 3481.4, 300 sec: 3623.9). Total num frames: 7716864. Throughput: 0: 766.5. Samples: 1929912. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:18:49,765][00397] Avg episode reward: [(0, '26.739')] +[2023-07-23 06:18:54,762][00397] Fps is (10 sec: 2866.4, 60 sec: 3276.6, 300 sec: 3596.1). Total num frames: 7729152. Throughput: 0: 759.6. Samples: 1932032. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:18:54,764][00397] Avg episode reward: [(0, '27.220')] +[2023-07-23 06:18:58,408][07585] Updated weights for policy 0, policy_version 1890 (0.0015) +[2023-07-23 06:18:59,759][00397] Fps is (10 sec: 2868.4, 60 sec: 3208.5, 300 sec: 3554.5). Total num frames: 7745536. Throughput: 0: 762.5. Samples: 1937096. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:18:59,768][00397] Avg episode reward: [(0, '26.614')] +[2023-07-23 06:19:04,759][00397] Fps is (10 sec: 3687.7, 60 sec: 3208.5, 300 sec: 3568.4). Total num frames: 7766016. Throughput: 0: 775.6. Samples: 1942672. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:19:04,761][00397] Avg episode reward: [(0, '27.439')] +[2023-07-23 06:19:07,705][07585] Updated weights for policy 0, policy_version 1900 (0.0012) +[2023-07-23 06:19:09,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 7786496. Throughput: 0: 806.6. Samples: 1946328. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:19:09,766][00397] Avg episode reward: [(0, '26.721')] +[2023-07-23 06:19:14,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 7811072. Throughput: 0: 860.3. Samples: 1953576. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:19:14,761][00397] Avg episode reward: [(0, '25.937')] +[2023-07-23 06:19:14,775][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001907_7811072.pth... +[2023-07-23 06:19:14,917][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001699_6959104.pth +[2023-07-23 06:19:18,562][07585] Updated weights for policy 0, policy_version 1910 (0.0018) +[2023-07-23 06:19:19,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3413.3, 300 sec: 3610.0). Total num frames: 7827456. Throughput: 0: 856.2. Samples: 1958456. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:19:19,762][00397] Avg episode reward: [(0, '26.374')] +[2023-07-23 06:19:24,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 7839744. Throughput: 0: 861.3. Samples: 1960880. 
Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:19:24,761][00397] Avg episode reward: [(0, '25.530')] +[2023-07-23 06:19:29,764][00397] Fps is (10 sec: 3275.0, 60 sec: 3413.2, 300 sec: 3582.2). Total num frames: 7860224. Throughput: 0: 887.2. Samples: 1965888. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:19:29,768][00397] Avg episode reward: [(0, '25.904')] +[2023-07-23 06:19:30,514][07585] Updated weights for policy 0, policy_version 1920 (0.0012) +[2023-07-23 06:19:34,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3554.5). Total num frames: 7876608. Throughput: 0: 909.9. Samples: 1970856. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:19:34,763][00397] Avg episode reward: [(0, '27.094')] +[2023-07-23 06:19:39,759][00397] Fps is (10 sec: 4098.2, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 7901184. Throughput: 0: 934.5. Samples: 1974080. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:19:39,764][00397] Avg episode reward: [(0, '26.298')] +[2023-07-23 06:19:40,862][07585] Updated weights for policy 0, policy_version 1930 (0.0019) +[2023-07-23 06:19:44,761][00397] Fps is (10 sec: 4914.0, 60 sec: 3754.5, 300 sec: 3610.0). Total num frames: 7925760. Throughput: 0: 982.0. Samples: 1981288. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:19:44,764][00397] Avg episode reward: [(0, '26.696')] +[2023-07-23 06:19:49,711][07585] Updated weights for policy 0, policy_version 1940 (0.0012) +[2023-07-23 06:19:49,761][00397] Fps is (10 sec: 4504.5, 60 sec: 3823.0, 300 sec: 3623.9). Total num frames: 7946240. Throughput: 0: 990.5. Samples: 1987248. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:19:49,768][00397] Avg episode reward: [(0, '26.341')] +[2023-07-23 06:19:54,761][00397] Fps is (10 sec: 3276.8, 60 sec: 3823.0, 300 sec: 3610.0). Total num frames: 7958528. Throughput: 0: 964.2. Samples: 1989720. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:19:54,764][00397] Avg episode reward: [(0, '27.730')] +[2023-07-23 06:19:59,761][00397] Fps is (10 sec: 2867.3, 60 sec: 3822.8, 300 sec: 3582.3). Total num frames: 7974912. Throughput: 0: 912.8. Samples: 1994656. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:19:59,763][00397] Avg episode reward: [(0, '27.615')] +[2023-07-23 06:20:02,635][07585] Updated weights for policy 0, policy_version 1950 (0.0012) +[2023-07-23 06:20:04,759][00397] Fps is (10 sec: 3277.6, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 7991296. Throughput: 0: 913.6. Samples: 1999568. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:20:04,766][00397] Avg episode reward: [(0, '27.678')] +[2023-07-23 06:20:09,759][00397] Fps is (10 sec: 3687.0, 60 sec: 3754.6, 300 sec: 3568.4). Total num frames: 8011776. Throughput: 0: 914.5. Samples: 2002032. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:20:09,763][00397] Avg episode reward: [(0, '28.472')] +[2023-07-23 06:20:12,571][07585] Updated weights for policy 0, policy_version 1960 (0.0013) +[2023-07-23 06:20:14,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3596.2). Total num frames: 8036352. Throughput: 0: 959.0. Samples: 2009040. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:20:14,767][00397] Avg episode reward: [(0, '28.962')] +[2023-07-23 06:20:19,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3822.9, 300 sec: 3610.0). Total num frames: 8056832. Throughput: 0: 1000.0. Samples: 2015856. 
Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:20:19,765][00397] Avg episode reward: [(0, '29.944')] +[2023-07-23 06:20:19,772][07571] Saving new best policy, reward=29.944! +[2023-07-23 06:20:23,758][07585] Updated weights for policy 0, policy_version 1970 (0.0018) +[2023-07-23 06:20:24,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 8073216. Throughput: 0: 979.7. Samples: 2018168. Policy #0 lag: (min: 0.0, avg: 0.6, max: 3.0) +[2023-07-23 06:20:24,761][00397] Avg episode reward: [(0, '30.060')] +[2023-07-23 06:20:24,768][07571] Saving new best policy, reward=30.060! +[2023-07-23 06:20:29,760][00397] Fps is (10 sec: 2866.8, 60 sec: 3754.9, 300 sec: 3596.1). Total num frames: 8085504. Throughput: 0: 927.7. Samples: 2023032. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:20:29,769][00397] Avg episode reward: [(0, '29.222')] +[2023-07-23 06:20:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3568.4). Total num frames: 8105984. Throughput: 0: 907.6. Samples: 2028088. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:20:34,764][00397] Avg episode reward: [(0, '29.498')] +[2023-07-23 06:20:35,436][07585] Updated weights for policy 0, policy_version 1980 (0.0018) +[2023-07-23 06:20:39,759][00397] Fps is (10 sec: 3686.9, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 8122368. Throughput: 0: 905.8. Samples: 2030480. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:20:39,764][00397] Avg episode reward: [(0, '29.170')] +[2023-07-23 06:20:44,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.6, 300 sec: 3596.2). Total num frames: 8146944. Throughput: 0: 934.1. Samples: 2036688. Policy #0 lag: (min: 0.0, avg: 0.9, max: 3.0) +[2023-07-23 06:20:44,769][00397] Avg episode reward: [(0, '30.037')] +[2023-07-23 06:20:45,857][07585] Updated weights for policy 0, policy_version 1990 (0.0013) +[2023-07-23 06:20:49,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3754.8, 300 sec: 3624.0). Total num frames: 8171520. Throughput: 0: 989.2. Samples: 2044080. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:20:49,761][00397] Avg episode reward: [(0, '29.802')] +[2023-07-23 06:20:54,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3823.1, 300 sec: 3624.0). Total num frames: 8187904. Throughput: 0: 997.5. Samples: 2046920. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:20:54,761][00397] Avg episode reward: [(0, '29.260')] +[2023-07-23 06:20:55,237][07585] Updated weights for policy 0, policy_version 2000 (0.0015) +[2023-07-23 06:20:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3823.1, 300 sec: 3637.8). Total num frames: 8204288. Throughput: 0: 954.3. Samples: 2051984. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:20:59,762][00397] Avg episode reward: [(0, '28.490')] +[2023-07-23 06:21:04,759][00397] Fps is (10 sec: 2457.5, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 8212480. Throughput: 0: 889.1. Samples: 2055864. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:21:04,771][00397] Avg episode reward: [(0, '28.211')] +[2023-07-23 06:21:09,763][00397] Fps is (10 sec: 2456.5, 60 sec: 3617.9, 300 sec: 3637.8). Total num frames: 8228864. Throughput: 0: 879.6. Samples: 2057752. 
Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 06:21:09,766][00397] Avg episode reward: [(0, '29.499')] +[2023-07-23 06:21:11,518][07585] Updated weights for policy 0, policy_version 2010 (0.0012) +[2023-07-23 06:21:14,763][00397] Fps is (10 sec: 3275.5, 60 sec: 3481.3, 300 sec: 3637.7). Total num frames: 8245248. Throughput: 0: 857.0. Samples: 2061600. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:21:14,766][00397] Avg episode reward: [(0, '29.780')] +[2023-07-23 06:21:14,783][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002013_8245248.pth... +[2023-07-23 06:21:14,971][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001801_7376896.pth +[2023-07-23 06:21:19,759][00397] Fps is (10 sec: 2458.7, 60 sec: 3276.8, 300 sec: 3596.1). Total num frames: 8253440. Throughput: 0: 828.6. Samples: 2065376. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:21:19,767][00397] Avg episode reward: [(0, '29.202')] +[2023-07-23 06:21:24,760][00397] Fps is (10 sec: 2458.3, 60 sec: 3276.7, 300 sec: 3596.2). Total num frames: 8269824. Throughput: 0: 827.5. Samples: 2067720. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:21:24,763][00397] Avg episode reward: [(0, '29.616')] +[2023-07-23 06:21:25,406][07585] Updated weights for policy 0, policy_version 2020 (0.0012) +[2023-07-23 06:21:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3413.4, 300 sec: 3596.2). Total num frames: 8290304. Throughput: 0: 811.0. Samples: 2073184. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:21:29,762][00397] Avg episode reward: [(0, '30.672')] +[2023-07-23 06:21:29,768][07571] Saving new best policy, reward=30.672! +[2023-07-23 06:21:34,759][00397] Fps is (10 sec: 3686.9, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 8306688. Throughput: 0: 770.8. Samples: 2078768. Policy #0 lag: (min: 0.0, avg: 0.9, max: 4.0) +[2023-07-23 06:21:34,761][00397] Avg episode reward: [(0, '29.648')] +[2023-07-23 06:21:36,051][07585] Updated weights for policy 0, policy_version 2030 (0.0013) +[2023-07-23 06:21:39,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 8323072. Throughput: 0: 760.5. Samples: 2081144. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:21:39,762][00397] Avg episode reward: [(0, '29.968')] +[2023-07-23 06:21:44,761][00397] Fps is (10 sec: 3685.6, 60 sec: 3276.7, 300 sec: 3596.1). Total num frames: 8343552. Throughput: 0: 758.4. Samples: 2086112. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:21:44,768][00397] Avg episode reward: [(0, '30.184')] +[2023-07-23 06:21:48,300][07585] Updated weights for policy 0, policy_version 2040 (0.0045) +[2023-07-23 06:21:49,759][00397] Fps is (10 sec: 3686.5, 60 sec: 3140.3, 300 sec: 3568.4). Total num frames: 8359936. Throughput: 0: 780.8. Samples: 2091000. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:21:49,764][00397] Avg episode reward: [(0, '28.459')] +[2023-07-23 06:21:54,759][00397] Fps is (10 sec: 3277.4, 60 sec: 3140.2, 300 sec: 3568.4). Total num frames: 8376320. Throughput: 0: 795.3. Samples: 2093536. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:21:54,762][00397] Avg episode reward: [(0, '28.049')] +[2023-07-23 06:21:58,499][07585] Updated weights for policy 0, policy_version 2050 (0.0015) +[2023-07-23 06:21:59,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 8404992. Throughput: 0: 873.2. Samples: 2100888. 
Policy #0 lag: (min: 0.0, avg: 1.5, max: 5.0) +[2023-07-23 06:21:59,761][00397] Avg episode reward: [(0, '28.115')] +[2023-07-23 06:22:04,761][00397] Fps is (10 sec: 4504.9, 60 sec: 3481.5, 300 sec: 3596.1). Total num frames: 8421376. Throughput: 0: 931.3. Samples: 2107288. Policy #0 lag: (min: 0.0, avg: 1.5, max: 5.0) +[2023-07-23 06:22:04,764][00397] Avg episode reward: [(0, '28.155')] +[2023-07-23 06:22:08,609][07585] Updated weights for policy 0, policy_version 2060 (0.0012) +[2023-07-23 06:22:09,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3481.9, 300 sec: 3596.1). Total num frames: 8437760. Throughput: 0: 935.9. Samples: 2109832. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:22:09,761][00397] Avg episode reward: [(0, '29.346')] +[2023-07-23 06:22:14,759][00397] Fps is (10 sec: 3277.4, 60 sec: 3481.9, 300 sec: 3582.3). Total num frames: 8454144. Throughput: 0: 923.7. Samples: 2114752. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:22:14,766][00397] Avg episode reward: [(0, '29.425')] +[2023-07-23 06:22:19,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 8474624. Throughput: 0: 908.3. Samples: 2119640. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:22:19,768][00397] Avg episode reward: [(0, '29.305')] +[2023-07-23 06:22:22,182][07585] Updated weights for policy 0, policy_version 2070 (0.0015) +[2023-07-23 06:22:24,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3686.5, 300 sec: 3554.5). Total num frames: 8491008. Throughput: 0: 910.9. Samples: 2122136. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:22:24,762][00397] Avg episode reward: [(0, '29.497')] +[2023-07-23 06:22:29,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 8511488. Throughput: 0: 941.4. Samples: 2128472. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:22:29,763][00397] Avg episode reward: [(0, '27.550')] +[2023-07-23 06:22:30,263][07585] Updated weights for policy 0, policy_version 2080 (0.0013) +[2023-07-23 06:22:34,763][00397] Fps is (10 sec: 4913.2, 60 sec: 3890.9, 300 sec: 3610.0). Total num frames: 8540160. Throughput: 0: 996.5. Samples: 2135848. Policy #0 lag: (min: 0.0, avg: 1.4, max: 5.0) +[2023-07-23 06:22:34,766][00397] Avg episode reward: [(0, '26.104')] +[2023-07-23 06:22:39,762][00397] Fps is (10 sec: 4504.1, 60 sec: 3891.0, 300 sec: 3610.0). Total num frames: 8556544. Throughput: 0: 999.6. Samples: 2138520. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:22:39,769][00397] Avg episode reward: [(0, '26.897')] +[2023-07-23 06:22:42,079][07585] Updated weights for policy 0, policy_version 2090 (0.0018) +[2023-07-23 06:22:44,760][00397] Fps is (10 sec: 2868.1, 60 sec: 3754.7, 300 sec: 3596.1). Total num frames: 8568832. Throughput: 0: 944.7. Samples: 2143400. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:22:44,767][00397] Avg episode reward: [(0, '28.090')] +[2023-07-23 06:22:49,759][00397] Fps is (10 sec: 2868.2, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 8585216. Throughput: 0: 911.9. Samples: 2148320. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:22:49,766][00397] Avg episode reward: [(0, '28.691')] +[2023-07-23 06:22:54,478][07585] Updated weights for policy 0, policy_version 2100 (0.0020) +[2023-07-23 06:22:54,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3754.7, 300 sec: 3554.5). Total num frames: 8601600. Throughput: 0: 909.0. Samples: 2150736. 
Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:22:54,761][00397] Avg episode reward: [(0, '28.375')] +[2023-07-23 06:22:59,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 8626176. Throughput: 0: 918.4. Samples: 2156080. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:22:59,768][00397] Avg episode reward: [(0, '28.855')] +[2023-07-23 06:23:02,993][07585] Updated weights for policy 0, policy_version 2110 (0.0014) +[2023-07-23 06:23:04,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3754.8, 300 sec: 3582.3). Total num frames: 8646656. Throughput: 0: 971.0. Samples: 2163336. Policy #0 lag: (min: 0.0, avg: 1.1, max: 4.0) +[2023-07-23 06:23:04,761][00397] Avg episode reward: [(0, '28.562')] +[2023-07-23 06:23:09,759][00397] Fps is (10 sec: 4095.9, 60 sec: 3822.9, 300 sec: 3596.1). Total num frames: 8667136. Throughput: 0: 996.4. Samples: 2166976. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:23:09,763][00397] Avg episode reward: [(0, '28.618')] +[2023-07-23 06:23:14,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3754.5, 300 sec: 3582.2). Total num frames: 8679424. Throughput: 0: 964.9. Samples: 2171896. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:23:14,764][00397] Avg episode reward: [(0, '28.136')] +[2023-07-23 06:23:14,772][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002119_8679424.pth... +[2023-07-23 06:23:14,914][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001907_7811072.pth +[2023-07-23 06:23:15,474][07585] Updated weights for policy 0, policy_version 2120 (0.0021) +[2023-07-23 06:23:19,760][00397] Fps is (10 sec: 2866.9, 60 sec: 3686.3, 300 sec: 3582.2). Total num frames: 8695808. Throughput: 0: 909.2. Samples: 2176760. Policy #0 lag: (min: 0.0, avg: 0.8, max: 4.0) +[2023-07-23 06:23:19,762][00397] Avg episode reward: [(0, '28.185')] +[2023-07-23 06:23:24,763][00397] Fps is (10 sec: 3685.7, 60 sec: 3754.4, 300 sec: 3596.1). Total num frames: 8716288. Throughput: 0: 905.2. Samples: 2179256. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:23:24,766][00397] Avg episode reward: [(0, '28.074')] +[2023-07-23 06:23:26,495][07585] Updated weights for policy 0, policy_version 2130 (0.0016) +[2023-07-23 06:23:29,762][00397] Fps is (10 sec: 4096.5, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 8736768. Throughput: 0: 908.1. Samples: 2184264. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:23:29,764][00397] Avg episode reward: [(0, '28.383')] +[2023-07-23 06:23:34,761][00397] Fps is (10 sec: 4096.9, 60 sec: 3618.3, 300 sec: 3637.8). Total num frames: 8757248. Throughput: 0: 945.4. Samples: 2190864. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:23:34,768][00397] Avg episode reward: [(0, '29.379')] +[2023-07-23 06:23:36,317][07585] Updated weights for policy 0, policy_version 2140 (0.0022) +[2023-07-23 06:23:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3618.3, 300 sec: 3637.8). Total num frames: 8773632. Throughput: 0: 955.0. Samples: 2193712. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:23:39,761][00397] Avg episode reward: [(0, '29.531')] +[2023-07-23 06:23:44,759][00397] Fps is (10 sec: 2867.9, 60 sec: 3618.2, 300 sec: 3624.0). Total num frames: 8785920. Throughput: 0: 934.9. Samples: 2198152. 
Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:23:44,769][00397] Avg episode reward: [(0, '29.397')] +[2023-07-23 06:23:49,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3637.8). Total num frames: 8802304. Throughput: 0: 857.8. Samples: 2201936. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:23:49,763][00397] Avg episode reward: [(0, '29.508')] +[2023-07-23 06:23:52,056][07585] Updated weights for policy 0, policy_version 2150 (0.0012) +[2023-07-23 06:23:54,763][00397] Fps is (10 sec: 2865.9, 60 sec: 3549.6, 300 sec: 3623.9). Total num frames: 8814592. Throughput: 0: 820.9. Samples: 2203920. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:23:54,766][00397] Avg episode reward: [(0, '28.929')] +[2023-07-23 06:23:59,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 8826880. Throughput: 0: 794.7. Samples: 2207656. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:23:59,763][00397] Avg episode reward: [(0, '28.969')] +[2023-07-23 06:24:04,759][00397] Fps is (10 sec: 2458.7, 60 sec: 3208.5, 300 sec: 3568.4). Total num frames: 8839168. Throughput: 0: 773.4. Samples: 2211560. Policy #0 lag: (min: 0.0, avg: 1.3, max: 4.0) +[2023-07-23 06:24:04,768][00397] Avg episode reward: [(0, '28.922')] +[2023-07-23 06:24:07,047][07585] Updated weights for policy 0, policy_version 2160 (0.0021) +[2023-07-23 06:24:09,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3140.3, 300 sec: 3540.6). Total num frames: 8855552. Throughput: 0: 770.0. Samples: 2213904. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:24:09,763][00397] Avg episode reward: [(0, '27.657')] +[2023-07-23 06:24:14,760][00397] Fps is (10 sec: 4504.8, 60 sec: 3413.4, 300 sec: 3582.2). Total num frames: 8884224. Throughput: 0: 796.2. Samples: 2220096. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:24:14,763][00397] Avg episode reward: [(0, '27.881')] +[2023-07-23 06:24:16,686][07585] Updated weights for policy 0, policy_version 2170 (0.0013) +[2023-07-23 06:24:19,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3413.4, 300 sec: 3596.1). Total num frames: 8900608. Throughput: 0: 811.1. Samples: 2227360. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:24:19,761][00397] Avg episode reward: [(0, '27.205')] +[2023-07-23 06:24:24,759][00397] Fps is (10 sec: 3277.3, 60 sec: 3345.3, 300 sec: 3582.3). Total num frames: 8916992. Throughput: 0: 810.5. Samples: 2230184. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:24:24,761][00397] Avg episode reward: [(0, '27.161')] +[2023-07-23 06:24:26,445][07585] Updated weights for policy 0, policy_version 2180 (0.0012) +[2023-07-23 06:24:29,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 8933376. Throughput: 0: 822.4. Samples: 2235160. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:24:29,763][00397] Avg episode reward: [(0, '27.866')] +[2023-07-23 06:24:34,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3208.7, 300 sec: 3554.5). Total num frames: 8949760. Throughput: 0: 851.2. Samples: 2240240. Policy #0 lag: (min: 0.0, avg: 2.1, max: 6.0) +[2023-07-23 06:24:34,761][00397] Avg episode reward: [(0, '27.912')] +[2023-07-23 06:24:39,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3208.5, 300 sec: 3526.8). Total num frames: 8966144. Throughput: 0: 861.4. Samples: 2242680. 
Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 06:24:39,761][00397] Avg episode reward: [(0, '29.093')] +[2023-07-23 06:24:39,854][07585] Updated weights for policy 0, policy_version 2190 (0.0013) +[2023-07-23 06:24:44,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3526.8). Total num frames: 8986624. Throughput: 0: 891.6. Samples: 2247776. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 06:24:44,769][00397] Avg episode reward: [(0, '28.550')] +[2023-07-23 06:24:48,557][07585] Updated weights for policy 0, policy_version 2200 (0.0013) +[2023-07-23 06:24:49,759][00397] Fps is (10 sec: 4915.2, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 9015296. Throughput: 0: 965.0. Samples: 2254984. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:24:49,761][00397] Avg episode reward: [(0, '29.165')] +[2023-07-23 06:24:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3618.4, 300 sec: 3582.3). Total num frames: 9031680. Throughput: 0: 993.8. Samples: 2258624. Policy #0 lag: (min: 0.0, avg: 1.7, max: 5.0) +[2023-07-23 06:24:54,761][00397] Avg episode reward: [(0, '30.946')] +[2023-07-23 06:24:54,775][07571] Saving new best policy, reward=30.946! +[2023-07-23 06:24:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 9048064. Throughput: 0: 970.2. Samples: 2263752. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:24:59,761][00397] Avg episode reward: [(0, '29.989')] +[2023-07-23 06:25:00,200][07585] Updated weights for policy 0, policy_version 2210 (0.0012) +[2023-07-23 06:25:04,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 9068544. Throughput: 0: 919.8. Samples: 2268752. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:25:04,765][00397] Avg episode reward: [(0, '29.153')] +[2023-07-23 06:25:09,760][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3554.5). Total num frames: 9084928. Throughput: 0: 911.6. Samples: 2271208. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:25:09,764][00397] Avg episode reward: [(0, '26.869')] +[2023-07-23 06:25:12,058][07585] Updated weights for policy 0, policy_version 2220 (0.0014) +[2023-07-23 06:25:14,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3540.6). Total num frames: 9101312. Throughput: 0: 910.6. Samples: 2276136. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:25:14,768][00397] Avg episode reward: [(0, '26.699')] +[2023-07-23 06:25:14,788][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002222_9101312.pth... +[2023-07-23 06:25:14,974][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002013_8245248.pth +[2023-07-23 06:25:19,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3568.4). Total num frames: 9125888. Throughput: 0: 939.4. Samples: 2282512. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:25:19,761][00397] Avg episode reward: [(0, '28.103')] +[2023-07-23 06:25:21,913][07585] Updated weights for policy 0, policy_version 2230 (0.0014) +[2023-07-23 06:25:24,759][00397] Fps is (10 sec: 4505.7, 60 sec: 3822.9, 300 sec: 3596.2). Total num frames: 9146368. Throughput: 0: 966.4. Samples: 2286168. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:25:24,767][00397] Avg episode reward: [(0, '26.350')] +[2023-07-23 06:25:29,762][00397] Fps is (10 sec: 3685.1, 60 sec: 3822.7, 300 sec: 3582.2). Total num frames: 9162752. Throughput: 0: 989.4. Samples: 2292304. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:25:29,769][00397] Avg episode reward: [(0, '27.118')] +[2023-07-23 06:25:31,876][07585] Updated weights for policy 0, policy_version 2240 (0.0012) +[2023-07-23 06:25:34,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3891.2, 300 sec: 3596.1). Total num frames: 9183232. Throughput: 0: 940.8. Samples: 2297320. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:25:34,763][00397] Avg episode reward: [(0, '26.569')] +[2023-07-23 06:25:39,759][00397] Fps is (10 sec: 3687.7, 60 sec: 3891.2, 300 sec: 3568.4). Total num frames: 9199616. Throughput: 0: 914.0. Samples: 2299752. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:25:39,764][00397] Avg episode reward: [(0, '26.822')] +[2023-07-23 06:25:44,734][07585] Updated weights for policy 0, policy_version 2250 (0.0016) +[2023-07-23 06:25:44,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3822.9, 300 sec: 3540.6). Total num frames: 9216000. Throughput: 0: 909.2. Samples: 2304664. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:25:44,764][00397] Avg episode reward: [(0, '28.438')] +[2023-07-23 06:25:49,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 9236480. Throughput: 0: 918.1. Samples: 2310064. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:25:49,761][00397] Avg episode reward: [(0, '29.140')] +[2023-07-23 06:25:53,641][07585] Updated weights for policy 0, policy_version 2260 (0.0012) +[2023-07-23 06:25:54,759][00397] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3582.3). Total num frames: 9261056. Throughput: 0: 946.5. Samples: 2313800. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:25:54,762][00397] Avg episode reward: [(0, '29.817')] +[2023-07-23 06:25:59,761][00397] Fps is (10 sec: 4095.0, 60 sec: 3822.8, 300 sec: 3610.0). Total num frames: 9277440. Throughput: 0: 994.4. Samples: 2320888. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:25:59,763][00397] Avg episode reward: [(0, '29.291')] +[2023-07-23 06:26:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3610.1). Total num frames: 9293824. Throughput: 0: 960.7. Samples: 2325744. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:26:04,763][00397] Avg episode reward: [(0, '29.413')] +[2023-07-23 06:26:05,414][07585] Updated weights for policy 0, policy_version 2270 (0.0019) +[2023-07-23 06:26:09,759][00397] Fps is (10 sec: 3277.5, 60 sec: 3754.7, 300 sec: 3610.1). Total num frames: 9310208. Throughput: 0: 933.7. Samples: 2328184. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:26:09,761][00397] Avg episode reward: [(0, '30.119')] +[2023-07-23 06:26:14,759][00397] Fps is (10 sec: 2867.2, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 9322496. Throughput: 0: 884.7. Samples: 2332112. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:26:14,761][00397] Avg episode reward: [(0, '29.808')] +[2023-07-23 06:26:19,469][07585] Updated weights for policy 0, policy_version 2280 (0.0017) +[2023-07-23 06:26:19,762][00397] Fps is (10 sec: 2866.3, 60 sec: 3549.7, 300 sec: 3623.9). Total num frames: 9338880. Throughput: 0: 857.7. Samples: 2335920. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:26:19,773][00397] Avg episode reward: [(0, '28.649')] +[2023-07-23 06:26:24,759][00397] Fps is (10 sec: 2457.6, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 9347072. Throughput: 0: 845.3. Samples: 2337792. 
Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:26:24,761][00397] Avg episode reward: [(0, '28.154')] +[2023-07-23 06:26:29,759][00397] Fps is (10 sec: 2868.1, 60 sec: 3413.5, 300 sec: 3596.1). Total num frames: 9367552. Throughput: 0: 834.0. Samples: 2342192. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:26:29,766][00397] Avg episode reward: [(0, '26.034')] +[2023-07-23 06:26:33,343][07585] Updated weights for policy 0, policy_version 2290 (0.0013) +[2023-07-23 06:26:34,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3345.1, 300 sec: 3596.1). Total num frames: 9383936. Throughput: 0: 823.1. Samples: 2347104. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:26:34,762][00397] Avg episode reward: [(0, '26.651')] +[2023-07-23 06:26:39,759][00397] Fps is (10 sec: 3276.7, 60 sec: 3345.0, 300 sec: 3582.3). Total num frames: 9400320. Throughput: 0: 795.2. Samples: 2349584. Policy #0 lag: (min: 0.0, avg: 2.3, max: 6.0) +[2023-07-23 06:26:39,762][00397] Avg episode reward: [(0, '26.815')] +[2023-07-23 06:26:44,759][00397] Fps is (10 sec: 3276.9, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 9416704. Throughput: 0: 746.7. Samples: 2354488. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:26:44,769][00397] Avg episode reward: [(0, '27.600')] +[2023-07-23 06:26:46,847][07585] Updated weights for policy 0, policy_version 2300 (0.0018) +[2023-07-23 06:26:49,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 9433088. Throughput: 0: 748.3. Samples: 2359416. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:26:49,768][00397] Avg episode reward: [(0, '27.177')] +[2023-07-23 06:26:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3140.3, 300 sec: 3540.6). Total num frames: 9449472. Throughput: 0: 748.8. Samples: 2361880. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:26:54,761][00397] Avg episode reward: [(0, '26.616')] +[2023-07-23 06:26:58,096][07585] Updated weights for policy 0, policy_version 2310 (0.0014) +[2023-07-23 06:26:59,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3140.4, 300 sec: 3540.6). Total num frames: 9465856. Throughput: 0: 771.2. Samples: 2366816. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:26:59,761][00397] Avg episode reward: [(0, '27.442')] +[2023-07-23 06:27:04,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3276.8, 300 sec: 3568.4). Total num frames: 9490432. Throughput: 0: 846.1. Samples: 2373992. Policy #0 lag: (min: 0.0, avg: 2.3, max: 5.0) +[2023-07-23 06:27:04,767][00397] Avg episode reward: [(0, '27.336')] +[2023-07-23 06:27:06,855][07585] Updated weights for policy 0, policy_version 2320 (0.0012) +[2023-07-23 06:27:09,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3345.1, 300 sec: 3582.3). Total num frames: 9510912. Throughput: 0: 885.5. Samples: 2377640. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:27:09,768][00397] Avg episode reward: [(0, '26.382')] +[2023-07-23 06:27:14,759][00397] Fps is (10 sec: 3686.3, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 9527296. Throughput: 0: 910.6. Samples: 2383168. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:27:14,762][00397] Avg episode reward: [(0, '26.223')] +[2023-07-23 06:27:14,778][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002326_9527296.pth... 
+[2023-07-23 06:27:14,899][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002119_8679424.pth +[2023-07-23 06:27:19,060][07585] Updated weights for policy 0, policy_version 2330 (0.0015) +[2023-07-23 06:27:19,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3413.5, 300 sec: 3568.4). Total num frames: 9543680. Throughput: 0: 910.6. Samples: 2388080. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:27:19,764][00397] Avg episode reward: [(0, '25.480')] +[2023-07-23 06:27:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 9560064. Throughput: 0: 910.2. Samples: 2390544. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:27:24,762][00397] Avg episode reward: [(0, '26.468')] +[2023-07-23 06:27:29,761][00397] Fps is (10 sec: 3276.0, 60 sec: 3481.5, 300 sec: 3512.9). Total num frames: 9576448. Throughput: 0: 910.7. Samples: 2395472. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:27:29,769][00397] Avg episode reward: [(0, '26.403')] +[2023-07-23 06:27:31,335][07585] Updated weights for policy 0, policy_version 2340 (0.0018) +[2023-07-23 06:27:34,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3540.7). Total num frames: 9601024. Throughput: 0: 935.8. Samples: 2401528. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:27:34,764][00397] Avg episode reward: [(0, '27.230')] +[2023-07-23 06:27:39,760][00397] Fps is (10 sec: 4506.1, 60 sec: 3686.3, 300 sec: 3568.4). Total num frames: 9621504. Throughput: 0: 962.5. Samples: 2405192. Policy #0 lag: (min: 0.0, avg: 1.8, max: 5.0) +[2023-07-23 06:27:39,770][00397] Avg episode reward: [(0, '27.323')] +[2023-07-23 06:27:40,033][07585] Updated weights for policy 0, policy_version 2350 (0.0014) +[2023-07-23 06:27:44,759][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 9641984. Throughput: 0: 999.7. Samples: 2411800. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:27:44,763][00397] Avg episode reward: [(0, '28.110')] +[2023-07-23 06:27:49,759][00397] Fps is (10 sec: 3686.9, 60 sec: 3754.7, 300 sec: 3582.3). Total num frames: 9658368. Throughput: 0: 951.8. Samples: 2416824. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:27:49,763][00397] Avg episode reward: [(0, '27.085')] +[2023-07-23 06:27:51,089][07585] Updated weights for policy 0, policy_version 2360 (0.0012) +[2023-07-23 06:27:54,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3568.4). Total num frames: 9678848. Throughput: 0: 924.6. Samples: 2419248. Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:27:54,761][00397] Avg episode reward: [(0, '27.221')] +[2023-07-23 06:27:59,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3554.5). Total num frames: 9695232. Throughput: 0: 912.2. Samples: 2424216. Policy #0 lag: (min: 0.0, avg: 1.6, max: 4.0) +[2023-07-23 06:27:59,763][00397] Avg episode reward: [(0, '28.380')] +[2023-07-23 06:28:03,896][07585] Updated weights for policy 0, policy_version 2370 (0.0015) +[2023-07-23 06:28:04,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3540.6). Total num frames: 9711616. Throughput: 0: 911.5. Samples: 2429096. Policy #0 lag: (min: 0.0, avg: 1.0, max: 4.0) +[2023-07-23 06:28:04,761][00397] Avg episode reward: [(0, '27.707')] +[2023-07-23 06:28:09,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 9732096. Throughput: 0: 940.3. Samples: 2432856. 
Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:28:09,769][00397] Avg episode reward: [(0, '27.099')] +[2023-07-23 06:28:12,156][07585] Updated weights for policy 0, policy_version 2380 (0.0019) +[2023-07-23 06:28:14,759][00397] Fps is (10 sec: 4915.3, 60 sec: 3891.2, 300 sec: 3610.0). Total num frames: 9760768. Throughput: 0: 993.1. Samples: 2440160. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:28:14,764][00397] Avg episode reward: [(0, '26.034')] +[2023-07-23 06:28:19,762][00397] Fps is (10 sec: 4504.4, 60 sec: 3891.0, 300 sec: 3596.2). Total num frames: 9777152. Throughput: 0: 974.5. Samples: 2445384. Policy #0 lag: (min: 0.0, avg: 1.4, max: 4.0) +[2023-07-23 06:28:19,766][00397] Avg episode reward: [(0, '25.680')] +[2023-07-23 06:28:23,662][07585] Updated weights for policy 0, policy_version 2390 (0.0014) +[2023-07-23 06:28:24,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3891.2, 300 sec: 3582.3). Total num frames: 9793536. Throughput: 0: 947.8. Samples: 2447840. Policy #0 lag: (min: 0.0, avg: 0.7, max: 3.0) +[2023-07-23 06:28:24,761][00397] Avg episode reward: [(0, '24.386')] +[2023-07-23 06:28:29,759][00397] Fps is (10 sec: 2868.0, 60 sec: 3823.1, 300 sec: 3554.5). Total num frames: 9805824. Throughput: 0: 906.8. Samples: 2452608. Policy #0 lag: (min: 0.0, avg: 1.7, max: 4.0) +[2023-07-23 06:28:29,761][00397] Avg episode reward: [(0, '24.299')] +[2023-07-23 06:28:34,759][00397] Fps is (10 sec: 2867.1, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 9822208. Throughput: 0: 905.4. Samples: 2457568. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:28:34,764][00397] Avg episode reward: [(0, '25.809')] +[2023-07-23 06:28:36,133][07585] Updated weights for policy 0, policy_version 2400 (0.0016) +[2023-07-23 06:28:39,759][00397] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3582.3). Total num frames: 9842688. Throughput: 0: 910.6. Samples: 2460224. Policy #0 lag: (min: 0.0, avg: 1.2, max: 4.0) +[2023-07-23 06:28:39,767][00397] Avg episode reward: [(0, '26.489')] +[2023-07-23 06:28:44,759][00397] Fps is (10 sec: 4505.8, 60 sec: 3754.7, 300 sec: 3610.0). Total num frames: 9867264. Throughput: 0: 966.6. Samples: 2467712. Policy #0 lag: (min: 0.0, avg: 1.8, max: 4.0) +[2023-07-23 06:28:44,768][00397] Avg episode reward: [(0, '26.512')] +[2023-07-23 06:28:44,824][07585] Updated weights for policy 0, policy_version 2410 (0.0012) +[2023-07-23 06:28:49,761][00397] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3624.0). Total num frames: 9883648. Throughput: 0: 981.7. Samples: 2473272. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:28:49,763][00397] Avg episode reward: [(0, '27.305')] +[2023-07-23 06:28:54,759][00397] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 9900032. Throughput: 0: 939.2. Samples: 2475120. Policy #0 lag: (min: 0.0, avg: 2.1, max: 4.0) +[2023-07-23 06:28:54,762][00397] Avg episode reward: [(0, '28.283')] +[2023-07-23 06:28:59,764][00397] Fps is (10 sec: 2456.3, 60 sec: 3549.5, 300 sec: 3623.9). Total num frames: 9908224. Throughput: 0: 864.8. Samples: 2479080. Policy #0 lag: (min: 0.0, avg: 2.1, max: 5.0) +[2023-07-23 06:28:59,766][00397] Avg episode reward: [(0, '29.314')] +[2023-07-23 06:28:59,979][07585] Updated weights for policy 0, policy_version 2420 (0.0014) +[2023-07-23 06:29:04,764][00397] Fps is (10 sec: 2456.3, 60 sec: 3549.6, 300 sec: 3623.9). Total num frames: 9924608. Throughput: 0: 833.5. Samples: 2482896. 
Policy #0 lag: (min: 0.0, avg: 2.0, max: 4.0) +[2023-07-23 06:29:04,766][00397] Avg episode reward: [(0, '30.568')] +[2023-07-23 06:29:09,759][00397] Fps is (10 sec: 2868.7, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 9936896. Throughput: 0: 823.1. Samples: 2484880. Policy #0 lag: (min: 0.0, avg: 1.9, max: 4.0) +[2023-07-23 06:29:09,761][00397] Avg episode reward: [(0, '30.211')] +[2023-07-23 06:29:14,760][00397] Fps is (10 sec: 2458.7, 60 sec: 3140.2, 300 sec: 3554.5). Total num frames: 9949184. Throughput: 0: 801.9. Samples: 2488696. Policy #0 lag: (min: 0.0, avg: 2.0, max: 5.0) +[2023-07-23 06:29:14,765][00397] Avg episode reward: [(0, '29.118')] +[2023-07-23 06:29:14,782][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002429_9949184.pth... +[2023-07-23 06:29:14,969][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002222_9101312.pth +[2023-07-23 06:29:15,803][07585] Updated weights for policy 0, policy_version 2430 (0.0015) +[2023-07-23 06:29:19,762][00397] Fps is (10 sec: 2866.2, 60 sec: 3140.2, 300 sec: 3554.5). Total num frames: 9965568. Throughput: 0: 789.1. Samples: 2493080. Policy #0 lag: (min: 0.0, avg: 1.9, max: 5.0) +[2023-07-23 06:29:19,765][00397] Avg episode reward: [(0, '29.093')] +[2023-07-23 06:29:24,759][00397] Fps is (10 sec: 4096.2, 60 sec: 3276.8, 300 sec: 3582.3). Total num frames: 9990144. Throughput: 0: 811.2. Samples: 2496728. Policy #0 lag: (min: 0.0, avg: 1.5, max: 4.0) +[2023-07-23 06:29:24,769][00397] Avg episode reward: [(0, '31.069')] +[2023-07-23 06:29:24,779][07571] Saving new best policy, reward=31.069! +[2023-07-23 06:29:25,882][07585] Updated weights for policy 0, policy_version 2440 (0.0015) +[2023-07-23 06:29:27,436][07571] Stopping Batcher_0... +[2023-07-23 06:29:27,437][07571] Loop batcher_evt_loop terminating... +[2023-07-23 06:29:27,438][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth... +[2023-07-23 06:29:27,437][00397] Component Batcher_0 stopped! +[2023-07-23 06:29:27,553][07585] Weights refcount: 2 0 +[2023-07-23 06:29:27,559][00397] Component InferenceWorker_p0-w0 stopped! +[2023-07-23 06:29:27,564][07585] Stopping InferenceWorker_p0-w0... +[2023-07-23 06:29:27,565][07585] Loop inference_proc0-0_evt_loop terminating... +[2023-07-23 06:29:27,612][07571] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002326_9527296.pth +[2023-07-23 06:29:27,630][07571] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth... +[2023-07-23 06:29:27,716][00397] Component RolloutWorker_w2 stopped! +[2023-07-23 06:29:27,722][07591] Stopping RolloutWorker_w2... +[2023-07-23 06:29:27,732][07591] Loop rollout_proc2_evt_loop terminating... +[2023-07-23 06:29:27,771][00397] Component RolloutWorker_w1 stopped! +[2023-07-23 06:29:27,774][07586] Stopping RolloutWorker_w1... +[2023-07-23 06:29:27,778][07586] Loop rollout_proc1_evt_loop terminating... +[2023-07-23 06:29:27,907][07571] Stopping LearnerWorker_p0... +[2023-07-23 06:29:27,908][07571] Loop learner_proc0_evt_loop terminating... +[2023-07-23 06:29:27,909][00397] Component LearnerWorker_p0 stopped! +[2023-07-23 06:29:27,938][07588] Stopping RolloutWorker_w4... +[2023-07-23 06:29:27,942][00397] Component RolloutWorker_w4 stopped! +[2023-07-23 06:29:27,940][07588] Loop rollout_proc4_evt_loop terminating... +[2023-07-23 06:29:27,990][07584] Stopping RolloutWorker_w0... 
+[2023-07-23 06:29:27,989][00397] Component RolloutWorker_w0 stopped! +[2023-07-23 06:29:27,994][07584] Loop rollout_proc0_evt_loop terminating... +[2023-07-23 06:29:28,017][00397] Component RolloutWorker_w6 stopped! +[2023-07-23 06:29:28,010][07592] Stopping RolloutWorker_w6... +[2023-07-23 06:29:28,026][07592] Loop rollout_proc6_evt_loop terminating... +[2023-07-23 06:29:28,137][07589] Stopping RolloutWorker_w5... +[2023-07-23 06:29:28,137][07589] Loop rollout_proc5_evt_loop terminating... +[2023-07-23 06:29:28,137][00397] Component RolloutWorker_w5 stopped! +[2023-07-23 06:29:28,177][07590] Stopping RolloutWorker_w7... +[2023-07-23 06:29:28,177][07590] Loop rollout_proc7_evt_loop terminating... +[2023-07-23 06:29:28,177][00397] Component RolloutWorker_w7 stopped! +[2023-07-23 06:29:28,192][07587] Stopping RolloutWorker_w3... +[2023-07-23 06:29:28,192][00397] Component RolloutWorker_w3 stopped! +[2023-07-23 06:29:28,193][07587] Loop rollout_proc3_evt_loop terminating... +[2023-07-23 06:29:28,194][00397] Waiting for process learner_proc0 to stop... +[2023-07-23 06:29:30,350][00397] Waiting for process inference_proc0-0 to join... +[2023-07-23 06:29:30,354][00397] Waiting for process rollout_proc0 to join... +[2023-07-23 06:29:33,240][00397] Waiting for process rollout_proc1 to join... +[2023-07-23 06:29:33,243][00397] Waiting for process rollout_proc2 to join... +[2023-07-23 06:29:33,244][00397] Waiting for process rollout_proc3 to join... +[2023-07-23 06:29:33,246][00397] Waiting for process rollout_proc4 to join... +[2023-07-23 06:29:33,249][00397] Waiting for process rollout_proc5 to join... +[2023-07-23 06:29:33,251][00397] Waiting for process rollout_proc6 to join... +[2023-07-23 06:29:33,252][00397] Waiting for process rollout_proc7 to join... +[2023-07-23 06:29:33,253][00397] Batcher 0 profile tree view: +batching: 64.5395, releasing_batches: 0.0615 +[2023-07-23 06:29:33,254][00397] InferenceWorker_p0-w0 profile tree view: +wait_policy: 0.0066 + wait_policy_total: 2178.5763 +update_model: 10.8343 + weight_update: 0.0016 +one_step: 0.0030 + handle_policy_step: 609.3636 + deserialize: 25.3856, stack: 3.3900, obs_to_device_normalize: 128.4549, forward: 323.3208, send_messages: 21.7177 + prepare_outputs: 80.5940 + to_cpu: 44.5224 +[2023-07-23 06:29:33,259][00397] Learner 0 profile tree view: +misc: 0.0135, prepare_batch: 38.5693 +train: 183.0175 + epoch_init: 0.0191, minibatch_init: 0.0501, losses_postprocess: 1.1710, kl_divergence: 1.3780, after_optimizer: 7.8108 + calculate_losses: 65.4491 + losses_init: 0.0130, forward_head: 3.3277, bptt_initial: 42.0206, tail: 3.0908, advantages_returns: 0.7358, losses: 9.7635 + bptt: 5.6446 + bptt_forward_core: 5.3996 + update: 105.5241 + clip: 78.1981 +[2023-07-23 06:29:33,260][00397] RolloutWorker_w0 profile tree view: +wait_for_trajectories: 0.8513, enqueue_policy_requests: 255.2206, env_step: 2333.3151, overhead: 69.2144, complete_rollouts: 8.2499 +save_policy_outputs: 72.3385 + split_output_tensors: 32.3206 +[2023-07-23 06:29:33,261][00397] RolloutWorker_w7 profile tree view: +wait_for_trajectories: 1.0052, enqueue_policy_requests: 262.1097, env_step: 2326.2914, overhead: 71.3692, complete_rollouts: 7.9236 +save_policy_outputs: 71.0904 + split_output_tensors: 32.2919 +[2023-07-23 06:29:33,262][00397] Loop Runner_EvtLoop terminating... 
+[2023-07-23 06:29:33,264][00397] Runner profile tree view: +main_loop: 2889.8960 +[2023-07-23 06:29:33,267][00397] Collected {0: 10006528}, FPS: 3462.6 +[2023-07-23 06:30:15,071][00397] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json +[2023-07-23 06:30:15,073][00397] Overriding arg 'num_workers' with value 1 passed from command line +[2023-07-23 06:30:15,075][00397] Adding new argument 'no_render'=True that is not in the saved config file! +[2023-07-23 06:30:15,077][00397] Adding new argument 'save_video'=True that is not in the saved config file! +[2023-07-23 06:30:15,079][00397] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file! +[2023-07-23 06:30:15,081][00397] Adding new argument 'video_name'=None that is not in the saved config file! +[2023-07-23 06:30:15,083][00397] Adding new argument 'max_num_frames'=100000 that is not in the saved config file! +[2023-07-23 06:30:15,085][00397] Adding new argument 'max_num_episodes'=10 that is not in the saved config file! +[2023-07-23 06:30:15,087][00397] Adding new argument 'push_to_hub'=True that is not in the saved config file! +[2023-07-23 06:30:15,088][00397] Adding new argument 'hf_repository'='Corianas/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file! +[2023-07-23 06:30:15,089][00397] Adding new argument 'policy_index'=0 that is not in the saved config file! +[2023-07-23 06:30:15,090][00397] Adding new argument 'eval_deterministic'=False that is not in the saved config file! +[2023-07-23 06:30:15,091][00397] Adding new argument 'train_script'=None that is not in the saved config file! +[2023-07-23 06:30:15,092][00397] Adding new argument 'enjoy_script'=None that is not in the saved config file! +[2023-07-23 06:30:15,094][00397] Using frameskip 1 and render_action_repeat=4 for evaluation +[2023-07-23 06:30:15,143][00397] Doom resolution: 160x120, resize resolution: (128, 72) +[2023-07-23 06:30:15,147][00397] RunningMeanStd input shape: (3, 72, 128) +[2023-07-23 06:30:15,149][00397] RunningMeanStd input shape: (1,) +[2023-07-23 06:30:15,169][00397] ConvEncoder: input_channels=3 +[2023-07-23 06:30:15,306][00397] Conv encoder output size: 512 +[2023-07-23 06:30:15,308][00397] Policy head output size: 512 +[2023-07-23 06:30:17,822][00397] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000002443_10006528.pth... +[2023-07-23 06:30:19,150][00397] Num frames 100... +[2023-07-23 06:30:19,285][00397] Num frames 200... +[2023-07-23 06:30:19,408][00397] Num frames 300... +[2023-07-23 06:30:19,533][00397] Num frames 400... +[2023-07-23 06:30:19,665][00397] Num frames 500... +[2023-07-23 06:30:19,794][00397] Num frames 600... +[2023-07-23 06:30:19,925][00397] Num frames 700... +[2023-07-23 06:30:20,055][00397] Num frames 800... +[2023-07-23 06:30:20,186][00397] Num frames 900... +[2023-07-23 06:30:20,324][00397] Num frames 1000... +[2023-07-23 06:30:20,453][00397] Num frames 1100... +[2023-07-23 06:30:20,591][00397] Num frames 1200... +[2023-07-23 06:30:20,721][00397] Num frames 1300... +[2023-07-23 06:30:20,850][00397] Num frames 1400... +[2023-07-23 06:30:20,984][00397] Num frames 1500... +[2023-07-23 06:30:21,115][00397] Num frames 1600... +[2023-07-23 06:30:21,258][00397] Num frames 1700... +[2023-07-23 06:30:21,391][00397] Num frames 1800... 
+[2023-07-23 06:30:21,532][00397] Avg episode rewards: #0: 47.659, true rewards: #0: 18.660 +[2023-07-23 06:30:21,533][00397] Avg episode reward: 47.659, avg true_objective: 18.660 +[2023-07-23 06:30:21,584][00397] Num frames 1900... +[2023-07-23 06:30:21,723][00397] Num frames 2000... +[2023-07-23 06:30:21,852][00397] Num frames 2100... +[2023-07-23 06:30:21,984][00397] Num frames 2200... +[2023-07-23 06:30:22,108][00397] Num frames 2300... +[2023-07-23 06:30:22,239][00397] Num frames 2400... +[2023-07-23 06:30:22,385][00397] Num frames 2500... +[2023-07-23 06:30:22,569][00397] Num frames 2600... +[2023-07-23 06:30:22,762][00397] Num frames 2700... +[2023-07-23 06:30:22,952][00397] Num frames 2800... +[2023-07-23 06:30:23,137][00397] Num frames 2900... +[2023-07-23 06:30:23,324][00397] Num frames 3000... +[2023-07-23 06:30:23,516][00397] Num frames 3100... +[2023-07-23 06:30:23,705][00397] Num frames 3200... +[2023-07-23 06:30:23,802][00397] Avg episode rewards: #0: 42.095, true rewards: #0: 16.095 +[2023-07-23 06:30:23,803][00397] Avg episode reward: 42.095, avg true_objective: 16.095 +[2023-07-23 06:30:23,951][00397] Num frames 3300... +[2023-07-23 06:30:24,133][00397] Num frames 3400... +[2023-07-23 06:30:24,315][00397] Num frames 3500... +[2023-07-23 06:30:24,499][00397] Num frames 3600... +[2023-07-23 06:30:24,574][00397] Avg episode rewards: #0: 31.356, true rewards: #0: 12.023 +[2023-07-23 06:30:24,576][00397] Avg episode reward: 31.356, avg true_objective: 12.023 +[2023-07-23 06:30:24,759][00397] Num frames 3700... +[2023-07-23 06:30:24,957][00397] Num frames 3800... +[2023-07-23 06:30:25,143][00397] Num frames 3900... +[2023-07-23 06:30:25,324][00397] Num frames 4000... +[2023-07-23 06:30:25,505][00397] Num frames 4100... +[2023-07-23 06:30:25,691][00397] Num frames 4200... +[2023-07-23 06:30:25,874][00397] Num frames 4300... +[2023-07-23 06:30:26,066][00397] Num frames 4400... +[2023-07-23 06:30:26,131][00397] Avg episode rewards: #0: 27.760, true rewards: #0: 11.010 +[2023-07-23 06:30:26,132][00397] Avg episode reward: 27.760, avg true_objective: 11.010 +[2023-07-23 06:30:26,312][00397] Num frames 4500... +[2023-07-23 06:30:26,467][00397] Num frames 4600... +[2023-07-23 06:30:26,596][00397] Num frames 4700... +[2023-07-23 06:30:26,723][00397] Num frames 4800... +[2023-07-23 06:30:26,851][00397] Num frames 4900... +[2023-07-23 06:30:26,985][00397] Num frames 5000... +[2023-07-23 06:30:27,115][00397] Num frames 5100... +[2023-07-23 06:30:27,244][00397] Num frames 5200... +[2023-07-23 06:30:27,368][00397] Num frames 5300... +[2023-07-23 06:30:27,501][00397] Num frames 5400... +[2023-07-23 06:30:27,641][00397] Num frames 5500... +[2023-07-23 06:30:27,782][00397] Num frames 5600... +[2023-07-23 06:30:27,918][00397] Num frames 5700... +[2023-07-23 06:30:28,035][00397] Avg episode rewards: #0: 28.496, true rewards: #0: 11.496 +[2023-07-23 06:30:28,036][00397] Avg episode reward: 28.496, avg true_objective: 11.496 +[2023-07-23 06:30:28,108][00397] Num frames 5800... +[2023-07-23 06:30:28,235][00397] Num frames 5900... +[2023-07-23 06:30:28,370][00397] Num frames 6000... +[2023-07-23 06:30:28,506][00397] Num frames 6100... +[2023-07-23 06:30:28,639][00397] Num frames 6200... +[2023-07-23 06:30:28,763][00397] Num frames 6300... +[2023-07-23 06:30:28,893][00397] Num frames 6400... +[2023-07-23 06:30:29,028][00397] Num frames 6500... +[2023-07-23 06:30:29,168][00397] Num frames 6600... 
+[2023-07-23 06:30:29,321][00397] Avg episode rewards: #0: 27.122, true rewards: #0: 11.122 +[2023-07-23 06:30:29,323][00397] Avg episode reward: 27.122, avg true_objective: 11.122 +[2023-07-23 06:30:29,359][00397] Num frames 6700... +[2023-07-23 06:30:29,491][00397] Num frames 6800... +[2023-07-23 06:30:29,622][00397] Num frames 6900... +[2023-07-23 06:30:29,746][00397] Num frames 7000... +[2023-07-23 06:30:29,877][00397] Num frames 7100... +[2023-07-23 06:30:30,013][00397] Num frames 7200... +[2023-07-23 06:30:30,138][00397] Num frames 7300... +[2023-07-23 06:30:30,269][00397] Num frames 7400... +[2023-07-23 06:30:30,394][00397] Num frames 7500... +[2023-07-23 06:30:30,526][00397] Num frames 7600... +[2023-07-23 06:30:30,659][00397] Num frames 7700... +[2023-07-23 06:30:30,793][00397] Num frames 7800... +[2023-07-23 06:30:30,929][00397] Num frames 7900... +[2023-07-23 06:30:31,056][00397] Num frames 8000... +[2023-07-23 06:30:31,182][00397] Num frames 8100... +[2023-07-23 06:30:31,300][00397] Avg episode rewards: #0: 27.921, true rewards: #0: 11.636 +[2023-07-23 06:30:31,302][00397] Avg episode reward: 27.921, avg true_objective: 11.636 +[2023-07-23 06:30:31,371][00397] Num frames 8200... +[2023-07-23 06:30:31,498][00397] Num frames 8300... +[2023-07-23 06:30:31,635][00397] Num frames 8400... +[2023-07-23 06:30:31,760][00397] Num frames 8500... +[2023-07-23 06:30:31,887][00397] Num frames 8600... +[2023-07-23 06:30:32,019][00397] Num frames 8700... +[2023-07-23 06:30:32,142][00397] Num frames 8800... +[2023-07-23 06:30:32,269][00397] Num frames 8900... +[2023-07-23 06:30:32,395][00397] Avg episode rewards: #0: 26.946, true rewards: #0: 11.196 +[2023-07-23 06:30:32,397][00397] Avg episode reward: 26.946, avg true_objective: 11.196 +[2023-07-23 06:30:32,452][00397] Num frames 9000... +[2023-07-23 06:30:32,586][00397] Num frames 9100... +[2023-07-23 06:30:32,710][00397] Num frames 9200... +[2023-07-23 06:30:32,842][00397] Num frames 9300... +[2023-07-23 06:30:32,976][00397] Num frames 9400... +[2023-07-23 06:30:33,101][00397] Num frames 9500... +[2023-07-23 06:30:33,225][00397] Num frames 9600... +[2023-07-23 06:30:33,356][00397] Num frames 9700... +[2023-07-23 06:30:33,481][00397] Num frames 9800... +[2023-07-23 06:30:33,616][00397] Num frames 9900... +[2023-07-23 06:30:33,750][00397] Avg episode rewards: #0: 26.843, true rewards: #0: 11.066 +[2023-07-23 06:30:33,751][00397] Avg episode reward: 26.843, avg true_objective: 11.066 +[2023-07-23 06:30:33,808][00397] Num frames 10000... +[2023-07-23 06:30:33,934][00397] Num frames 10100... +[2023-07-23 06:30:34,063][00397] Num frames 10200... +[2023-07-23 06:30:34,192][00397] Num frames 10300... +[2023-07-23 06:30:34,317][00397] Num frames 10400... +[2023-07-23 06:30:34,446][00397] Num frames 10500... +[2023-07-23 06:30:34,580][00397] Num frames 10600... +[2023-07-23 06:30:34,679][00397] Avg episode rewards: #0: 25.331, true rewards: #0: 10.631 +[2023-07-23 06:30:34,680][00397] Avg episode reward: 25.331, avg true_objective: 10.631 +[2023-07-23 06:31:43,492][00397] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
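The runner summary above gives the whole-run totals, and the shutdown sequence records the final checkpoint path. The short Python sketch below is not part of the logged run; it only cross-checks the reported throughput from those two figures and peeks at the saved checkpoint. The numbers and paths are copied verbatim from the log, while the use of torch.load with weights_only=False and the expectation that the file holds a dict of training state are assumptions about the checkpoint format, not something the log itself confirms.

import torch

# Figures copied from the "Runner profile tree view" / "Collected" lines above.
total_frames = 10_006_528        # "Collected {0: 10006528}"
main_loop_seconds = 2889.8960    # "main_loop: 2889.8960"

# Implied overall throughput; agrees with the logged "FPS: 3462.6".
print(f"throughput: {total_frames / main_loop_seconds:.1f} fps")

# Final checkpoint written just before shutdown (path copied from the log).
ckpt_path = (
    "/content/train_dir/default_experiment/"
    "checkpoint_p0/checkpoint_000002443_10006528.pth"
)

# Assumption: the checkpoint stores full training state rather than bare
# tensors, so weights_only=False is passed for newer PyTorch versions.
ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=False)
if isinstance(ckpt, dict):
    for key in ckpt:   # list whatever Sample Factory stored, without assuming key names
        print(key)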