{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.178101062774658,
"min": 2.8673300743103027,
"max": 3.1946921348571777,
"count": 167
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 63053.52734375,
"min": 17664.4375,
"max": 132405.703125,
"count": 167
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 840.8333333333334,
"min": 456.1666666666667,
"max": 999.0,
"count": 167
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20180.0,
"min": 3996.0,
"max": 31968.0,
"count": 167
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1177.422018400382,
"min": 1175.0434706461886,
"max": 1186.7360154851992,
"count": 128
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4709.688073601528,
"min": 2350.5841092524156,
"max": 11847.773513894224,
"count": 128
},
"SoccerTwos.Step.mean": {
"value": 6659170.0,
"min": 4999844.0,
"max": 6659170.0,
"count": 167
},
"SoccerTwos.Step.sum": {
"value": 6659170.0,
"min": 4999844.0,
"max": 6659170.0,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.004412549082189798,
"min": -0.02612762711942196,
"max": 0.005425532814115286,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.05295059084892273,
"min": -0.3348146677017212,
"max": 0.05818541347980499,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.004174693953245878,
"min": -0.0269156526774168,
"max": 0.0038520265370607376,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.05009632557630539,
"min": -0.3421228528022766,
"max": 0.0568871945142746,
"count": 167
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 167
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.15826666293044886,
"min": -1.0,
"max": 0.2853714312825884,
"count": 167
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.8991999551653862,
"min": -8.0,
"max": 3.995200037956238,
"count": 167
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.15826666293044886,
"min": -1.0,
"max": 0.2853714312825884,
"count": 167
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.8991999551653862,
"min": -8.0,
"max": 3.995200037956238,
"count": 167
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 167
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 167
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018589642740941294,
"min": 0.010128085433583086,
"max": 0.02375313483935315,
"count": 77
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018589642740941294,
"min": 0.010128085433583086,
"max": 0.02375313483935315,
"count": 77
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0005466308672718393,
"min": 1.3286295673727486e-06,
"max": 0.005060140167673429,
"count": 77
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0005466308672718393,
"min": 1.3286295673727486e-06,
"max": 0.005060140167673429,
"count": 77
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.00054686176784647,
"min": 1.457611718554593e-06,
"max": 0.0050610301472867525,
"count": 77
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.00054686176784647,
"min": 1.457611718554593e-06,
"max": 0.0050610301472867525,
"count": 77
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 77
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 77
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 77
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 77
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 77
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 77
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688200444",
"python_version": "3.9.16 (main, May 15 2023, 23:46:34) \n[GCC 11.2.0]",
"command_line_arguments": "/home/ubuntu/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688213300"
},
"total": 12856.376549971,
"count": 1,
"self": 0.01999147100104892,
"children": {
"run_training.setup": {
"total": 0.02498179999999195,
"count": 1,
"self": 0.02498179999999195
},
"TrainerController.start_learning": {
"total": 12856.3315767,
"count": 1,
"self": 4.982563114072036,
"children": {
"TrainerController._reset_env": {
"total": 3.6307337830013466,
"count": 10,
"self": 3.6307337830013466
},
"TrainerController.advance": {
"total": 12846.941077976926,
"count": 108648,
"self": 5.051357533362534,
"children": {
"env_step": {
"total": 4003.7427598878726,
"count": 108648,
"self": 3062.470342485636,
"children": {
"SubprocessEnvManager._take_step": {
"total": 938.7875249652064,
"count": 108648,
"self": 24.152122987769076,
"children": {
"TorchPolicy.evaluate": {
"total": 914.6354019774374,
"count": 215758,
"self": 914.6354019774374
}
}
},
"workers": {
"total": 2.4848924370297283,
"count": 108648,
"self": 0.0,
"children": {
"worker_root": {
"total": 12720.231992268977,
"count": 108648,
"is_parallel": true,
"self": 10318.803902657073,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00453893599998878,
"count": 2,
"is_parallel": true,
"self": 0.002252499999997326,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002286435999991454,
"count": 8,
"is_parallel": true,
"self": 0.002286435999991454
}
}
},
"UnityEnvironment.step": {
"total": 0.03333030799998937,
"count": 1,
"is_parallel": true,
"self": 0.0006778139999852328,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005530499999935046,
"count": 1,
"is_parallel": true,
"self": 0.0005530499999935046
},
"communicator.exchange": {
"total": 0.02987785300000212,
"count": 1,
"is_parallel": true,
"self": 0.02987785300000212
},
"steps_from_proto": {
"total": 0.002221591000008516,
"count": 2,
"is_parallel": true,
"self": 0.0004955029999820226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017260880000264933,
"count": 8,
"is_parallel": true,
"self": 0.0017260880000264933
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.019919461000299066,
"count": 18,
"is_parallel": true,
"self": 0.004278941994286356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01564051900601271,
"count": 72,
"is_parallel": true,
"self": 0.01564051900601271
}
}
},
"UnityEnvironment.step": {
"total": 2401.408170150902,
"count": 108647,
"is_parallel": true,
"self": 116.08875030647414,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 50.96959109091962,
"count": 108647,
"is_parallel": true,
"self": 50.96959109091962
},
"communicator.exchange": {
"total": 1643.7480166697937,
"count": 108647,
"is_parallel": true,
"self": 1643.7480166697937
},
"steps_from_proto": {
"total": 590.6018120837148,
"count": 217294,
"is_parallel": true,
"self": 123.40401506600534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 467.19779701770943,
"count": 869176,
"is_parallel": true,
"self": 467.19779701770943
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8838.14696055569,
"count": 108648,
"self": 36.00129477788505,
"children": {
"process_trajectory": {
"total": 746.8148805558056,
"count": 108648,
"self": 745.3623481188051,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4525324370004569,
"count": 4,
"self": 1.4525324370004569
}
}
},
"_update_policy": {
"total": 8055.3307852220005,
"count": 78,
"self": 462.32130745402173,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7593.009477767979,
"count": 2343,
"self": 7593.009477767979
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.282999048475176e-06,
"count": 1,
"self": 2.282999048475176e-06
},
"TrainerController._save_models": {
"total": 0.7771995430011884,
"count": 1,
"self": 0.014882686000419199,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7623168570007692,
"count": 1,
"self": 0.7623168570007692
}
}
}
}
}
}
}