{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.601577877998352,
"min": 1.5636346340179443,
"max": 3.295750856399536,
"count": 1003
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32339.060546875,
"min": 15664.6953125,
"max": 112831.828125,
"count": 1003
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.627906976744185,
"min": 36.02962962962963,
"max": 999.0,
"count": 1003
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20168.0,
"min": 14004.0,
"max": 26060.0,
"count": 1003
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1623.1869950634712,
"min": 1195.8593302065567,
"max": 1671.6834722475915,
"count": 983
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 279188.16315091704,
"min": 2392.3790637106417,
"max": 423211.87002880627,
"count": 983
},
"SoccerTwos.Step.mean": {
"value": 10029706.0,
"min": 9048.0,
"max": 10029706.0,
"count": 1003
},
"SoccerTwos.Step.sum": {
"value": 10029706.0,
"min": 9048.0,
"max": 10029706.0,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.016616741195321083,
"min": -0.11547587066888809,
"max": 0.19086655974388123,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.858079433441162,
"min": -23.788028717041016,
"max": 29.637088775634766,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.013339678756892681,
"min": -0.11674820631742477,
"max": 0.19290082156658173,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.2944247722625732,
"min": -24.05013084411621,
"max": 29.790895462036133,
"count": 1003
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1003
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.01914418436760126,
"min": -0.5213866670926411,
"max": 0.6197111085057259,
"count": 1003
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.292799711227417,
"min": -54.46280014514923,
"max": 54.36500072479248,
"count": 1003
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.01914418436760126,
"min": -0.5213866670926411,
"max": 0.6197111085057259,
"count": 1003
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.292799711227417,
"min": -54.46280014514923,
"max": 54.36500072479248,
"count": 1003
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1003
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1003
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015971961586425703,
"min": 0.011165675827457259,
"max": 0.023715842918803295,
"count": 483
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015971961586425703,
"min": 0.011165675827457259,
"max": 0.023715842918803295,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1058881791929404,
"min": 9.719703848531936e-06,
"max": 0.12382420475284259,
"count": 483
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1058881791929404,
"min": 9.719703848531936e-06,
"max": 0.12382420475284259,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10714588115612665,
"min": 1.0255586797332702e-05,
"max": 0.12667151143153507,
"count": 483
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10714588115612665,
"min": 1.0255586797332702e-05,
"max": 0.12667151143153507,
"count": 483
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 483
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 483
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 483
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693248900",
"python_version": "3.9.17 | packaged by conda-forge | (main, Aug 10 2023, 07:02:31) \n[GCC 12.3.0]",
"command_line_arguments": "/home/webdev/mambaforge/envs/HFU7/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693277931"
},
"total": 29031.620333282,
"count": 1,
"self": 0.05287812399910763,
"children": {
"run_training.setup": {
"total": 0.009515609999652952,
"count": 1,
"self": 0.009515609999652952
},
"TrainerController.start_learning": {
"total": 29031.557939548,
"count": 1,
"self": 18.175838661936723,
"children": {
"TrainerController._reset_env": {
"total": 2.156283196993172,
"count": 51,
"self": 2.156283196993172
},
"TrainerController.advance": {
"total": 29011.01186484007,
"count": 688680,
"self": 18.19091983104954,
"children": {
"env_step": {
"total": 10782.167248136095,
"count": 688680,
"self": 8597.337880749792,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2174.479044507286,
"count": 688680,
"self": 85.00831666234353,
"children": {
"TorchPolicy.evaluate": {
"total": 2089.4707278449423,
"count": 1264134,
"self": 2089.4707278449423
}
}
},
"workers": {
"total": 10.350322879016858,
"count": 688680,
"self": 0.0,
"children": {
"worker_root": {
"total": 28990.45162019948,
"count": 688680,
"is_parallel": true,
"self": 22156.694621427374,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023145959994508303,
"count": 2,
"is_parallel": true,
"self": 0.0005671239996445365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017474719998062938,
"count": 8,
"is_parallel": true,
"self": 0.0017474719998062938
}
}
},
"UnityEnvironment.step": {
"total": 0.025921381000443944,
"count": 1,
"is_parallel": true,
"self": 0.0005362040001273272,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045210500047687674,
"count": 1,
"is_parallel": true,
"self": 0.00045210500047687674
},
"communicator.exchange": {
"total": 0.02323471999989124,
"count": 1,
"is_parallel": true,
"self": 0.02323471999989124
},
"steps_from_proto": {
"total": 0.0016983519999484997,
"count": 2,
"is_parallel": true,
"self": 0.00039471499894716544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013036370010013343,
"count": 8,
"is_parallel": true,
"self": 0.0013036370010013343
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6833.672253721114,
"count": 688679,
"is_parallel": true,
"self": 364.2248479545833,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 255.25064751194077,
"count": 688679,
"is_parallel": true,
"self": 255.25064751194077
},
"communicator.exchange": {
"total": 5147.227819189504,
"count": 688679,
"is_parallel": true,
"self": 5147.227819189504
},
"steps_from_proto": {
"total": 1066.9689390650865,
"count": 1377358,
"is_parallel": true,
"self": 238.2093864804374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 828.7595525846491,
"count": 5509432,
"is_parallel": true,
"self": 828.7595525846491
}
}
}
}
},
"steps_from_proto": {
"total": 0.0847450509927512,
"count": 100,
"is_parallel": true,
"self": 0.018489625949769106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0662554250429821,
"count": 400,
"is_parallel": true,
"self": 0.0662554250429821
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 18210.653696872927,
"count": 688680,
"self": 131.16370465547516,
"children": {
"process_trajectory": {
"total": 2630.6715978244383,
"count": 688680,
"self": 2626.737615402437,
"children": {
"RLTrainer._checkpoint": {
"total": 3.9339824220014634,
"count": 20,
"self": 3.9339824220014634
}
}
},
"_update_policy": {
"total": 15448.818394393013,
"count": 484,
"self": 1496.9449924147339,
"children": {
"TorchPOCAOptimizer.update": {
"total": 13951.873401978279,
"count": 14503,
"self": 13951.873401978279
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1840020306408405e-06,
"count": 1,
"self": 1.1840020306408405e-06
},
"TrainerController._save_models": {
"total": 0.21395166499860352,
"count": 1,
"self": 0.0018202789942733943,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21213138600433012,
"count": 1,
"self": 0.21213138600433012
}
}
}
}
}
}
}