{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.6736977100372314,
            "min": 0.6198792457580566,
            "max": 1.4141618013381958,
            "count": 33
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 20286.384765625,
            "min": 18774.90234375,
            "max": 42900.01171875,
            "count": 33
        },
        "Pyramids.Step.mean": {
            "value": 989978.0,
            "min": 29952.0,
            "max": 989978.0,
            "count": 33
        },
        "Pyramids.Step.sum": {
            "value": 989978.0,
            "min": 29952.0,
            "max": 989978.0,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.02592300996184349,
            "min": -0.10772595554590225,
            "max": 0.02592300996184349,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 6.402983665466309,
            "min": -25.961956024169922,
            "max": 6.402983665466309,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.02117227576673031,
            "min": 0.004633665084838867,
            "max": 0.6760766506195068,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 5.229552268981934,
            "min": 1.1259806156158447,
            "max": 160.23016357421875,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.06769739676742026,
            "min": 0.06341377033498882,
            "max": 0.07244526982478579,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9477635547438836,
            "min": 0.49909575964222486,
            "max": 1.070488970690206,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.00458868943730015,
            "min": 0.00011175098238129912,
            "max": 0.023822861587580835,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.0642416521222021,
            "min": 0.0015645137533381876,
            "max": 0.16676003111306584,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.265369006814283e-06,
            "min": 7.265369006814283e-06,
            "max": 0.00029515063018788575,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.00010171516609539996,
            "min": 0.00010171516609539996,
            "max": 0.0033711007762997993,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10242175714285714,
            "min": 0.10242175714285714,
            "max": 0.19838354285714285,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4339046,
            "min": 1.3886848,
            "max": 2.442973,
            "count": 33
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0002519335385714285,
            "min": 0.0002519335385714285,
            "max": 0.00983851593142857,
            "count": 33
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.0035270695399999986,
            "min": 0.0035270695399999986,
            "max": 0.11238764998,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.01812569424510002,
            "min": 0.01812569424510002,
            "max": 0.7999270558357239,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.2537597119808197,
            "min": 0.2537597119808197,
            "max": 5.599489212036133,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 829.0263157894736,
            "min": 811.6111111111111,
            "max": 999.0,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 31503.0,
            "min": 15984.0,
            "max": 33238.0,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": -0.14521056668538795,
            "min": -1.0000000521540642,
            "max": -0.058109141208908775,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": -5.518001534044743,
            "min": -30.597001671791077,
            "max": -1.9176016598939896,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": -0.14521056668538795,
            "min": -1.0000000521540642,
            "max": -0.058109141208908775,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": -5.518001534044743,
            "min": -30.597001671791077,
            "max": -1.9176016598939896,
            "count": 33
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.15536519685625344,
            "min": 0.15536519685625344,
            "max": 17.52510759420693,
            "count": 33
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 5.903877480537631,
            "min": 5.506261245813221,
            "max": 280.40172150731087,
            "count": 33
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1722173776",
        "python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.3.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1722176148"
    },
    "total": 2372.5388752540002,
    "count": 1,
    "self": 0.5332041679998838,
    "children": {
        "run_training.setup": {
            "total": 0.05503801700024269,
            "count": 1,
            "self": 0.05503801700024269
        },
        "TrainerController.start_learning": {
            "total": 2371.950633069,
            "count": 1,
            "self": 1.9759006100121042,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.142032736999681,
                    "count": 1,
                    "self": 2.142032736999681
                },
                "TrainerController.advance": {
                    "total": 2367.739921567988,
                    "count": 63204,
                    "self": 2.0344011850661445,
                    "children": {
                        "env_step": {
                            "total": 1678.7388236719694,
                            "count": 63204,
                            "self": 1504.6707382740155,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 172.85837494195266,
                                    "count": 63204,
                                    "self": 5.987318446861536,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 166.87105649509112,
                                            "count": 62565,
                                            "self": 166.87105649509112
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 1.2097104560011758,
                                    "count": 63204,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2365.6948671360406,
                                            "count": 63204,
                                            "is_parallel": true,
                                            "self": 1015.012387977114,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0022589320001316082,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.000703372000316449,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0015555599998151592,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0015555599998151592
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.05585919100030878,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007087430008141382,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005386119996728667,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005386119996728667
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.050047713999902044,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.050047713999902044
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.004564121999919735,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0030152290000842186,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.001548892999835516,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.001548892999835516
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1350.6824791589265,
                                                    "count": 63203,
                                                    "is_parallel": true,
                                                    "self": 40.62036701064244,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 27.47282384003529,
                                                            "count": 63203,
                                                            "is_parallel": true,
                                                            "self": 27.47282384003529
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1167.1019556301749,
                                                            "count": 63203,
                                                            "is_parallel": true,
                                                            "self": 1167.1019556301749
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 115.48733267807393,
                                                            "count": 63203,
                                                            "is_parallel": true,
                                                            "self": 25.00317458384552,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 90.4841580942284,
                                                                    "count": 505624,
                                                                    "is_parallel": true,
                                                                    "self": 90.4841580942284
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 686.9666967109524,
                            "count": 63204,
                            "self": 3.582054438896648,
                            "children": {
                                "process_trajectory": {
                                    "total": 141.5031872690497,
                                    "count": 63204,
                                    "self": 141.28295461604876,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.22023265300094863,
                                            "count": 2,
                                            "self": 0.22023265300094863
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 541.8814550030061,
                                    "count": 446,
                                    "self": 318.56586804496374,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 223.31558695804233,
                                            "count": 22905,
                                            "self": 223.31558695804233
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.0260000635753386e-06,
                    "count": 1,
                    "self": 1.0260000635753386e-06
                },
                "TrainerController._save_models": {
                    "total": 0.09277712800030713,
                    "count": 1,
                    "self": 0.0014536150001731585,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.09132351300013397,
                            "count": 1,
                            "self": 0.09132351300013397
                        }
                    }
                }
            }
        }
    }
}