{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2148943841457367,
"min": 0.2148943841457367,
"max": 1.3386504650115967,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6398.6953125,
"min": 6398.6953125,
"max": 40609.30078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5728213787078857,
"min": -0.08288775384426117,
"max": 0.6978721022605896,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.52587890625,
"min": -19.975948333740234,
"max": 198.1956787109375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01156570389866829,
"min": -0.04491960629820824,
"max": 0.5567572712898254,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.1805684566497803,
"min": -12.802087783813477,
"max": 131.95147705078125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06910566072231934,
"min": 0.06428759624198058,
"max": 0.07498097757964466,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9674792501124707,
"min": 0.5248668430575126,
"max": 1.053749337457417,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015646130343416838,
"min": 0.0009753062071172911,
"max": 0.016817470663113007,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21904582480783574,
"min": 0.011703674485407493,
"max": 0.2522620599466951,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5599260514857115e-06,
"min": 7.5599260514857115e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010583896472079996,
"min": 0.00010583896472079996,
"max": 0.0035089070303643997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251994285714286,
"min": 0.10251994285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352792,
"min": 1.3886848,
"max": 2.5696356000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026174229142857135,
"min": 0.00026174229142857135,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003664392079999999,
"min": 0.003664392079999999,
"max": 0.11698659644,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016733918339014053,
"min": 0.016733918339014053,
"max": 0.5846266150474548,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23427484929561615,
"min": 0.23427484929561615,
"max": 4.092386245727539,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 311.79787234042556,
"min": 290.2857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29309.0,
"min": 15984.0,
"max": 32463.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6215677235716133,
"min": -1.0000000521540642,
"max": 1.709314273084913,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 150.80579829216003,
"min": -29.74420166760683,
"max": 179.47799867391586,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6215677235716133,
"min": -1.0000000521540642,
"max": 1.709314273084913,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 150.80579829216003,
"min": -29.74420166760683,
"max": 179.47799867391586,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05444995630363513,
"min": 0.052327333814441906,
"max": 12.250981282442808,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.063845936238067,
"min": 5.063845936238067,
"max": 196.01570051908493,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748335716",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748337998"
},
"total": 2281.415340696,
"count": 1,
"self": 0.47622666400002345,
"children": {
"run_training.setup": {
"total": 0.020646119000048202,
"count": 1,
"self": 0.020646119000048202
},
"TrainerController.start_learning": {
"total": 2280.918467913,
"count": 1,
"self": 1.2549265739744442,
"children": {
"TrainerController._reset_env": {
"total": 2.314429399999881,
"count": 1,
"self": 2.314429399999881
},
"TrainerController.advance": {
"total": 2277.2665988450262,
"count": 64308,
"self": 1.3288470261200018,
"children": {
"env_step": {
"total": 1617.1538337620004,
"count": 64308,
"self": 1473.4455414089152,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.99040322702422,
"count": 64308,
"self": 4.42058182801884,
"children": {
"TorchPolicy.evaluate": {
"total": 138.56982139900538,
"count": 62554,
"self": 138.56982139900538
}
}
},
"workers": {
"total": 0.7178891260609817,
"count": 64308,
"self": 0.0,
"children": {
"worker_root": {
"total": 2276.1095034649675,
"count": 64308,
"is_parallel": true,
"self": 907.9736187839444,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019760710001719417,
"count": 1,
"is_parallel": true,
"self": 0.0006298550001702097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001346216000001732,
"count": 8,
"is_parallel": true,
"self": 0.001346216000001732
}
}
},
"UnityEnvironment.step": {
"total": 0.04968630400026086,
"count": 1,
"is_parallel": true,
"self": 0.0005314970003382768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045573299985335325,
"count": 1,
"is_parallel": true,
"self": 0.00045573299985335325
},
"communicator.exchange": {
"total": 0.047139161999893986,
"count": 1,
"is_parallel": true,
"self": 0.047139161999893986
},
"steps_from_proto": {
"total": 0.0015599120001752453,
"count": 1,
"is_parallel": true,
"self": 0.0003219290001652553,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00123798300000999,
"count": 8,
"is_parallel": true,
"self": 0.00123798300000999
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1368.135884681023,
"count": 64307,
"is_parallel": true,
"self": 30.83816747100127,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.572130223010845,
"count": 64307,
"is_parallel": true,
"self": 22.572130223010845
},
"communicator.exchange": {
"total": 1222.5201133649452,
"count": 64307,
"is_parallel": true,
"self": 1222.5201133649452
},
"steps_from_proto": {
"total": 92.20547362206571,
"count": 64307,
"is_parallel": true,
"self": 18.087119729541428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.11835389252428,
"count": 514456,
"is_parallel": true,
"self": 74.11835389252428
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.7839180569058,
"count": 64308,
"self": 2.474764453014359,
"children": {
"process_trajectory": {
"total": 124.77488808389307,
"count": 64308,
"self": 124.57553375389307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19935433000000558,
"count": 2,
"self": 0.19935433000000558
}
}
},
"_update_policy": {
"total": 531.5342655199984,
"count": 455,
"self": 292.96134459393306,
"children": {
"TorchPPOOptimizer.update": {
"total": 238.5729209260653,
"count": 22839,
"self": 238.5729209260653
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.319992386735976e-07,
"count": 1,
"self": 9.319992386735976e-07
},
"TrainerController._save_models": {
"total": 0.08251216200005729,
"count": 1,
"self": 0.0015813310001249192,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08093083099993237,
"count": 1,
"self": 0.08093083099993237
}
}
}
}
}
}
}