{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35770946741104126,
"min": 0.35770946741104126,
"max": 1.4576085805892944,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10645.43359375,
"min": 10645.43359375,
"max": 44218.015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5462323427200317,
"min": -0.126254603266716,
"max": 0.6217602491378784,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 151.30636596679688,
"min": -30.301105499267578,
"max": 177.82342529296875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.022888317704200745,
"min": -0.022888317704200745,
"max": 0.3432686924934387,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.34006404876709,
"min": -6.34006404876709,
"max": 81.35468292236328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0706488924659919,
"min": 0.06513877789795149,
"max": 0.07167538732652823,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9890844945238866,
"min": 0.49750003069217696,
"max": 1.0505044340291836,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01573239199660493,
"min": 0.00015326874495410285,
"max": 0.01573239199660493,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.220253487952469,
"min": 0.001839224939449234,
"max": 0.220253487952469,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.428633238107144e-06,
"min": 7.428633238107144e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010400086533350001,
"min": 0.00010400086533350001,
"max": 0.0031167793610735995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247617857142859,
"min": 0.10247617857142859,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346665000000003,
"min": 1.3691136000000002,
"max": 2.4007851000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025737023928571444,
"min": 0.00025737023928571444,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003603183350000002,
"min": 0.003603183350000002,
"max": 0.10390874736,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0078036305494606495,
"min": 0.0078036305494606495,
"max": 0.4196743667125702,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10925082862377167,
"min": 0.10925082862377167,
"max": 2.937720537185669,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 347.0,
"min": 304.5268817204301,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29495.0,
"min": 15984.0,
"max": 32588.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6059270378421335,
"min": -1.0000000521540642,
"max": 1.6739612691184527,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 136.50379821658134,
"min": -32.000001668930054,
"max": 155.6783980280161,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6059270378421335,
"min": -1.0000000521540642,
"max": 1.6739612691184527,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 136.50379821658134,
"min": -32.000001668930054,
"max": 155.6783980280161,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027981048805454253,
"min": 0.025285319663942242,
"max": 9.194417202845216,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3783891484636115,
"min": 2.3515347287466284,
"max": 147.11067524552345,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742161629",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742164054"
},
"total": 2424.462341949001,
"count": 1,
"self": 0.4908497020005598,
"children": {
"run_training.setup": {
"total": 0.02141954099988652,
"count": 1,
"self": 0.02141954099988652
},
"TrainerController.start_learning": {
"total": 2423.9500727060004,
"count": 1,
"self": 1.6165560220370025,
"children": {
"TrainerController._reset_env": {
"total": 2.440285273999507,
"count": 1,
"self": 2.440285273999507
},
"TrainerController.advance": {
"total": 2419.7949695859643,
"count": 63687,
"self": 1.5972076888911033,
"children": {
"env_step": {
"total": 1676.3387588160758,
"count": 63687,
"self": 1504.8989421069846,
"children": {
"SubprocessEnvManager._take_step": {
"total": 170.54626897312846,
"count": 63687,
"self": 5.077192054063744,
"children": {
"TorchPolicy.evaluate": {
"total": 165.46907691906472,
"count": 62559,
"self": 165.46907691906472
}
}
},
"workers": {
"total": 0.8935477359627839,
"count": 63687,
"self": 0.0,
"children": {
"worker_root": {
"total": 2418.1762590098488,
"count": 63687,
"is_parallel": true,
"self": 1037.361536041727,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023863859996708925,
"count": 1,
"is_parallel": true,
"self": 0.0007600710014230572,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016263149982478353,
"count": 8,
"is_parallel": true,
"self": 0.0016263149982478353
}
}
},
"UnityEnvironment.step": {
"total": 0.072759968000355,
"count": 1,
"is_parallel": true,
"self": 0.000620430999333621,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005245550000836374,
"count": 1,
"is_parallel": true,
"self": 0.0005245550000836374
},
"communicator.exchange": {
"total": 0.06979246200080524,
"count": 1,
"is_parallel": true,
"self": 0.06979246200080524
},
"steps_from_proto": {
"total": 0.0018225200001324993,
"count": 1,
"is_parallel": true,
"self": 0.00041465000140306074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014078699987294385,
"count": 8,
"is_parallel": true,
"self": 0.0014078699987294385
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1380.8147229681217,
"count": 63686,
"is_parallel": true,
"self": 34.89657265520509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.515771951015267,
"count": 63686,
"is_parallel": true,
"self": 24.515771951015267
},
"communicator.exchange": {
"total": 1216.4732157398003,
"count": 63686,
"is_parallel": true,
"self": 1216.4732157398003
},
"steps_from_proto": {
"total": 104.9291626221011,
"count": 63686,
"is_parallel": true,
"self": 21.722518409297663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.20664421280344,
"count": 509488,
"is_parallel": true,
"self": 83.20664421280344
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 741.8590030809974,
"count": 63687,
"self": 2.9273186370937765,
"children": {
"process_trajectory": {
"total": 136.82329280391423,
"count": 63687,
"self": 136.6093289569153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21396384699892224,
"count": 2,
"self": 0.21396384699892224
}
}
},
"_update_policy": {
"total": 602.1083916399893,
"count": 433,
"self": 330.3866990549541,
"children": {
"TorchPPOOptimizer.update": {
"total": 271.72169258503527,
"count": 22872,
"self": 271.72169258503527
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1819993233075365e-06,
"count": 1,
"self": 1.1819993233075365e-06
},
"TrainerController._save_models": {
"total": 0.0982606420002412,
"count": 1,
"self": 0.001597373000549851,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09666326899969135,
"count": 1,
"self": 0.09666326899969135
}
}
}
}
}
}
}