ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4082201719284058,
"min": 1.4082026481628418,
"max": 1.4263930320739746,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69739.2890625,
"min": 67939.1015625,
"max": 78673.0078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.21206896551725,
"min": 79.4437194127243,
"max": 373.7985074626866,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49423.0,
"min": 48699.0,
"max": 50157.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999883.0,
"min": 49845.0,
"max": 1999883.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999883.0,
"min": 49845.0,
"max": 1999883.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4954235553741455,
"min": 0.027569342404603958,
"max": 2.4954235553741455,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1447.345703125,
"min": 3.666722536087036,
"max": 1495.729248046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9726123377166944,
"min": 1.8023125717514439,
"max": 4.001025256758441,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2304.115155875683,
"min": 239.70757204294205,
"max": 2336.1711114645004,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9726123377166944,
"min": 1.8023125717514439,
"max": 4.001025256758441,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2304.115155875683,
"min": 239.70757204294205,
"max": 2336.1711114645004,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01639635819075112,
"min": 0.014087977182255903,
"max": 0.02119220102322288,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049189074572253355,
"min": 0.028175954364511806,
"max": 0.05638963562817025,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06267289308210214,
"min": 0.02219303964326779,
"max": 0.06400606961299976,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1880186792463064,
"min": 0.04438607928653558,
"max": 0.1880186792463064,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.71359876216667e-06,
"min": 3.71359876216667e-06,
"max": 0.00029534475155175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.114079628650001e-05,
"min": 1.114079628650001e-05,
"max": 0.0008442639185786997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123783333333336,
"min": 0.10123783333333336,
"max": 0.19844824999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037135000000001,
"min": 0.2076048000000001,
"max": 0.5814213000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.17678833333334e-05,
"min": 7.17678833333334e-05,
"max": 0.0049225676749999996,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021530365000000022,
"min": 0.00021530365000000022,
"max": 0.014072922869999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709650051",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709655220"
},
"total": 5169.3869960679995,
"count": 1,
"self": 0.6522706989999278,
"children": {
"run_training.setup": {
"total": 0.06626014200003283,
"count": 1,
"self": 0.06626014200003283
},
"TrainerController.start_learning": {
"total": 5168.668465227,
"count": 1,
"self": 9.278705944175272,
"children": {
"TrainerController._reset_env": {
"total": 4.738418195999998,
"count": 1,
"self": 4.738418195999998
},
"TrainerController.advance": {
"total": 5154.520630912824,
"count": 232620,
"self": 10.547140932661932,
"children": {
"env_step": {
"total": 3419.435167479021,
"count": 232620,
"self": 2845.2673726329735,
"children": {
"SubprocessEnvManager._take_step": {
"total": 567.9835163989267,
"count": 232620,
"self": 36.17726019303893,
"children": {
"TorchPolicy.evaluate": {
"total": 531.8062562058877,
"count": 222882,
"self": 531.8062562058877
}
}
},
"workers": {
"total": 6.184278447120846,
"count": 232620,
"self": 0.0,
"children": {
"worker_root": {
"total": 5150.980136523809,
"count": 232620,
"is_parallel": true,
"self": 2865.4825829186157,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001392607000013868,
"count": 1,
"is_parallel": true,
"self": 0.0003804830000149195,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010121239999989484,
"count": 2,
"is_parallel": true,
"self": 0.0010121239999989484
}
}
},
"UnityEnvironment.step": {
"total": 0.041041565000000446,
"count": 1,
"is_parallel": true,
"self": 0.0005038620000163974,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002804239999818492,
"count": 1,
"is_parallel": true,
"self": 0.0002804239999818492
},
"communicator.exchange": {
"total": 0.0392880640000044,
"count": 1,
"is_parallel": true,
"self": 0.0392880640000044
},
"steps_from_proto": {
"total": 0.0009692149999978028,
"count": 1,
"is_parallel": true,
"self": 0.00027270599997564204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006965090000221608,
"count": 2,
"is_parallel": true,
"self": 0.0006965090000221608
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2285.4975536051934,
"count": 232619,
"is_parallel": true,
"self": 77.64235449220405,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 112.5580571999003,
"count": 232619,
"is_parallel": true,
"self": 112.5580571999003
},
"communicator.exchange": {
"total": 1937.8268308849615,
"count": 232619,
"is_parallel": true,
"self": 1937.8268308849615
},
"steps_from_proto": {
"total": 157.4703110281277,
"count": 232619,
"is_parallel": true,
"self": 49.33288081438201,
"children": {
"_process_rank_one_or_two_observation": {
"total": 108.13743021374569,
"count": 465238,
"is_parallel": true,
"self": 108.13743021374569
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1724.5383225011415,
"count": 232620,
"self": 15.75108520005665,
"children": {
"process_trajectory": {
"total": 288.4187498530842,
"count": 232620,
"self": 287.0565956320848,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3621542209994004,
"count": 10,
"self": 1.3621542209994004
}
}
},
"_update_policy": {
"total": 1420.3684874480007,
"count": 97,
"self": 352.1675598019863,
"children": {
"TorchPPOOptimizer.update": {
"total": 1068.2009276460144,
"count": 2910,
"self": 1068.2009276460144
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.174999852082692e-06,
"count": 1,
"self": 1.174999852082692e-06
},
"TrainerController._save_models": {
"total": 0.1307089990004897,
"count": 1,
"self": 0.0033132670005215914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12739573199996812,
"count": 1,
"self": 0.12739573199996812
}
}
}
}
}
}
}
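
A minimal sketch (not part of the logged run) showing one way to inspect this timers.json with the Python standard library: it prints the tracked gauges and then walks the nested "children" timer tree to report where wall-clock time was spent. The file path below is an assumption; point it at wherever this file lives in your copy of the run logs.

import json

with open("run_logs/timers.json") as f:  # assumed location of this file
    timers = json.load(f)

# Gauges: value/min/max of each tracked metric
# (e.g. Huggy.Environment.CumulativeReward.mean).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f}")

# Timer tree: recursively report total seconds and call counts per node.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)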