ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4065760374069214,
"min": 1.4065760374069214,
"max": 1.428511142730713,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71147.4296875,
"min": 69028.921875,
"max": 77202.765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.0867924528302,
"min": 73.90419161676647,
"max": 395.984126984127,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48806.0,
"min": 48806.0,
"max": 50111.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49270.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49270.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4375555515289307,
"min": 0.09388220310211182,
"max": 2.483473300933838,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1291.9044189453125,
"min": 11.735275268554688,
"max": 1600.18994140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7814017796291495,
"min": 1.7683164225816728,
"max": 3.9941716484255174,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2004.1429432034492,
"min": 221.03955282270908,
"max": 2479.044963002205,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7814017796291495,
"min": 1.7683164225816728,
"max": 3.9941716484255174,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2004.1429432034492,
"min": 221.03955282270908,
"max": 2479.044963002205,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018248013691562745,
"min": 0.013711116932669635,
"max": 0.022420570952817798,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.054744041074688235,
"min": 0.02742223386533927,
"max": 0.06414937443235734,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048517807407511604,
"min": 0.021804422854135433,
"max": 0.06086087574561437,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1455534222225348,
"min": 0.043608845708270866,
"max": 0.1825201090425253,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.531598822833344e-06,
"min": 3.531598822833344e-06,
"max": 0.00029537647654117493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0594796468500032e-05,
"min": 1.0594796468500032e-05,
"max": 0.0008440555686481499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117716666666669,
"min": 0.10117716666666669,
"max": 0.198458825,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30353150000000007,
"min": 0.2075393,
"max": 0.58135185,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.874061666666682e-05,
"min": 6.874061666666682e-05,
"max": 0.0049230953675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020622185000000046,
"min": 0.00020622185000000046,
"max": 0.014069457314999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722822258",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722826613"
},
"total": 4354.473612222,
"count": 1,
"self": 0.8594241069995405,
"children": {
"run_training.setup": {
"total": 0.07628237900007662,
"count": 1,
"self": 0.07628237900007662
},
"TrainerController.start_learning": {
"total": 4353.5379057360005,
"count": 1,
"self": 7.5291313938068924,
"children": {
"TrainerController._reset_env": {
"total": 3.6978101610000067,
"count": 1,
"self": 3.6978101610000067
},
"TrainerController.advance": {
"total": 4342.164813691193,
"count": 233245,
"self": 8.071735377140612,
"children": {
"env_step": {
"total": 2797.072742511087,
"count": 233245,
"self": 2348.3644894478844,
"children": {
"SubprocessEnvManager._take_step": {
"total": 443.5528334081024,
"count": 233245,
"self": 28.498919020094718,
"children": {
"TorchPolicy.evaluate": {
"total": 415.0539143880077,
"count": 223102,
"self": 415.0539143880077
}
}
},
"workers": {
"total": 5.155419655100445,
"count": 233245,
"self": 0.0,
"children": {
"worker_root": {
"total": 4340.55038467896,
"count": 233245,
"is_parallel": true,
"self": 2482.210087254043,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009973429999945438,
"count": 1,
"is_parallel": true,
"self": 0.0002543230000355834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007430199999589604,
"count": 2,
"is_parallel": true,
"self": 0.0007430199999589604
}
}
},
"UnityEnvironment.step": {
"total": 0.05446826299998975,
"count": 1,
"is_parallel": true,
"self": 0.00042178399996828375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000201231000005464,
"count": 1,
"is_parallel": true,
"self": 0.000201231000005464
},
"communicator.exchange": {
"total": 0.05300433399997928,
"count": 1,
"is_parallel": true,
"self": 0.05300433399997928
},
"steps_from_proto": {
"total": 0.0008409140000367188,
"count": 1,
"is_parallel": true,
"self": 0.00022744299997157214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006134710000651467,
"count": 2,
"is_parallel": true,
"self": 0.0006134710000651467
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1858.3402974249168,
"count": 233244,
"is_parallel": true,
"self": 59.78132670580612,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 105.87364292700363,
"count": 233244,
"is_parallel": true,
"self": 105.87364292700363
},
"communicator.exchange": {
"total": 1559.8961288641165,
"count": 233244,
"is_parallel": true,
"self": 1559.8961288641165
},
"steps_from_proto": {
"total": 132.78919892799047,
"count": 233244,
"is_parallel": true,
"self": 43.25368482229237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.5355141056981,
"count": 466488,
"is_parallel": true,
"self": 89.5355141056981
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1537.0203358029646,
"count": 233245,
"self": 12.012931510013914,
"children": {
"process_trajectory": {
"total": 243.24202211395027,
"count": 233245,
"self": 241.88630355895089,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3557185549993847,
"count": 10,
"self": 1.3557185549993847
}
}
},
"_update_policy": {
"total": 1281.7653821790004,
"count": 97,
"self": 332.48670919500364,
"children": {
"TorchPPOOptimizer.update": {
"total": 949.2786729839968,
"count": 2910,
"self": 949.2786729839968
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4080005712457933e-06,
"count": 1,
"self": 1.4080005712457933e-06
},
"TrainerController._save_models": {
"total": 0.14614908200019272,
"count": 1,
"self": 0.0030084150002949173,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1431406669998978,
"count": 1,
"self": 0.1431406669998978
}
}
}
}
}
}
}