{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4088417291641235,
"min": 1.4088417291641235,
"max": 1.4264957904815674,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70351.921875,
"min": 65262.6484375,
"max": 76418.734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.88557213930348,
"min": 76.01694915254237,
"max": 376.14285714285717,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49377.0,
"min": 47394.0,
"max": 50504.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999922.0,
"min": 49693.0,
"max": 1999922.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999922.0,
"min": 49693.0,
"max": 1999922.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4483633041381836,
"min": 0.09402307122945786,
"max": 2.52449369430542,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1476.363037109375,
"min": 11.752883911132812,
"max": 1558.70947265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7619344168041478,
"min": 1.7638482704162597,
"max": 3.9733553009278126,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2268.446453332901,
"min": 220.48103380203247,
"max": 2475.4003524780273,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7619344168041478,
"min": 1.7638482704162597,
"max": 3.9733553009278126,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2268.446453332901,
"min": 220.48103380203247,
"max": 2475.4003524780273,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01873558638811422,
"min": 0.01383289415486312,
"max": 0.02049874672763205,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05620675916434266,
"min": 0.02766578830972624,
"max": 0.05620675916434266,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05769668858912256,
"min": 0.022609764399627842,
"max": 0.05950507236023744,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17309006576736768,
"min": 0.045219528799255684,
"max": 0.1742266144603491,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.202898932399995e-06,
"min": 3.202898932399995e-06,
"max": 0.0002948929517023499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.608696797199985e-06,
"min": 9.608696797199985e-06,
"max": 0.00084273286908905,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106759999999998,
"min": 0.10106759999999998,
"max": 0.19829765000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30320279999999994,
"min": 0.20731065000000004,
"max": 0.58091095,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.32732399999999e-05,
"min": 6.32732399999999e-05,
"max": 0.004915052735000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001898197199999997,
"min": 0.0001898197199999997,
"max": 0.014047456405,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683703516",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683706018"
},
"total": 2502.82116085,
"count": 1,
"self": 0.42645715600019685,
"children": {
"run_training.setup": {
"total": 0.06013412499999049,
"count": 1,
"self": 0.06013412499999049
},
"TrainerController.start_learning": {
"total": 2502.334569569,
"count": 1,
"self": 4.577042138922934,
"children": {
"TrainerController._reset_env": {
"total": 3.5998098569999684,
"count": 1,
"self": 3.5998098569999684
},
"TrainerController.advance": {
"total": 2493.964969120077,
"count": 232206,
"self": 4.6638498483494,
"children": {
"env_step": {
"total": 1960.2833720698932,
"count": 232206,
"self": 1654.767936068854,
"children": {
"SubprocessEnvManager._take_step": {
"total": 302.57932118703764,
"count": 232206,
"self": 16.95787763599617,
"children": {
"TorchPolicy.evaluate": {
"total": 285.62144355104147,
"count": 222632,
"self": 285.62144355104147
}
}
},
"workers": {
"total": 2.9361148140014848,
"count": 232206,
"self": 0.0,
"children": {
"worker_root": {
"total": 2493.764226772004,
"count": 232206,
"is_parallel": true,
"self": 1140.8971100670378,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007921100000203296,
"count": 1,
"is_parallel": true,
"self": 0.00023244999999860738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005596600000217222,
"count": 2,
"is_parallel": true,
"self": 0.0005596600000217222
}
}
},
"UnityEnvironment.step": {
"total": 0.02954100700003437,
"count": 1,
"is_parallel": true,
"self": 0.0002972800001543874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002186429999255779,
"count": 1,
"is_parallel": true,
"self": 0.0002186429999255779
},
"communicator.exchange": {
"total": 0.028319551999970827,
"count": 1,
"is_parallel": true,
"self": 0.028319551999970827
},
"steps_from_proto": {
"total": 0.0007055319999835774,
"count": 1,
"is_parallel": true,
"self": 0.00021147700010715198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004940549998764254,
"count": 2,
"is_parallel": true,
"self": 0.0004940549998764254
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1352.867116704966,
"count": 232205,
"is_parallel": true,
"self": 38.02819692014782,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.34890721189709,
"count": 232205,
"is_parallel": true,
"self": 82.34890721189709
},
"communicator.exchange": {
"total": 1136.235629170876,
"count": 232205,
"is_parallel": true,
"self": 1136.235629170876
},
"steps_from_proto": {
"total": 96.25438340204494,
"count": 232205,
"is_parallel": true,
"self": 37.77529742007334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.47908598197159,
"count": 464410,
"is_parallel": true,
"self": 58.47908598197159
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 529.0177472018343,
"count": 232206,
"self": 6.930085092801619,
"children": {
"process_trajectory": {
"total": 144.9835247850325,
"count": 232206,
"self": 143.45001244603225,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5335123390002536,
"count": 10,
"self": 1.5335123390002536
}
}
},
"_update_policy": {
"total": 377.10413732400013,
"count": 97,
"self": 317.8614841179992,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.24265320600091,
"count": 2910,
"self": 59.24265320600091
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3539997780753765e-06,
"count": 1,
"self": 1.3539997780753765e-06
},
"TrainerController._save_models": {
"total": 0.19274709900037124,
"count": 1,
"self": 0.0029007830007685698,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18984631599960267,
"count": 1,
"self": 0.18984631599960267
}
}
}
}
}
}
}