{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4042694568634033,
"min": 1.4042694568634033,
"max": 1.4282770156860352,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71547.53125,
"min": 68256.640625,
"max": 76697.2734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 108.39168490153173,
"min": 95.32369942196532,
"max": 382.7175572519084,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49535.0,
"min": 48745.0,
"max": 50244.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49642.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49642.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.337568521499634,
"min": 0.12143395096063614,
"max": 2.4426989555358887,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1068.268798828125,
"min": 15.786413192749023,
"max": 1236.41650390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5034643299470063,
"min": 1.8304124340415,
"max": 3.8956585245973923,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1601.0831987857819,
"min": 237.953616425395,
"max": 1972.3458533883095,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5034643299470063,
"min": 1.8304124340415,
"max": 3.8956585245973923,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1601.0831987857819,
"min": 237.953616425395,
"max": 1972.3458533883095,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01639505666211739,
"min": 0.013032501782678688,
"max": 0.019717937690590284,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04918516998635217,
"min": 0.026065003565357375,
"max": 0.058016949156687286,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04880992306603326,
"min": 0.021427371973792714,
"max": 0.07285010168949763,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14642976919809977,
"min": 0.04285474394758543,
"max": 0.21624431361754737,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.089498970199999e-06,
"min": 3.089498970199999e-06,
"max": 0.000295263076578975,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.268496910599997e-06,
"min": 9.268496910599997e-06,
"max": 0.0008439541686819499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10102980000000002,
"min": 0.10102980000000002,
"max": 0.19842102500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30308940000000006,
"min": 0.20725159999999998,
"max": 0.58131805,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.138701999999996e-05,
"min": 6.138701999999996e-05,
"max": 0.004921209147499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018416105999999988,
"min": 0.00018416105999999988,
"max": 0.014067770695000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714548706",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714551139"
},
"total": 2433.831466496,
"count": 1,
"self": 0.4315492829996401,
"children": {
"run_training.setup": {
"total": 0.07196187499999951,
"count": 1,
"self": 0.07196187499999951
},
"TrainerController.start_learning": {
"total": 2433.3279553380003,
"count": 1,
"self": 4.400427845129798,
"children": {
"TrainerController._reset_env": {
"total": 3.173013327000035,
"count": 1,
"self": 3.173013327000035
},
"TrainerController.advance": {
"total": 2425.6418911038704,
"count": 231611,
"self": 4.547377595867147,
"children": {
"env_step": {
"total": 1963.6247171670236,
"count": 231611,
"self": 1631.9342244839859,
"children": {
"SubprocessEnvManager._take_step": {
"total": 328.7766254261031,
"count": 231611,
"self": 17.00341330409566,
"children": {
"TorchPolicy.evaluate": {
"total": 311.77321212200746,
"count": 223061,
"self": 311.77321212200746
}
}
},
"workers": {
"total": 2.913867256934509,
"count": 231611,
"self": 0.0,
"children": {
"worker_root": {
"total": 2426.2184897548273,
"count": 231611,
"is_parallel": true,
"self": 1110.2106532937912,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010438279999789302,
"count": 1,
"is_parallel": true,
"self": 0.0003064949999611599,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007373330000177702,
"count": 2,
"is_parallel": true,
"self": 0.0007373330000177702
}
}
},
"UnityEnvironment.step": {
"total": 0.030333517999963533,
"count": 1,
"is_parallel": true,
"self": 0.000384814999961236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021690100004434498,
"count": 1,
"is_parallel": true,
"self": 0.00021690100004434498
},
"communicator.exchange": {
"total": 0.028961838999975953,
"count": 1,
"is_parallel": true,
"self": 0.028961838999975953
},
"steps_from_proto": {
"total": 0.000769962999981999,
"count": 1,
"is_parallel": true,
"self": 0.00021711899995580097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000552844000026198,
"count": 2,
"is_parallel": true,
"self": 0.000552844000026198
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.007836461036,
"count": 231610,
"is_parallel": true,
"self": 39.987127004982085,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.64885780097063,
"count": 231610,
"is_parallel": true,
"self": 86.64885780097063
},
"communicator.exchange": {
"total": 1096.434760348021,
"count": 231610,
"is_parallel": true,
"self": 1096.434760348021
},
"steps_from_proto": {
"total": 92.93709130706219,
"count": 231610,
"is_parallel": true,
"self": 35.30902572403903,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.62806558302316,
"count": 463220,
"is_parallel": true,
"self": 57.62806558302316
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.4697963409798,
"count": 231611,
"self": 6.402758804004293,
"children": {
"process_trajectory": {
"total": 150.91434380697632,
"count": 231611,
"self": 149.58965929097593,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3246845160003886,
"count": 10,
"self": 1.3246845160003886
}
}
},
"_update_policy": {
"total": 300.1526937299992,
"count": 97,
"self": 239.9314492530063,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.22124447699292,
"count": 2910,
"self": 60.22124447699292
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3630001376441214e-06,
"count": 1,
"self": 1.3630001376441214e-06
},
"TrainerController._save_models": {
"total": 0.11262169900010122,
"count": 1,
"self": 0.002102768999975524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11051893000012569,
"count": 1,
"self": 0.11051893000012569
}
}
}
}
}
}
}