{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4094297885894775,
"min": 1.4094297885894775,
"max": 1.4300824403762817,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71083.1796875,
"min": 68008.1875,
"max": 77948.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.94360902255639,
"min": 80.72249589490968,
"max": 408.609756097561,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49446.0,
"min": 48825.0,
"max": 50259.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999952.0,
"min": 49930.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999952.0,
"min": 49930.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4642529487609863,
"min": 0.08819211274385452,
"max": 2.4748287200927734,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1310.9825439453125,
"min": 10.759437561035156,
"max": 1486.62158203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.843432180863574,
"min": 1.9168427115092512,
"max": 3.969630683843906,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2044.7059202194214,
"min": 233.85481080412865,
"max": 2332.430609345436,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.843432180863574,
"min": 1.9168427115092512,
"max": 3.969630683843906,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2044.7059202194214,
"min": 233.85481080412865,
"max": 2332.430609345436,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014273935747203521,
"min": 0.014273935747203521,
"max": 0.02019394244804668,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04282180724161056,
"min": 0.02977414053166285,
"max": 0.05455490564636421,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05961300167772505,
"min": 0.02214867395038406,
"max": 0.06421136396626631,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17883900503317515,
"min": 0.04429734790076812,
"max": 0.1832060938080152,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6048487984166673e-06,
"min": 3.6048487984166673e-06,
"max": 0.00029527905157364994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0814546395250002e-05,
"min": 1.0814546395250002e-05,
"max": 0.0008439474186841999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120158333333333,
"min": 0.10120158333333333,
"max": 0.19842635000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30360475,
"min": 0.20753690000000002,
"max": 0.5813158,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.995900833333336e-05,
"min": 6.995900833333336e-05,
"max": 0.004921474865000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020987702500000009,
"min": 0.00020987702500000009,
"max": 0.014067658420000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671543023",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671545227"
},
"total": 2203.098993009,
"count": 1,
"self": 0.4410756110000875,
"children": {
"run_training.setup": {
"total": 0.10686310100004448,
"count": 1,
"self": 0.10686310100004448
},
"TrainerController.start_learning": {
"total": 2202.551054297,
"count": 1,
"self": 3.7809863619750104,
"children": {
"TrainerController._reset_env": {
"total": 7.48850744899994,
"count": 1,
"self": 7.48850744899994
},
"TrainerController.advance": {
"total": 2191.167232867025,
"count": 232653,
"self": 3.8527108599500934,
"children": {
"env_step": {
"total": 1721.4894948390147,
"count": 232653,
"self": 1447.2082196190554,
"children": {
"SubprocessEnvManager._take_step": {
"total": 271.76867531812445,
"count": 232653,
"self": 14.044532142149137,
"children": {
"TorchPolicy.evaluate": {
"total": 257.7241431759753,
"count": 222954,
"self": 64.91530438591417,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.80883879006115,
"count": 222954,
"self": 192.80883879006115
}
}
}
}
},
"workers": {
"total": 2.5125999018348466,
"count": 232653,
"self": 0.0,
"children": {
"worker_root": {
"total": 2194.915919880904,
"count": 232653,
"is_parallel": true,
"self": 1001.3887619309153,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002053421000027811,
"count": 1,
"is_parallel": true,
"self": 0.00032636800006002886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001727052999967782,
"count": 2,
"is_parallel": true,
"self": 0.001727052999967782
}
}
},
"UnityEnvironment.step": {
"total": 0.0274623770000062,
"count": 1,
"is_parallel": true,
"self": 0.00028014500014705845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001682700000174009,
"count": 1,
"is_parallel": true,
"self": 0.0001682700000174009
},
"communicator.exchange": {
"total": 0.026322088999904736,
"count": 1,
"is_parallel": true,
"self": 0.026322088999904736
},
"steps_from_proto": {
"total": 0.0006918729999370044,
"count": 1,
"is_parallel": true,
"self": 0.0002400559999387042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045181699999830016,
"count": 2,
"is_parallel": true,
"self": 0.00045181699999830016
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1193.5271579499886,
"count": 232652,
"is_parallel": true,
"self": 34.71241564194952,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.37772521513057,
"count": 232652,
"is_parallel": true,
"self": 75.37772521513057
},
"communicator.exchange": {
"total": 990.5407556930146,
"count": 232652,
"is_parallel": true,
"self": 990.5407556930146
},
"steps_from_proto": {
"total": 92.89626139989389,
"count": 232652,
"is_parallel": true,
"self": 38.116709800943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.77955159895089,
"count": 465304,
"is_parallel": true,
"self": 54.77955159895089
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 465.8250271680604,
"count": 232653,
"self": 5.725262491069202,
"children": {
"process_trajectory": {
"total": 146.57509986499144,
"count": 232653,
"self": 145.26180925699157,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3132906079998747,
"count": 10,
"self": 1.3132906079998747
}
}
},
"_update_policy": {
"total": 313.52466481199974,
"count": 97,
"self": 260.84644420699954,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.678220605000206,
"count": 2910,
"self": 52.678220605000206
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0680000741558615e-06,
"count": 1,
"self": 1.0680000741558615e-06
},
"TrainerController._save_models": {
"total": 0.11432655099997646,
"count": 1,
"self": 0.0019319009998071124,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11239465000016935,
"count": 1,
"self": 0.11239465000016935
}
}
}
}
}
}
}