{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4006654024124146,
"min": 1.4006654024124146,
"max": 1.4266661405563354,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69760.140625,
"min": 69424.859375,
"max": 76901.25,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 114.55299539170507,
"min": 75.67331288343559,
"max": 406.2032520325203,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49716.0,
"min": 48788.0,
"max": 50202.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999637.0,
"min": 49920.0,
"max": 1999637.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999637.0,
"min": 49920.0,
"max": 1999637.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.335498809814453,
"min": 0.10202540457248688,
"max": 2.485460042953491,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1013.6065063476562,
"min": 12.447099685668945,
"max": 1606.594482421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.471413233175805,
"min": 1.9476474631028098,
"max": 4.007787316434962,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1506.5933431982994,
"min": 237.61299049854279,
"max": 2526.294544994831,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.471413233175805,
"min": 1.9476474631028098,
"max": 4.007787316434962,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1506.5933431982994,
"min": 237.61299049854279,
"max": 2526.294544994831,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015983500133087445,
"min": 0.01321517914790699,
"max": 0.020463351239838327,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04795050039926234,
"min": 0.02643035829581398,
"max": 0.055069775696999085,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.03958214964303706,
"min": 0.02206591965837611,
"max": 0.059383882644275826,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11874644892911117,
"min": 0.04448703533659379,
"max": 0.17730518976847331,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7845987385000023e-06,
"min": 3.7845987385000023e-06,
"max": 0.00029533117655627494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1353796215500007e-05,
"min": 1.1353796215500007e-05,
"max": 0.0008441361186213,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1012615,
"min": 0.1012615,
"max": 0.19844372500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037845,
"min": 0.20768015000000006,
"max": 0.5813787000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.294885000000004e-05,
"min": 7.294885000000004e-05,
"max": 0.0049223418775,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021884655000000013,
"min": 0.00021884655000000013,
"max": 0.01407079713,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719323279",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719325693"
},
"total": 2413.244013599,
"count": 1,
"self": 0.4402269880001768,
"children": {
"run_training.setup": {
"total": 0.05436427100005403,
"count": 1,
"self": 0.05436427100005403
},
"TrainerController.start_learning": {
"total": 2412.74942234,
"count": 1,
"self": 4.2981584070785175,
"children": {
"TrainerController._reset_env": {
"total": 2.9288738019999982,
"count": 1,
"self": 2.9288738019999982
},
"TrainerController.advance": {
"total": 2405.4072806639215,
"count": 232850,
"self": 4.757405072857182,
"children": {
"env_step": {
"total": 1911.9843326320781,
"count": 232850,
"self": 1575.1531840180933,
"children": {
"SubprocessEnvManager._take_step": {
"total": 333.9694759329801,
"count": 232850,
"self": 17.344625701968766,
"children": {
"TorchPolicy.evaluate": {
"total": 316.62485023101135,
"count": 223190,
"self": 316.62485023101135
}
}
},
"workers": {
"total": 2.861672681004734,
"count": 232850,
"self": 0.0,
"children": {
"worker_root": {
"total": 2405.569058398063,
"count": 232850,
"is_parallel": true,
"self": 1138.981753376131,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009277320000364853,
"count": 1,
"is_parallel": true,
"self": 0.00023939800007610756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006883339999603777,
"count": 2,
"is_parallel": true,
"self": 0.0006883339999603777
}
}
},
"UnityEnvironment.step": {
"total": 0.04336765799996556,
"count": 1,
"is_parallel": true,
"self": 0.00037646399982804724,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001996010000766546,
"count": 1,
"is_parallel": true,
"self": 0.0001996010000766546
},
"communicator.exchange": {
"total": 0.04204996200007827,
"count": 1,
"is_parallel": true,
"self": 0.04204996200007827
},
"steps_from_proto": {
"total": 0.0007416309999825899,
"count": 1,
"is_parallel": true,
"self": 0.00019161799991707085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000550013000065519,
"count": 2,
"is_parallel": true,
"self": 0.000550013000065519
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.587305021932,
"count": 232849,
"is_parallel": true,
"self": 39.145474204780385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.5761541989499,
"count": 232849,
"is_parallel": true,
"self": 82.5761541989499
},
"communicator.exchange": {
"total": 1053.8364213560899,
"count": 232849,
"is_parallel": true,
"self": 1053.8364213560899
},
"steps_from_proto": {
"total": 91.02925526211186,
"count": 232849,
"is_parallel": true,
"self": 33.980046149999794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.04920911211207,
"count": 465698,
"is_parallel": true,
"self": 57.04920911211207
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 488.6655429589863,
"count": 232850,
"self": 6.272244079888537,
"children": {
"process_trajectory": {
"total": 156.8103734000989,
"count": 232850,
"self": 155.50935917109962,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3010142289992928,
"count": 10,
"self": 1.3010142289992928
}
}
},
"_update_policy": {
"total": 325.58292547899885,
"count": 97,
"self": 262.5242195559954,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.05870592300346,
"count": 2910,
"self": 63.05870592300346
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0009998732130043e-06,
"count": 1,
"self": 1.0009998732130043e-06
},
"TrainerController._save_models": {
"total": 0.11510846600003788,
"count": 1,
"self": 0.001924481000060041,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11318398499997784,
"count": 1,
"self": 0.11318398499997784
}
}
}
}
}
}
}