{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4019708633422852,
"min": 1.4019708633422852,
"max": 1.4248415231704712,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70568.203125,
"min": 68849.9375,
"max": 76591.3671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.14109347442681,
"min": 78.36507936507937,
"max": 393.5703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48842.0,
"min": 48842.0,
"max": 50377.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49978.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49978.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.468919515609741,
"min": 0.08144420385360718,
"max": 2.510969638824463,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1399.8773193359375,
"min": 10.343414306640625,
"max": 1531.04541015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.803425944671429,
"min": 1.6742711077759584,
"max": 4.115231629344229,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2156.5425106287003,
"min": 212.63243068754673,
"max": 2455.6030318140984,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.803425944671429,
"min": 1.6742711077759584,
"max": 4.115231629344229,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2156.5425106287003,
"min": 212.63243068754673,
"max": 2455.6030318140984,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017418143222484004,
"min": 0.01436676245830313,
"max": 0.0219887286623513,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052254429667452015,
"min": 0.02873352491660626,
"max": 0.05445080256904475,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054819266498088846,
"min": 0.02392118979866306,
"max": 0.05887789155046145,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16445779949426653,
"min": 0.04784237959732612,
"max": 0.17663367465138435,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.591448802883336e-06,
"min": 3.591448802883336e-06,
"max": 0.00029537707654097493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0774346408650009e-05,
"min": 1.0774346408650009e-05,
"max": 0.0008441092686302499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011971166666667,
"min": 0.1011971166666667,
"max": 0.198459025,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035913500000001,
"min": 0.20752379999999992,
"max": 0.5813697500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.973612166666673e-05,
"min": 6.973612166666673e-05,
"max": 0.0049231053475,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020920836500000017,
"min": 0.00020920836500000017,
"max": 0.014070350525000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728092959",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728095547"
},
"total": 2587.1820354310003,
"count": 1,
"self": 0.42566800400027205,
"children": {
"run_training.setup": {
"total": 0.057476245000088966,
"count": 1,
"self": 0.057476245000088966
},
"TrainerController.start_learning": {
"total": 2586.698891182,
"count": 1,
"self": 4.507027115871097,
"children": {
"TrainerController._reset_env": {
"total": 2.51366744500001,
"count": 1,
"self": 2.51366744500001
},
"TrainerController.advance": {
"total": 2579.567643172129,
"count": 232952,
"self": 4.746829723299925,
"children": {
"env_step": {
"total": 2044.0032431499737,
"count": 232952,
"self": 1616.8305907110778,
"children": {
"SubprocessEnvManager._take_step": {
"total": 424.25670140293346,
"count": 232952,
"self": 16.005441872938036,
"children": {
"TorchPolicy.evaluate": {
"total": 408.2512595299954,
"count": 223063,
"self": 408.2512595299954
}
}
},
"workers": {
"total": 2.9159510359625074,
"count": 232952,
"self": 0.0,
"children": {
"worker_root": {
"total": 2579.2629552450817,
"count": 232952,
"is_parallel": true,
"self": 1271.0867294409122,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008376609998776985,
"count": 1,
"is_parallel": true,
"self": 0.000226993999831393,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006106670000463055,
"count": 2,
"is_parallel": true,
"self": 0.0006106670000463055
}
}
},
"UnityEnvironment.step": {
"total": 0.031219320999980482,
"count": 1,
"is_parallel": true,
"self": 0.00037633000010828255,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002107299999352108,
"count": 1,
"is_parallel": true,
"self": 0.0002107299999352108
},
"communicator.exchange": {
"total": 0.029890751000039018,
"count": 1,
"is_parallel": true,
"self": 0.029890751000039018
},
"steps_from_proto": {
"total": 0.0007415099998979713,
"count": 1,
"is_parallel": true,
"self": 0.00019459699979051948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005469130001074518,
"count": 2,
"is_parallel": true,
"self": 0.0005469130001074518
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1308.1762258041695,
"count": 232951,
"is_parallel": true,
"self": 38.86994986901118,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.29171958407233,
"count": 232951,
"is_parallel": true,
"self": 89.29171958407233
},
"communicator.exchange": {
"total": 1086.3855185630252,
"count": 232951,
"is_parallel": true,
"self": 1086.3855185630252
},
"steps_from_proto": {
"total": 93.62903778806071,
"count": 232951,
"is_parallel": true,
"self": 35.68722439814701,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.941813389913705,
"count": 465902,
"is_parallel": true,
"self": 57.941813389913705
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 530.8175702988553,
"count": 232952,
"self": 6.601788424953384,
"children": {
"process_trajectory": {
"total": 168.65675801190082,
"count": 232952,
"self": 167.1405929679006,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5161650440002177,
"count": 10,
"self": 1.5161650440002177
}
}
},
"_update_policy": {
"total": 355.55902386200114,
"count": 97,
"self": 288.5074985949959,
"children": {
"TorchPPOOptimizer.update": {
"total": 67.05152526700522,
"count": 2910,
"self": 67.05152526700522
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0699995982577093e-06,
"count": 1,
"self": 1.0699995982577093e-06
},
"TrainerController._save_models": {
"total": 0.11055237900018255,
"count": 1,
"self": 0.0017963280001822568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1087560510000003,
"count": 1,
"self": 0.1087560510000003
}
}
}
}
}
}
}