{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4018336534500122, "min": 1.4018336534500122, "max": 1.4262577295303345, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69557.5859375, "min": 68735.6171875, "max": 78269.4140625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 95.22137404580153, "min": 73.95508982035928, "max": 381.77862595419845, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49896.0, "min": 49005.0, "max": 50013.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999605.0, "min": 49950.0, "max": 1999605.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999605.0, "min": 49950.0, "max": 1999605.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4634392261505127, "min": 0.1532023549079895, "max": 2.504218816757202, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1288.378662109375, "min": 19.916305541992188, "max": 1633.9404296875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.742580357860653, "min": 1.8448546070318956, "max": 4.073107984094393, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1957.3695271611214, "min": 239.83109891414642, "max": 2622.512893676758, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.742580357860653, "min": 1.8448546070318956, "max": 4.073107984094393, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1957.3695271611214, "min": 239.83109891414642, "max": 2622.512893676758, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.015273515708233593, "min": 0.013211133411257632, "max": 0.019576082052662966, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04582054712470078, "min": 0.02683051419953699, "max": 0.0535138249824134, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.054408744101723035, "min": 0.02118611251935363, "max": 0.058371569050682914, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1632262323051691, "min": 0.04237222503870726, "max": 0.17511470715204874, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.595698801466665e-06, "min": 3.595698801466665e-06, "max": 0.0002952918015694, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0787096404399995e-05, "min": 1.0787096404399995e-05, "max": 0.00084370816876395, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10119853333333335, "min": 0.10119853333333335, "max": 0.1984306, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3035956000000001, "min": 0.20754640000000002, "max": 0.5812360500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.980681333333334e-05, "min": 6.980681333333334e-05, "max": 0.0049216869399999995, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020942044000000001, "min": 0.00020942044000000001, "max": 0.014063678894999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1690978012", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1690980582" }, "total": 2570.5168193109994, "count": 1, "self": 0.44388485299805325, "children": { "run_training.setup": { "total": 0.0669983610005147, "count": 1, "self": 0.0669983610005147 }, "TrainerController.start_learning": { "total": 2570.005936097001, "count": 1, "self": 4.7979171301767565, "children": { "TrainerController._reset_env": { "total": 5.548088637000546, "count": 1, "self": 5.548088637000546 }, "TrainerController.advance": { "total": 2559.5433647158234, "count": 232919, "self": 5.000355190019945, "children": { "env_step": { "total": 1992.041589452986, "count": 232919, "self": 1686.5958986171245, "children": { "SubprocessEnvManager._take_step": { "total": 302.31099217407154, "count": 232919, "self": 17.56998738510174, "children": { "TorchPolicy.evaluate": { "total": 284.7410047889698, "count": 223013, "self": 284.7410047889698 } } }, "workers": { "total": 3.1346986617900257, "count": 232919, "self": 0.0, "children": { "worker_root": { "total": 2562.144551186844, "count": 232919, "is_parallel": true, "self": 1187.0160433347692, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008903449997887947, "count": 1, "is_parallel": true, "self": 0.0002512049995857524, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006391400002030423, "count": 2, "is_parallel": true, "self": 0.0006391400002030423 } } }, "UnityEnvironment.step": { "total": 0.04638306899960298, "count": 1, "is_parallel": true, "self": 0.0003592850007407833, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00024245999975391896, "count": 1, "is_parallel": true, "self": 0.00024245999975391896 }, "communicator.exchange": { "total": 0.044994918999691436, "count": 1, "is_parallel": true, "self": 0.044994918999691436 }, "steps_from_proto": { "total": 0.0007864049994168454, "count": 1, "is_parallel": true, "self": 0.00024094599939417094, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005454590000226744, "count": 2, "is_parallel": true, "self": 0.0005454590000226744 } } } } } } }, "UnityEnvironment.step": { "total": 1375.128507852075, "count": 232918, "is_parallel": true, "self": 42.09514527323245, "children": { "UnityEnvironment._generate_step_input": { "total": 88.24942301181181, "count": 232918, "is_parallel": true, "self": 88.24942301181181 }, "communicator.exchange": { "total": 1140.8790752868827, "count": 232918, "is_parallel": true, "self": 1140.8790752868827 }, "steps_from_proto": { "total": 103.9048642801481, "count": 232918, "is_parallel": true, "self": 39.74022010617682, "children": { "_process_rank_one_or_two_observation": { "total": 64.16464417397128, "count": 465836, "is_parallel": true, "self": 64.16464417397128 } } } } } } } } } } }, "trainer_advance": { "total": 562.5014200728174, "count": 232919, "self": 7.2229687065764665, "children": { "process_trajectory": { "total": 147.4576044412388, "count": 232919, "self": 146.1139419912388, "children": { "RLTrainer._checkpoint": { "total": 1.3436624500000107, "count": 10, "self": 1.3436624500000107 } } }, "_update_policy": { "total": 407.8208469250021, "count": 97, "self": 348.007928232998, "children": { "TorchPPOOptimizer.update": { "total": 59.81291869200413, "count": 2910, "self": 59.81291869200413 } } } } } } }, "trainer_threads": { "total": 1.1050005923607387e-06, "count": 1, "self": 1.1050005923607387e-06 }, "TrainerController._save_models": { "total": 0.11656450899954507, "count": 1, "self": 
0.002283536999129865, "children": { "RLTrainer._checkpoint": { "total": 0.1142809720004152, "count": 1, "self": 0.1142809720004152 } } } } } } }