{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4061975479125977, "min": 1.4061975479125977, "max": 1.4265944957733154, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69163.828125, "min": 69156.15625, "max": 78202.796875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 84.20783645655878, "min": 78.43650793650794, "max": 404.5483870967742, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49430.0, "min": 49080.0, "max": 50228.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999997.0, "min": 49771.0, "max": 1999997.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999997.0, "min": 49771.0, "max": 1999997.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4382264614105225, "min": -0.020325694233179092, "max": 2.4899277687072754, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1431.2388916015625, "min": -2.5000603199005127, "max": 1508.333740234375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.8367349050520225, "min": 1.8469048620360653, "max": 4.008192855445191, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2252.1633892655373, "min": 227.16929803043604, "max": 2417.7147986888885, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.8367349050520225, "min": 1.8469048620360653, "max": 4.008192855445191, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2252.1633892655373, "min": 227.16929803043604, "max": 2417.7147986888885, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.018152510685387018, "min": 0.013775842923496383, "max": 0.020684100156843972, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05445753205616105, "min": 0.027551685846992767, "max": 0.057334895069167634, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.06059290340377225, "min": 0.02228111702327927, "max": 0.06234606943196721, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.18177871021131675, "min": 0.04456223404655854, "max": 0.18703820829590162, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.5691988103000023e-06, "min": 3.5691988103000023e-06, "max": 0.0002952854265715249, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0707596430900007e-05, "min": 1.0707596430900007e-05, "max": 0.0008439451686849499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10118970000000001, "min": 0.10118970000000001, "max": 0.19842847500000002, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30356910000000004, "min": 0.20753600000000003, "max": 0.5813150500000002, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.936603000000004e-05, "min": 6.936603000000004e-05, "max": 0.0049215809025, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002080980900000001, "min": 0.0002080980900000001, "max": 0.014067620995000002, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1699523560", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.0+cu118", "numpy_version": "1.23.5", 
"end_time_seconds": "1699525877" }, "total": 2317.486786398, "count": 1, "self": 0.43696986800023296, "children": { "run_training.setup": { "total": 0.046848404999991544, "count": 1, "self": 0.046848404999991544 }, "TrainerController.start_learning": { "total": 2317.002968125, "count": 1, "self": 4.294820459004313, "children": { "TrainerController._reset_env": { "total": 9.073444387999984, "count": 1, "self": 9.073444387999984 }, "TrainerController.advance": { "total": 2303.5386018369954, "count": 232371, "self": 4.279644417088093, "children": { "env_step": { "total": 1818.289096823935, "count": 232371, "self": 1495.9826368468812, "children": { "SubprocessEnvManager._take_step": { "total": 319.61681896905526, "count": 232371, "self": 16.54532248007132, "children": { "TorchPolicy.evaluate": { "total": 303.07149648898394, "count": 222826, "self": 303.07149648898394 } } }, "workers": { "total": 2.6896410079986595, "count": 232371, "self": 0.0, "children": { "worker_root": { "total": 2309.5852819909924, "count": 232371, "is_parallel": true, "self": 1088.9012495390252, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.000821149999978843, "count": 1, "is_parallel": true, "self": 0.0002238710000028732, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005972789999759698, "count": 2, "is_parallel": true, "self": 0.0005972789999759698 } } }, "UnityEnvironment.step": { "total": 0.028552577000027668, "count": 1, "is_parallel": true, "self": 0.00031113500006085815, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00020607999999811, "count": 1, "is_parallel": true, "self": 0.00020607999999811 }, "communicator.exchange": { "total": 0.027273110000010092, "count": 1, "is_parallel": true, "self": 0.027273110000010092 }, "steps_from_proto": { "total": 0.0007622519999586075, "count": 1, "is_parallel": true, "self": 0.00019608799993875436, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005661640000198531, "count": 2, "is_parallel": true, "self": 0.0005661640000198531 } } } } } } }, "UnityEnvironment.step": { "total": 1220.6840324519671, "count": 232370, "is_parallel": true, "self": 39.146734360943356, "children": { "UnityEnvironment._generate_step_input": { "total": 81.164014048007, "count": 232370, "is_parallel": true, "self": 81.164014048007 }, "communicator.exchange": { "total": 1013.9310787810014, "count": 232370, "is_parallel": true, "self": 1013.9310787810014 }, "steps_from_proto": { "total": 86.44220526201536, "count": 232370, "is_parallel": true, "self": 30.17965629912277, "children": { "_process_rank_one_or_two_observation": { "total": 56.262548962892595, "count": 464740, "is_parallel": true, "self": 56.262548962892595 } } } } } } } } } } }, "trainer_advance": { "total": 480.9698605959724, "count": 232371, "self": 6.344602887038661, "children": { "process_trajectory": { "total": 146.1761981829344, "count": 232371, "self": 145.04713183293495, "children": { "RLTrainer._checkpoint": { "total": 1.129066349999448, "count": 10, "self": 1.129066349999448 } } }, "_update_policy": { "total": 328.44905952599936, "count": 97, "self": 267.50063988499204, "children": { "TorchPPOOptimizer.update": { "total": 60.94841964100732, "count": 2910, "self": 60.94841964100732 } } } } } } }, "trainer_threads": { "total": 8.639999578008428e-07, "count": 1, "self": 8.639999578008428e-07 }, "TrainerController._save_models": { "total": 0.09610057700001562, "count": 1, "self": 
0.0019310909997329873, "children": { "RLTrainer._checkpoint": { "total": 0.09416948600028263, "count": 1, "self": 0.09416948600028263 } } } } } } }