{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4022345542907715, "min": 1.4022345542907715, "max": 1.4287482500076294, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69862.1328125, "min": 68672.7734375, "max": 78249.125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 90.63919413919415, "min": 84.53264604810997, "max": 391.28125, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49489.0, "min": 49103.0, "max": 50084.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999971.0, "min": 49687.0, "max": 1999971.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999971.0, "min": 49687.0, "max": 1999971.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.372682809829712, "min": -0.06834828108549118, "max": 2.41623592376709, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1295.48486328125, "min": -8.680232048034668, "max": 1378.394775390625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.741445175239018, "min": 1.6476573376205024, "max": 3.8705743417573073, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2042.8290656805038, "min": 209.2524818778038, "max": 2137.0558540821075, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.741445175239018, "min": 1.6476573376205024, "max": 3.8705743417573073, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2042.8290656805038, "min": 209.2524818778038, "max": 2137.0558540821075, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014591573089516412, "min": 0.013362857564061414, "max": 0.02179738537258042, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.043774719268549235, "min": 0.02672571512812283, "max": 0.05721003912961654, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05368930891984039, "min": 0.024164532652745645, "max": 0.058780586098631224, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16106792675952117, "min": 0.04832906530549129, "max": 0.169324084247152, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.196098934666665e-06, "min": 3.196098934666665e-06, "max": 0.00029533035155655, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.588296803999994e-06, "min": 9.588296803999994e-06, "max": 0.0008438631187122998, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10106533333333335, "min": 0.10106533333333335, "max": 0.19844345000000002, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3031960000000001, "min": 0.20728305000000002, "max": 0.5812877, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.316013333333333e-05, "min": 6.316013333333333e-05, "max": 0.004922328155000001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00018948039999999998, "min": 0.00018948039999999998, "max": 0.01406625623, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1685285417", "python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1685287794" }, 
"total": 2376.448326781, "count": 1, "self": 0.49194054800045706, "children": { "run_training.setup": { "total": 0.04385877000004257, "count": 1, "self": 0.04385877000004257 }, "TrainerController.start_learning": { "total": 2375.9125274629996, "count": 1, "self": 4.148726027017801, "children": { "TrainerController._reset_env": { "total": 5.09885606499995, "count": 1, "self": 5.09885606499995 }, "TrainerController.advance": { "total": 2366.5386389469822, "count": 231479, "self": 4.4153691370802335, "children": { "env_step": { "total": 1842.9011931000073, "count": 231479, "self": 1557.609585916981, "children": { "SubprocessEnvManager._take_step": { "total": 282.5250592059907, "count": 231479, "self": 16.962244892167064, "children": { "TorchPolicy.evaluate": { "total": 265.56281431382365, "count": 222872, "self": 265.56281431382365 } } }, "workers": { "total": 2.7665479770355432, "count": 231479, "self": 0.0, "children": { "worker_root": { "total": 2368.164105355057, "count": 231479, "is_parallel": true, "self": 1092.7053759440823, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009414959999958228, "count": 1, "is_parallel": true, "self": 0.00025582199998552824, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006856740000102945, "count": 2, "is_parallel": true, "self": 0.0006856740000102945 } } }, "UnityEnvironment.step": { "total": 0.04623164299994187, "count": 1, "is_parallel": true, "self": 0.00041263899993282394, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00022155900001052942, "count": 1, "is_parallel": true, "self": 0.00022155900001052942 }, "communicator.exchange": { "total": 0.04481924200001686, "count": 1, "is_parallel": true, "self": 0.04481924200001686 }, "steps_from_proto": { "total": 0.0007782029999816586, "count": 1, "is_parallel": true, "self": 0.0002421490000870108, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005360539998946479, "count": 2, "is_parallel": true, "self": 0.0005360539998946479 } } } } } } }, "UnityEnvironment.step": { "total": 1275.4587294109747, "count": 231478, "is_parallel": true, "self": 38.72337926685486, "children": { "UnityEnvironment._generate_step_input": { "total": 77.94276945709817, "count": 231478, "is_parallel": true, "self": 77.94276945709817 }, "communicator.exchange": { "total": 1066.5183201128784, "count": 231478, "is_parallel": true, "self": 1066.5183201128784 }, "steps_from_proto": { "total": 92.27426057414323, "count": 231478, "is_parallel": true, "self": 33.398592190986506, "children": { "_process_rank_one_or_two_observation": { "total": 58.87566838315672, "count": 462956, "is_parallel": true, "self": 58.87566838315672 } } } } } } } } } } }, "trainer_advance": { "total": 519.222076709895, "count": 231479, "self": 6.587305581045143, "children": { "process_trajectory": { "total": 131.25283159184744, "count": 231479, "self": 129.93182909184713, "children": { "RLTrainer._checkpoint": { "total": 1.3210025000003043, "count": 10, "self": 1.3210025000003043 } } }, "_update_policy": { "total": 381.3819395370024, "count": 97, "self": 321.2695272320045, "children": { "TorchPPOOptimizer.update": { "total": 60.11241230499786, "count": 2910, "self": 60.11241230499786 } } } } } } }, "trainer_threads": { "total": 9.079999472305644e-07, "count": 1, "self": 9.079999472305644e-07 }, "TrainerController._save_models": { "total": 0.1263055159997748, "count": 1, "self": 0.0021186419994592143, 
"children": { "RLTrainer._checkpoint": { "total": 0.12418687400031558, "count": 1, "self": 0.12418687400031558 } } } } } } }