{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.407454490661621, "min": 1.407454490661621, "max": 1.4266873598098755, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 68876.6015625, "min": 68595.8125, "max": 77509.84375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 76.2936630602782, "min": 70.19885550786839, "max": 385.6, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49362.0, "min": 49069.0, "max": 50128.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999958.0, "min": 49868.0, "max": 1999958.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999958.0, "min": 49868.0, "max": 1999958.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.5113062858581543, "min": 0.02269906736910343, "max": 2.553631544113159, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1624.815185546875, "min": 2.9281797409057617, "max": 1774.77392578125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.814240711733952, "min": 1.6593452530313832, "max": 4.073235086843925, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2467.813740491867, "min": 214.05553764104843, "max": 2765.2540959119797, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.814240711733952, "min": 1.6593452530313832, "max": 4.073235086843925, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2467.813740491867, "min": 214.05553764104843, "max": 2765.2540959119797, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01704820689487355, "min": 0.013922861330017137, "max": 0.01970077566608476, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.051144620684620654, "min": 0.027845722660034274, "max": 0.05296187096149273, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.057225987646314834, "min": 0.022853302055348954, "max": 0.06474080731471381, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1716779629389445, "min": 0.04570660411069791, "max": 0.1942224219441414, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.864948711716665e-06, "min": 3.864948711716665e-06, "max": 0.00029534940155019986, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1594846135149994e-05, "min": 1.1594846135149994e-05, "max": 0.0008443678685440499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10128828333333334, "min": 0.10128828333333334, "max": 0.19844979999999995, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30386485, "min": 0.2077041, "max": 0.5814559500000002, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.428533833333331e-05, "min": 7.428533833333331e-05, "max": 0.0049226450200000015, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00022285601499999992, "min": 0.00022285601499999992, "max": 0.014074651904999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1719952750", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1719955073" }, "total": 
2323.308614045, "count": 1, "self": 0.43868853800040597, "children": { "run_training.setup": { "total": 0.05721917300002133, "count": 1, "self": 0.05721917300002133 }, "TrainerController.start_learning": { "total": 2322.812706334, "count": 1, "self": 4.378674549916013, "children": { "TrainerController._reset_env": { "total": 3.1153685459998997, "count": 1, "self": 3.1153685459998997 }, "TrainerController.advance": { "total": 2315.1913331660844, "count": 233427, "self": 4.7334680800440765, "children": { "env_step": { "total": 1825.8481900890768, "count": 233427, "self": 1502.9700351029505, "children": { "SubprocessEnvManager._take_step": { "total": 320.08434327999373, "count": 233427, "self": 17.11798045306341, "children": { "TorchPolicy.evaluate": { "total": 302.9663628269303, "count": 222912, "self": 302.9663628269303 } } }, "workers": { "total": 2.7938117061323737, "count": 233427, "self": 0.0, "children": { "worker_root": { "total": 2315.957653677079, "count": 233427, "is_parallel": true, "self": 1108.8567058559759, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0008624079999890455, "count": 1, "is_parallel": true, "self": 0.00024261299995487207, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006197950000341734, "count": 2, "is_parallel": true, "self": 0.0006197950000341734 } } }, "UnityEnvironment.step": { "total": 0.028880148999974153, "count": 1, "is_parallel": true, "self": 0.00038115799998195143, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002031279999528124, "count": 1, "is_parallel": true, "self": 0.0002031279999528124 }, "communicator.exchange": { "total": 0.027468985000041357, "count": 1, "is_parallel": true, "self": 0.027468985000041357 }, "steps_from_proto": { "total": 0.0008268779999980325, "count": 1, "is_parallel": true, "self": 0.00021779899986995588, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006090790001280766, "count": 2, "is_parallel": true, "self": 0.0006090790001280766 } } } } } } }, "UnityEnvironment.step": { "total": 1207.1009478211033, "count": 233426, "is_parallel": true, "self": 38.2464117802208, "children": { "UnityEnvironment._generate_step_input": { "total": 79.68694337891134, "count": 233426, "is_parallel": true, "self": 79.68694337891134 }, "communicator.exchange": { "total": 1000.7146601679383, "count": 233426, "is_parallel": true, "self": 1000.7146601679383 }, "steps_from_proto": { "total": 88.45293249403278, "count": 233426, "is_parallel": true, "self": 31.18764633207161, "children": { "_process_rank_one_or_two_observation": { "total": 57.26528616196117, "count": 466852, "is_parallel": true, "self": 57.26528616196117 } } } } } } } } } } }, "trainer_advance": { "total": 484.60967499696335, "count": 233427, "self": 6.022469315064541, "children": { "process_trajectory": { "total": 157.40679584589918, "count": 233427, "self": 155.97448951989918, "children": { "RLTrainer._checkpoint": { "total": 1.4323063260000026, "count": 10, "self": 1.4323063260000026 } } }, "_update_policy": { "total": 321.1804098359996, "count": 97, "self": 258.83626340800276, "children": { "TorchPPOOptimizer.update": { "total": 62.34414642799686, "count": 2910, "self": 62.34414642799686 } } } } } } }, "trainer_threads": { "total": 1.3049998415226582e-06, "count": 1, "self": 1.3049998415226582e-06 }, "TrainerController._save_models": { "total": 0.12732876699965345, "count": 1, "self": 0.0019643319992610486, "children": { 
"RLTrainer._checkpoint": { "total": 0.1253644350003924, "count": 1, "self": 0.1253644350003924 } } } } } } }