{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.406630516052246, "min": 1.406630516052246, "max": 1.4299650192260742, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70844.9453125, "min": 69031.1015625, "max": 76959.546875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 88.42526690391459, "min": 80.55409836065574, "max": 419.8666666666667, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49695.0, "min": 49028.0, "max": 50384.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999531.0, "min": 49948.0, "max": 1999531.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999531.0, "min": 49948.0, "max": 1999531.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.42584490776062, "min": 0.06266618520021439, "max": 2.484623670578003, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1363.3248291015625, "min": 7.457276344299316, "max": 1468.7637939453125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7890091900087337, "min": 1.7741054821164668, "max": 3.9327938818505834, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2129.4231647849083, "min": 211.11855237185955, "max": 2328.404710829258, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7890091900087337, "min": 1.7741054821164668, "max": 3.9327938818505834, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2129.4231647849083, "min": 211.11855237185955, "max": 2328.404710829258, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.015510857134682332, "min": 0.013365046620128851, "max": 0.021183792505487672, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.046532571404047, "min": 0.026730093240257703, "max": 0.06355137751646302, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05275366633302636, "min": 0.02319464369987448, "max": 0.05872912767032782, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.15826099899907908, "min": 0.04638928739974896, "max": 0.17618738301098347, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.6112987962666683e-06, "min": 3.6112987962666683e-06, "max": 0.00029534505155164996, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0833896388800005e-05, "min": 1.0833896388800005e-05, "max": 0.0008440108686630498, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10120373333333332, "min": 0.10120373333333332, "max": 0.19844835, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30361119999999997, "min": 0.20754340000000004, "max": 0.5813369500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.006629333333342e-05, "min": 7.006629333333342e-05, "max": 0.004922572665, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00021019888000000023, "min": 0.00021019888000000023, "max": 0.014068713804999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1716291426", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": 
"1716293833" }, "total": 2406.901770901, "count": 1, "self": 0.7517536629998176, "children": { "run_training.setup": { "total": 0.0760687429999507, "count": 1, "self": 0.0760687429999507 }, "TrainerController.start_learning": { "total": 2406.073948495, "count": 1, "self": 4.298537044116529, "children": { "TrainerController._reset_env": { "total": 3.448168836000036, "count": 1, "self": 3.448168836000036 }, "TrainerController.advance": { "total": 2398.153493192883, "count": 232292, "self": 4.603851070843575, "children": { "env_step": { "total": 1896.6476689980746, "count": 232292, "self": 1570.4959839389312, "children": { "SubprocessEnvManager._take_step": { "total": 323.3756660500019, "count": 232292, "self": 16.475407664942395, "children": { "TorchPolicy.evaluate": { "total": 306.9002583850595, "count": 222946, "self": 306.9002583850595 } } }, "workers": { "total": 2.7760190091414643, "count": 232292, "self": 0.0, "children": { "worker_root": { "total": 2398.7972544701106, "count": 232292, "is_parallel": true, "self": 1131.998519930099, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0010881869999366245, "count": 1, "is_parallel": true, "self": 0.0003111959999841929, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007769909999524316, "count": 2, "is_parallel": true, "self": 0.0007769909999524316 } } }, "UnityEnvironment.step": { "total": 0.028824194999970132, "count": 1, "is_parallel": true, "self": 0.0003877050000937743, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00020639699994262628, "count": 1, "is_parallel": true, "self": 0.00020639699994262628 }, "communicator.exchange": { "total": 0.02750483499994516, "count": 1, "is_parallel": true, "self": 0.02750483499994516 }, "steps_from_proto": { "total": 0.0007252579999885711, "count": 1, "is_parallel": true, "self": 0.00019428100006280147, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005309769999257696, "count": 2, "is_parallel": true, "self": 0.0005309769999257696 } } } } } } }, "UnityEnvironment.step": { "total": 1266.7987345400115, "count": 232291, "is_parallel": true, "self": 39.02663482188382, "children": { "UnityEnvironment._generate_step_input": { "total": 81.20109887300407, "count": 232291, "is_parallel": true, "self": 81.20109887300407 }, "communicator.exchange": { "total": 1056.807314405068, "count": 232291, "is_parallel": true, "self": 1056.807314405068 }, "steps_from_proto": { "total": 89.7636864400555, "count": 232291, "is_parallel": true, "self": 32.098340189969804, "children": { "_process_rank_one_or_two_observation": { "total": 57.66534625008569, "count": 464582, "is_parallel": true, "self": 57.66534625008569 } } } } } } } } } } }, "trainer_advance": { "total": 496.90197312396526, "count": 232292, "self": 6.468088568991448, "children": { "process_trajectory": { "total": 158.46811606897234, "count": 232292, "self": 156.8891865359725, "children": { "RLTrainer._checkpoint": { "total": 1.5789295329998367, "count": 10, "self": 1.5789295329998367 } } }, "_update_policy": { "total": 331.9657684860015, "count": 97, "self": 267.59606183000574, "children": { "TorchPPOOptimizer.update": { "total": 64.36970665599574, "count": 2910, "self": 64.36970665599574 } } } } } } }, "trainer_threads": { "total": 1.154000074166106e-06, "count": 1, "self": 1.154000074166106e-06 }, "TrainerController._save_models": { "total": 0.17374826800005394, "count": 1, "self": 0.0028442170000744227, 
"children": { "RLTrainer._checkpoint": { "total": 0.17090405099997952, "count": 1, "self": 0.17090405099997952 } } } } } } }