{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4030985832214355, "min": 1.4030985832214355, "max": 1.428154706954956, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70742.828125, "min": 67681.84375, "max": 78076.59375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 77.47252747252747, "min": 72.33432392273403, "max": 397.3095238095238, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49350.0, "min": 48681.0, "max": 50104.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999937.0, "min": 49765.0, "max": 1999937.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999937.0, "min": 49765.0, "max": 1999937.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.449110984802246, "min": 0.10019616782665253, "max": 2.5153133869171143, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1560.083740234375, "min": 12.524520874023438, "max": 1686.015869140625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7623923536933948, "min": 1.8420389697551727, "max": 4.034694222511088, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2396.6439293026924, "min": 230.2548712193966, "max": 2615.580111682415, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7623923536933948, "min": 1.8420389697551727, "max": 4.034694222511088, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2396.6439293026924, "min": 230.2548712193966, "max": 2615.580111682415, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.017406691120171066, "min": 0.014176798631807387, "max": 0.021192805933484733, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.052220073360513194, "min": 0.02863721885272147, "max": 0.0635784178004542, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.060270603746175766, "min": 0.022546236775815486, "max": 0.0673290567472577, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1808118112385273, "min": 0.04509247355163097, "max": 0.19226329823335014, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.001698666133339e-06, "min": 4.001698666133339e-06, "max": 0.0002953326015558, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.2005095998400018e-05, "min": 1.2005095998400018e-05, "max": 0.0008440114686628498, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10133386666666666, "min": 0.10133386666666666, "max": 0.1984442, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3040016, "min": 0.20779700000000006, "max": 0.5813371500000002, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.655994666666677e-05, "min": 7.655994666666677e-05, "max": 0.00492236558, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002296798400000003, "min": 0.0002296798400000003, "max": 0.014068723784999997, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679242768", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1679245115" }, "total": 
2347.1687495610004, "count": 1, "self": 0.44113701100013714, "children": { "run_training.setup": { "total": 0.11114630800000214, "count": 1, "self": 0.11114630800000214 }, "TrainerController.start_learning": { "total": 2346.616466242, "count": 1, "self": 4.258298334980736, "children": { "TrainerController._reset_env": { "total": 8.090915625999969, "count": 1, "self": 8.090915625999969 }, "TrainerController.advance": { "total": 2334.1388090740197, "count": 233378, "self": 4.7551924769827565, "children": { "env_step": { "total": 1817.0610323050464, "count": 233378, "self": 1529.7526743448407, "children": { "SubprocessEnvManager._take_step": { "total": 284.4995999940521, "count": 233378, "self": 16.851726733117403, "children": { "TorchPolicy.evaluate": { "total": 267.6478732609347, "count": 223013, "self": 267.6478732609347 } } }, "workers": { "total": 2.808757966153621, "count": 233378, "self": 0.0, "children": { "worker_root": { "total": 2338.4306241579775, "count": 233378, "is_parallel": true, "self": 1093.6723974129459, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.000923148000083529, "count": 1, "is_parallel": true, "self": 0.0002845240001079219, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006386239999756071, "count": 2, "is_parallel": true, "self": 0.0006386239999756071 } } }, "UnityEnvironment.step": { "total": 0.06412658099998225, "count": 1, "is_parallel": true, "self": 0.00035678200003985694, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0001996360000475761, "count": 1, "is_parallel": true, "self": 0.0001996360000475761 }, "communicator.exchange": { "total": 0.06284921699989354, "count": 1, "is_parallel": true, "self": 0.06284921699989354 }, "steps_from_proto": { "total": 0.0007209460000012768, "count": 1, "is_parallel": true, "self": 0.00024393199998939963, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004770140000118772, "count": 2, "is_parallel": true, "self": 0.0004770140000118772 } } } } } } }, "UnityEnvironment.step": { "total": 1244.7582267450316, "count": 233377, "is_parallel": true, "self": 38.17310240808479, "children": { "UnityEnvironment._generate_step_input": { "total": 78.81418930993061, "count": 233377, "is_parallel": true, "self": 78.81418930993061 }, "communicator.exchange": { "total": 1037.756119739918, "count": 233377, "is_parallel": true, "self": 1037.756119739918 }, "steps_from_proto": { "total": 90.01481528709814, "count": 233377, "is_parallel": true, "self": 36.129342123152924, "children": { "_process_rank_one_or_two_observation": { "total": 53.88547316394522, "count": 466754, "is_parallel": true, "self": 53.88547316394522 } } } } } } } } } } }, "trainer_advance": { "total": 512.3225842919903, "count": 233378, "self": 6.237752786072178, "children": { "process_trajectory": { "total": 146.55203259491736, "count": 233378, "self": 145.16376121691735, "children": { "RLTrainer._checkpoint": { "total": 1.388271378000013, "count": 10, "self": 1.388271378000013 } } }, "_update_policy": { "total": 359.5327989110008, "count": 97, "self": 302.7108312419971, "children": { "TorchPPOOptimizer.update": { "total": 56.8219676690037, "count": 2910, "self": 56.8219676690037 } } } } } } }, "trainer_threads": { "total": 8.449997039861046e-07, "count": 1, "self": 8.449997039861046e-07 }, "TrainerController._save_models": { "total": 0.12844236199998704, "count": 1, "self": 0.0020760839997819858, "children": { 
"RLTrainer._checkpoint": { "total": 0.12636627800020506, "count": 1, "self": 0.12636627800020506 } } } } } } }