{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4071283340454102, "min": 1.4071283340454102, "max": 1.430787205696106, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71088.125, "min": 68169.5, "max": 75135.4921875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 88.57726465364121, "min": 87.0105633802817, "max": 398.1190476190476, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49869.0, "min": 49097.0, "max": 50210.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999721.0, "min": 49746.0, "max": 1999721.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999721.0, "min": 49746.0, "max": 1999721.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3988826274871826, "min": -0.0036882658023387194, "max": 2.444566488265991, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1350.5709228515625, "min": -0.4610332250595093, "max": 1370.042236328125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7703003151802146, "min": 1.6127858521938323, "max": 3.8949352608573053, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2122.6790774464607, "min": 201.59823152422905, "max": 2129.7822608947754, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7703003151802146, "min": 1.6127858521938323, "max": 3.8949352608573053, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2122.6790774464607, "min": 201.59823152422905, "max": 2129.7822608947754, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.017067341889843293, "min": 0.013809026569288108, "max": 0.018652186748659004, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.051202025669529874, "min": 0.027618053138576217, "max": 0.05380319804802032, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05681938375863765, "min": 0.02160929078236222, "max": 0.07039009674141804, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.17045815127591293, "min": 0.04321858156472444, "max": 0.18707300586005052, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.225848924750006e-06, "min": 3.225848924750006e-06, "max": 0.00029530815156395007, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.677546774250018e-06, "min": 9.677546774250018e-06, "max": 0.0008438139187286998, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10107525000000002, "min": 0.10107525000000002, "max": 0.19843605000000006, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30322575000000007, "min": 0.2072887, "max": 0.5812713, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.36549750000001e-05, "min": 6.36549750000001e-05, "max": 0.004921958895, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0001909649250000003, "min": 0.0001909649250000003, "max": 0.01406543787, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1691588999", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1691592124" }, 
"total": 3125.2980853960007, "count": 1, "self": 0.43699562800156855, "children": { "run_training.setup": { "total": 0.07210164699995403, "count": 1, "self": 0.07210164699995403 }, "TrainerController.start_learning": { "total": 3124.7889881209994, "count": 1, "self": 7.182578073009154, "children": { "TrainerController._reset_env": { "total": 5.216347214000052, "count": 1, "self": 5.216347214000052 }, "TrainerController.advance": { "total": 3112.26121346599, "count": 231325, "self": 6.681500942095681, "children": { "env_step": { "total": 2491.8072367340174, "count": 231325, "self": 2096.5480739320037, "children": { "SubprocessEnvManager._take_step": { "total": 390.9024263140227, "count": 231325, "self": 22.697939694047363, "children": { "TorchPolicy.evaluate": { "total": 368.20448661997534, "count": 222874, "self": 368.20448661997534 } } }, "workers": { "total": 4.356736487990929, "count": 231325, "self": 0.0, "children": { "worker_root": { "total": 3114.458821132001, "count": 231325, "is_parallel": true, "self": 1416.3559730100137, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0011983319998307707, "count": 1, "is_parallel": true, "self": 0.00038800099991931347, "children": { "_process_rank_one_or_two_observation": { "total": 0.0008103309999114572, "count": 2, "is_parallel": true, "self": 0.0008103309999114572 } } }, "UnityEnvironment.step": { "total": 0.04015257900005054, "count": 1, "is_parallel": true, "self": 0.00040229999967777985, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00028489100009210233, "count": 1, "is_parallel": true, "self": 0.00028489100009210233 }, "communicator.exchange": { "total": 0.038520206000157486, "count": 1, "is_parallel": true, "self": 0.038520206000157486 }, "steps_from_proto": { "total": 0.0009451820001231681, "count": 1, "is_parallel": true, "self": 0.0002659950000634126, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006791870000597555, "count": 2, "is_parallel": true, "self": 0.0006791870000597555 } } } } } } }, "UnityEnvironment.step": { "total": 1698.1028481219873, "count": 231324, "is_parallel": true, "self": 49.33363261687555, "children": { "UnityEnvironment._generate_step_input": { "total": 101.49752662701235, "count": 231324, "is_parallel": true, "self": 101.49752662701235 }, "communicator.exchange": { "total": 1423.3763367391507, "count": 231324, "is_parallel": true, "self": 1423.3763367391507 }, "steps_from_proto": { "total": 123.89535213894874, "count": 231324, "is_parallel": true, "self": 45.20370886320188, "children": { "_process_rank_one_or_two_observation": { "total": 78.69164327574686, "count": 462648, "is_parallel": true, "self": 78.69164327574686 } } } } } } } } } } }, "trainer_advance": { "total": 613.7724757898768, "count": 231325, "self": 9.698894634839007, "children": { "process_trajectory": { "total": 169.12177574203656, "count": 231325, "self": 168.22100590803643, "children": { "RLTrainer._checkpoint": { "total": 0.9007698340001298, "count": 6, "self": 0.9007698340001298 } } }, "_update_policy": { "total": 434.95180541300124, "count": 97, "self": 370.54851991599253, "children": { "TorchPPOOptimizer.update": { "total": 64.40328549700871, "count": 2910, "self": 64.40328549700871 } } } } } } }, "trainer_threads": { "total": 1.0200001270277426e-06, "count": 1, "self": 1.0200001270277426e-06 }, "TrainerController._save_models": { "total": 0.12884834800024692, "count": 1, "self": 
0.0030658059995403164, "children": { "RLTrainer._checkpoint": { "total": 0.1257825420007066, "count": 1, "self": 0.1257825420007066 } } } } } } }