{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4066708087921143, "min": 1.4066708087921143, "max": 1.4320334196090698, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70861.0390625, "min": 68928.6328125, "max": 75382.0234375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 80.53670473083197, "min": 80.3479674796748, "max": 393.75396825396825, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49369.0, "min": 49042.0, "max": 50236.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999979.0, "min": 49986.0, "max": 1999979.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999979.0, "min": 49986.0, "max": 1999979.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4835824966430664, "min": 0.005519932135939598, "max": 2.4968693256378174, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1522.43603515625, "min": 0.6899915337562561, "max": 1522.43603515625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.8151878223528093, "min": 1.7963060657978058, "max": 3.974545169429681, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2338.710135102272, "min": 224.53825822472572, "max": 2352.022219657898, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.8151878223528093, "min": 1.7963060657978058, "max": 3.974545169429681, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2338.710135102272, "min": 224.53825822472572, "max": 2352.022219657898, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01379888583420931, "min": 0.013464466259271526, "max": 0.020022278647714605, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.02759777166841862, "min": 0.02692893251854305, "max": 0.05997894546114064, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05788745749741793, "min": 0.020992445386946203, "max": 0.06574861289312442, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.11577491499483586, "min": 0.041984890773892405, "max": 0.19518075523277123, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 4.555748481450006e-06, "min": 4.555748481450006e-06, "max": 0.000295236376587875, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.111496962900012e-06, "min": 9.111496962900012e-06, "max": 0.00084355756881415, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10151855, "min": 0.10151855, "max": 0.1984121250000001, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.2030371, "min": 0.2030371, "max": 0.5811858500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 8.577564500000008e-05, "min": 8.577564500000008e-05, "max": 0.004920765037500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00017155129000000015, "min": 0.00017155129000000015, "max": 0.014061173915, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1672136600", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1672138825" }, "total": 
2225.444037756, "count": 1, "self": 0.373903846000303, "children": { "run_training.setup": { "total": 0.10835587099995792, "count": 1, "self": 0.10835587099995792 }, "TrainerController.start_learning": { "total": 2224.961778039, "count": 1, "self": 3.8988479209237994, "children": { "TrainerController._reset_env": { "total": 6.374010347000024, "count": 1, "self": 6.374010347000024 }, "TrainerController.advance": { "total": 2214.569802326076, "count": 232300, "self": 4.179939088025549, "children": { "env_step": { "total": 1741.8461181699931, "count": 232300, "self": 1463.244573808024, "children": { "SubprocessEnvManager._take_step": { "total": 276.00289657995546, "count": 232300, "self": 13.987330417951284, "children": { "TorchPolicy.evaluate": { "total": 262.0155661620042, "count": 222898, "self": 65.25599957393553, "children": { "TorchPolicy.sample_actions": { "total": 196.75956658806865, "count": 222898, "self": 196.75956658806865 } } } } }, "workers": { "total": 2.5986477820137566, "count": 232300, "self": 0.0, "children": { "worker_root": { "total": 2217.315665908051, "count": 232300, "is_parallel": true, "self": 1008.658252225988, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0010851279999997132, "count": 1, "is_parallel": true, "self": 0.0003006439999921895, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007844840000075237, "count": 2, "is_parallel": true, "self": 0.0007844840000075237 } } }, "UnityEnvironment.step": { "total": 0.028261290000045847, "count": 1, "is_parallel": true, "self": 0.0002680710000504405, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00017687600001181636, "count": 1, "is_parallel": true, "self": 0.00017687600001181636 }, "communicator.exchange": { "total": 0.027019768000002387, "count": 1, "is_parallel": true, "self": 0.027019768000002387 }, "steps_from_proto": { "total": 0.0007965749999812033, "count": 1, "is_parallel": true, "self": 0.0002571589999433854, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005394160000378179, "count": 2, "is_parallel": true, "self": 0.0005394160000378179 } } } } } } }, "UnityEnvironment.step": { "total": 1208.657413682063, "count": 232299, "is_parallel": true, "self": 34.85275116805224, "children": { "UnityEnvironment._generate_step_input": { "total": 76.11048699299465, "count": 232299, "is_parallel": true, "self": 76.11048699299465 }, "communicator.exchange": { "total": 1004.4572747820005, "count": 232299, "is_parallel": true, "self": 1004.4572747820005 }, "steps_from_proto": { "total": 93.23690073901543, "count": 232299, "is_parallel": true, "self": 38.20979004006546, "children": { "_process_rank_one_or_two_observation": { "total": 55.02711069894997, "count": 464598, "is_parallel": true, "self": 55.02711069894997 } } } } } } } } } } }, "trainer_advance": { "total": 468.54374506805755, "count": 232300, "self": 6.064972233011815, "children": { "process_trajectory": { "total": 148.4625698990444, "count": 232300, "self": 147.27092911804448, "children": { "RLTrainer._checkpoint": { "total": 1.1916407809999328, "count": 10, "self": 1.1916407809999328 } } }, "_update_policy": { "total": 314.0162029360013, "count": 96, "self": 261.0455596830022, "children": { "TorchPPOOptimizer.update": { "total": 52.97064325299914, "count": 2880, "self": 52.97064325299914 } } } } } } }, "trainer_threads": { "total": 1.1160000212839805e-06, "count": 1, "self": 1.1160000212839805e-06 }, 
"TrainerController._save_models": { "total": 0.11911632900000768, "count": 1, "self": 0.003175844999987021, "children": { "RLTrainer._checkpoint": { "total": 0.11594048400002066, "count": 1, "self": 0.11594048400002066 } } } } } } }