{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4036784172058105, "min": 1.4036784172058105, "max": 1.4302958250045776, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70058.9921875, "min": 67325.921875, "max": 78202.484375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 80.2260162601626, "min": 78.26307448494454, "max": 397.86507936507934, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49339.0, "min": 48965.0, "max": 50131.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999938.0, "min": 49507.0, "max": 1999938.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999938.0, "min": 49507.0, "max": 1999938.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3949050903320312, "min": 0.0543491393327713, "max": 2.467642307281494, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1472.86669921875, "min": 6.793642520904541, "max": 1548.4649658203125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.740669956246043, "min": 1.6240959672927857, "max": 3.972078300844652, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2300.512023091316, "min": 203.0119959115982, "max": 2464.700306892395, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.740669956246043, "min": 1.6240959672927857, "max": 3.972078300844652, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2300.512023091316, "min": 203.0119959115982, "max": 2464.700306892395, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.018136827473440715, "min": 0.01398662332406578, "max": 0.019651967051807634, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05441048242032215, "min": 0.02797324664813156, "max": 0.05798827091854643, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.06014080920981036, "min": 0.021528715019424757, "max": 0.06180970792969068, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1804224276294311, "min": 0.043057430038849515, "max": 0.1804224276294311, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.2508989164000087e-06, "min": 3.2508989164000087e-06, "max": 0.000295235026588325, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.752696749200027e-06, "min": 9.752696749200027e-06, "max": 0.0008435650688116499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10108360000000004, "min": 0.10108360000000004, "max": 0.198411675, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3032508000000001, "min": 0.2072946, "max": 0.5811883500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.407164000000014e-05, "min": 6.407164000000014e-05, "max": 0.0049207425825, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019221492000000043, "min": 0.00019221492000000043, "max": 0.014061298665, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1715622911", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1715625331" }, "total": 
2419.393115497, "count": 1, "self": 0.487422800999866, "children": { "run_training.setup": { "total": 0.05333867700005612, "count": 1, "self": 0.05333867700005612 }, "TrainerController.start_learning": { "total": 2418.852354019, "count": 1, "self": 4.364956203013662, "children": { "TrainerController._reset_env": { "total": 3.165143341999965, "count": 1, "self": 3.165143341999965 }, "TrainerController.advance": { "total": 2411.204707431986, "count": 232299, "self": 4.629168720997768, "children": { "env_step": { "total": 1903.990705850978, "count": 232299, "self": 1579.3913973689505, "children": { "SubprocessEnvManager._take_step": { "total": 321.75496883102505, "count": 232299, "self": 16.6563239359989, "children": { "TorchPolicy.evaluate": { "total": 305.09864489502615, "count": 222926, "self": 305.09864489502615 } } }, "workers": { "total": 2.8443396510024286, "count": 232299, "self": 0.0, "children": { "worker_root": { "total": 2411.803949019979, "count": 232299, "is_parallel": true, "self": 1135.520461750926, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.000953694999907384, "count": 1, "is_parallel": true, "self": 0.00024832299982335826, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007053720000840258, "count": 2, "is_parallel": true, "self": 0.0007053720000840258 } } }, "UnityEnvironment.step": { "total": 0.0398623010000847, "count": 1, "is_parallel": true, "self": 0.0003963879998991615, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0001908579999962967, "count": 1, "is_parallel": true, "self": 0.0001908579999962967 }, "communicator.exchange": { "total": 0.03851669200003016, "count": 1, "is_parallel": true, "self": 0.03851669200003016 }, "steps_from_proto": { "total": 0.0007583630001590791, "count": 1, "is_parallel": true, "self": 0.00021382800036917615, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005445349997899029, "count": 2, "is_parallel": true, "self": 0.0005445349997899029 } } } } } } }, "UnityEnvironment.step": { "total": 1276.283487269053, "count": 232298, "is_parallel": true, "self": 39.14943746118365, "children": { "UnityEnvironment._generate_step_input": { "total": 81.24556582995388, "count": 232298, "is_parallel": true, "self": 81.24556582995388 }, "communicator.exchange": { "total": 1065.1489923559627, "count": 232298, "is_parallel": true, "self": 1065.1489923559627 }, "steps_from_proto": { "total": 90.73949162195277, "count": 232298, "is_parallel": true, "self": 32.15860685563757, "children": { "_process_rank_one_or_two_observation": { "total": 58.580884766315194, "count": 464596, "is_parallel": true, "self": 58.580884766315194 } } } } } } } } } } }, "trainer_advance": { "total": 502.58483286001047, "count": 232299, "self": 6.457855091003921, "children": { "process_trajectory": { "total": 158.39712155600841, "count": 232299, "self": 157.03675839600805, "children": { "RLTrainer._checkpoint": { "total": 1.360363160000361, "count": 10, "self": 1.360363160000361 } } }, "_update_policy": { "total": 337.72985621299813, "count": 97, "self": 272.9294828330071, "children": { "TorchPPOOptimizer.update": { "total": 64.80037337999102, "count": 2910, "self": 64.80037337999102 } } } } } } }, "trainer_threads": { "total": 9.69999746303074e-07, "count": 1, "self": 9.69999746303074e-07 }, "TrainerController._save_models": { "total": 0.11754607200009559, "count": 1, "self": 0.0026334280000810395, "children": { 
"RLTrainer._checkpoint": { "total": 0.11491264400001455, "count": 1, "self": 0.11491264400001455 } } } } } } }