{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 1.0420081615447998, "min": 1.0420081615447998, "max": 2.873650074005127, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 9972.0185546875, "min": 9972.0185546875, "max": 29429.05078125, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.790838241577148, "min": 0.4809192419052124, "max": 12.790838241577148, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2494.21337890625, "min": 93.29833221435547, "max": 2586.3388671875, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.07501919945934787, "min": 0.06141904263823366, "max": 0.07690657897870623, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.3000767978373915, "min": 0.24567617055293464, "max": 0.38453289489353115, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.2276877649858886, "min": 0.10787486028400999, "max": 0.257599629666291, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.9107510599435544, "min": 0.43149944113603994, "max": 1.287998148331455, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 25.25, "min": 3.022727272727273, "max": 25.25, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1111.0, "min": 133.0, "max": 1378.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 25.25, "min": 3.022727272727273, "max": 25.25, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1111.0, "min": 133.0, "max": 1378.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1686598732", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", 
"numpy_version": "1.21.2", "end_time_seconds": "1686599184" }, "total": 451.91124948399994, "count": 1, "self": 0.44031784199989943, "children": { "run_training.setup": { "total": 0.039135553999983586, "count": 1, "self": 0.039135553999983586 }, "TrainerController.start_learning": { "total": 451.43179608800006, "count": 1, "self": 0.5314652469922407, "children": { "TrainerController._reset_env": { "total": 4.446723816999963, "count": 1, "self": 4.446723816999963 }, "TrainerController.advance": { "total": 446.3081129360079, "count": 18203, "self": 0.2394178730083354, "children": { "env_step": { "total": 446.06869506299955, "count": 18203, "self": 324.2918735999958, "children": { "SubprocessEnvManager._take_step": { "total": 121.52402590699683, "count": 18203, "self": 1.7157696790027899, "children": { "TorchPolicy.evaluate": { "total": 119.80825622799404, "count": 18203, "self": 119.80825622799404 } } }, "workers": { "total": 0.25279555600690173, "count": 18203, "self": 0.0, "children": { "worker_root": { "total": 450.0069996580039, "count": 18203, "is_parallel": true, "self": 212.49885949700138, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.004433305000020482, "count": 1, "is_parallel": true, "self": 0.0030343490000745987, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013989559999458834, "count": 10, "is_parallel": true, "self": 0.0013989559999458834 } } }, "UnityEnvironment.step": { "total": 0.05794325299996217, "count": 1, "is_parallel": true, "self": 0.0006126829999857364, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00037785600000006525, "count": 1, "is_parallel": true, "self": 0.00037785600000006525 }, "communicator.exchange": { "total": 0.05495959399996764, "count": 1, "is_parallel": true, "self": 0.05495959399996764 }, "steps_from_proto": { "total": 0.00199312000000873, "count": 1, "is_parallel": true, "self": 0.00033326999999871987, "children": { "_process_rank_one_or_two_observation": { "total": 0.0016598500000100103, "count": 10, "is_parallel": true, "self": 0.0016598500000100103 } } } } } } }, "UnityEnvironment.step": { "total": 237.50814016100253, "count": 18202, "is_parallel": true, "self": 10.027378802005217, "children": { "UnityEnvironment._generate_step_input": { "total": 5.047922238996648, "count": 18202, "is_parallel": true, "self": 5.047922238996648 }, "communicator.exchange": { "total": 189.57908694100013, "count": 18202, "is_parallel": true, "self": 189.57908694100013 }, "steps_from_proto": { "total": 32.85375217900054, "count": 18202, "is_parallel": true, "self": 5.888502617008442, "children": { "_process_rank_one_or_two_observation": { "total": 26.965249561992096, "count": 182020, "is_parallel": true, "self": 26.965249561992096 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00012802699995972944, "count": 1, "self": 0.00012802699995972944, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 443.0572103150098, "count": 432497, "is_parallel": true, "self": 9.037448516033578, "children": { "process_trajectory": { "total": 242.51130007897632, "count": 432497, "is_parallel": true, "self": 241.39785390497627, "children": { "RLTrainer._checkpoint": { "total": 1.113446174000046, "count": 4, "is_parallel": true, "self": 1.113446174000046 } } }, "_update_policy": { "total": 191.5084617199999, "count": 90, "is_parallel": true, "self": 
72.96349011599807, "children": { "TorchPPOOptimizer.update": { "total": 118.54497160400183, "count": 4587, "is_parallel": true, "self": 118.54497160400183 } } } } } } } } }, "TrainerController._save_models": { "total": 0.14536606100000427, "count": 1, "self": 0.0008566830000518166, "children": { "RLTrainer._checkpoint": { "total": 0.14450937799995245, "count": 1, "self": 0.14450937799995245 } } } } } } }