{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.6579132080078125, "min": 0.6556541323661804, "max": 2.8424899578094482, "count": 50 }, "SnowballTarget.Policy.Entropy.sum": { "value": 6383.07373046875, "min": 6383.07373046875, "max": 29141.20703125, "count": 50 }, "SnowballTarget.Step.mean": { "value": 499976.0, "min": 9952.0, "max": 499976.0, "count": 50 }, "SnowballTarget.Step.sum": { "value": 499976.0, "min": 9952.0, "max": 499976.0, "count": 50 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 13.549287796020508, "min": 0.4598708748817444, "max": 13.718676567077637, "count": 50 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2777.60400390625, "min": 89.21495056152344, "max": 2803.13427734375, "count": 50 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 50 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 10945.0, "min": 8756.0, "max": 10945.0, "count": 50 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06667521106997702, "min": 0.06280982004671601, "max": 0.07440114256663519, "count": 50 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.3333760553498851, "min": 0.25123928018686403, "max": 0.3720057128331759, "count": 50 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.16664646938443184, "min": 0.1462357784347499, "max": 0.28920836029026437, "count": 50 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.8332323469221592, "min": 0.5849431137389997, "max": 1.3172378610162174, "count": 50 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 3.0528989824000028e-06, "min": 3.0528989824000028e-06, "max": 0.00029675280108239997, "count": 50 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 1.5264494912000015e-05, "min": 1.5264494912000015e-05, "max": 0.0014540640153119996, "count": 50 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10101760000000001, "min": 0.10101760000000001, "max": 0.1989176, "count": 50 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.5050880000000001, "min": 0.41199040000000003, "max": 0.984688, "count": 50 }, "SnowballTarget.Policy.Beta.mean": { "value": 6.0778240000000044e-05, "min": 6.0778240000000044e-05, "max": 0.00494598824, "count": 50 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0003038912000000002, "min": 0.0003038912000000002, "max": 0.0242359312, "count": 50 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 27.0, "min": 3.977272727272727, "max": 27.054545454545455, "count": 50 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1485.0, "min": 175.0, "max": 1488.0, "count": 50 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 27.0, "min": 3.977272727272727, "max": 27.054545454545455, "count": 50 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1485.0, "min": 175.0, "max": 1488.0, "count": 50 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679754787", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": 
"1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1679755949" }, "total": 1162.2593916990002, "count": 1, "self": 0.7340408590000607, "children": { "run_training.setup": { "total": 0.10473603299999468, "count": 1, "self": 0.10473603299999468 }, "TrainerController.start_learning": { "total": 1161.420614807, "count": 1, "self": 1.2360281039598249, "children": { "TrainerController._reset_env": { "total": 9.751921562999996, "count": 1, "self": 9.751921562999996 }, "TrainerController.advance": { "total": 1150.2326747350403, "count": 45503, "self": 0.6189754880053897, "children": { "env_step": { "total": 1149.613699247035, "count": 45503, "self": 844.7546745620089, "children": { "SubprocessEnvManager._take_step": { "total": 304.23534500201345, "count": 45503, "self": 5.304210057016576, "children": { "TorchPolicy.evaluate": { "total": 298.93113494499687, "count": 45503, "self": 298.93113494499687 } } }, "workers": { "total": 0.6236796830125968, "count": 45503, "self": 0.0, "children": { "worker_root": { "total": 1158.0631530189783, "count": 45503, "is_parallel": true, "self": 591.2742422509973, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005272912000009455, "count": 1, "is_parallel": true, "self": 0.003694250999956239, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015786610000532164, "count": 10, "is_parallel": true, "self": 0.0015786610000532164 } } }, "UnityEnvironment.step": { "total": 0.04670857700000397, "count": 1, "is_parallel": true, "self": 0.0003886409999722673, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00029316100005871704, "count": 1, "is_parallel": true, "self": 0.00029316100005871704 }, "communicator.exchange": { "total": 0.044351655000014034, "count": 1, "is_parallel": true, "self": 0.044351655000014034 }, "steps_from_proto": { "total": 0.0016751199999589517, "count": 1, "is_parallel": true, "self": 0.000364947000207394, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013101729997515577, "count": 10, "is_parallel": true, "self": 0.0013101729997515577 } } } } } } }, "UnityEnvironment.step": { "total": 566.788910767981, "count": 45502, "is_parallel": true, "self": 22.872921977962505, "children": { "UnityEnvironment._generate_step_input": { "total": 12.313542046009843, "count": 45502, "is_parallel": true, "self": 12.313542046009843 }, "communicator.exchange": { "total": 457.9982195459911, "count": 45502, "is_parallel": true, "self": 457.9982195459911 }, "steps_from_proto": { "total": 73.6042271980175, "count": 45502, "is_parallel": true, "self": 14.294320180126192, "children": { "_process_rank_one_or_two_observation": { "total": 59.309907017891305, "count": 455020, "is_parallel": true, "self": 59.309907017891305 } } } } } } } } } } } } }, "trainer_threads": { "total": 9.929399993779953e-05, "count": 1, "self": 9.929399993779953e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 1141.8382505470154, "count": 975462, "is_parallel": true, "self": 23.30068299878576, "children": { "process_trajectory": { "total": 591.156370489229, "count": 975462, "is_parallel": true, "self": 587.4149382802293, "children": { "RLTrainer._checkpoint": { "total": 3.7414322089996404, "count": 10, "is_parallel": true, "self": 3.7414322089996404 } } }, "_update_policy": { "total": 527.3811970590007, "count": 227, 
"is_parallel": true, "self": 206.3344661749884, "children": { "TorchPPOOptimizer.update": { "total": 321.0467308840123, "count": 15432, "is_parallel": true, "self": 321.0467308840123 } } } } } } } } }, "TrainerController._save_models": { "total": 0.1998911109999426, "count": 1, "self": 0.0011108660000900272, "children": { "RLTrainer._checkpoint": { "total": 0.19878024499985258, "count": 1, "self": 0.19878024499985258 } } } } } } }