{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 1.6072490215301514, "min": 1.6072490215301514, "max": 2.8812255859375, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 15416.732421875, "min": 15416.732421875, "max": 29601.7109375, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 8.96760082244873, "min": 0.03838074207305908, "max": 8.96760082244873, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 1748.68212890625, "min": 7.445863723754883, "max": 1773.82568359375, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 10945.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 19.545454545454547, "min": 3.272727272727273, "max": 19.545454545454547, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 860.0, "min": 144.0, "max": 1053.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 19.545454545454547, "min": 3.272727272727273, "max": 19.545454545454547, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 860.0, "min": 144.0, "max": 1053.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.04734392976891037, "min": 0.03752647929283161, "max": 0.053041986443162326, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.09468785953782075, "min": 0.03944649032976789, "max": 0.10608397288632465, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.2331096658652479, "min": 0.12033137690388795, "max": 0.3068722256092411, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.4662193317304958, "min": 0.12033137690388795, "max": 0.6137444512184822, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 6.894097702e-06, "min": 6.894097702e-06, "max": 0.00029128800290399996, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 1.3788195404e-05, "min": 1.3788195404e-05, "max": 0.000558288013904, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.102298, "min": 0.102298, "max": 0.197096, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.204596, "min": 0.1175, "max": 0.38609600000000005, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001246702, "min": 0.0001246702, "max": 0.0048550903999999995, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0002493404, "min": 0.0002493404, "max": 0.009306190400000003, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682637757", "python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]", "command_line_arguments": "/home/qromaiko/mambaforge/envs/pytorch_rl/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": 
"1.5.0", "pytorch_version": "2.0.0", "numpy_version": "1.21.2", "end_time_seconds": "1682638049" }, "total": 292.17776610799774, "count": 1, "self": 0.21834235999631346, "children": { "run_training.setup": { "total": 0.010287131000950467, "count": 1, "self": 0.010287131000950467 }, "TrainerController.start_learning": { "total": 291.9491366170005, "count": 1, "self": 0.29030942115787184, "children": { "TrainerController._reset_env": { "total": 1.615480036001827, "count": 1, "self": 1.615480036001827 }, "TrainerController.advance": { "total": 289.96114170884175, "count": 18204, "self": 0.13826492999578477, "children": { "env_step": { "total": 289.82287677884597, "count": 18204, "self": 179.6621670131135, "children": { "SubprocessEnvManager._take_step": { "total": 110.01234486577596, "count": 18204, "self": 0.7623232078112778, "children": { "TorchPolicy.evaluate": { "total": 109.25002165796468, "count": 18204, "self": 109.25002165796468 } } }, "workers": { "total": 0.14836489995650481, "count": 18204, "self": 0.0, "children": { "worker_root": { "total": 291.30702953411674, "count": 18204, "is_parallel": true, "self": 143.2384806709706, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0018205710002803244, "count": 1, "is_parallel": true, "self": 0.0006339600040519144, "children": { "_process_rank_one_or_two_observation": { "total": 0.00118661099622841, "count": 10, "is_parallel": true, "self": 0.00118661099622841 } } }, "UnityEnvironment.step": { "total": 0.021112392998475116, "count": 1, "is_parallel": true, "self": 0.00022492000061902218, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00026531000185059384, "count": 1, "is_parallel": true, "self": 0.00026531000185059384 }, "communicator.exchange": { "total": 0.01960902199789416, "count": 1, "is_parallel": true, "self": 0.01960902199789416 }, "steps_from_proto": { "total": 0.0010131409981113393, "count": 1, "is_parallel": true, "self": 0.00017272000332013704, "children": { "_process_rank_one_or_two_observation": { "total": 0.0008404209947912022, "count": 10, "is_parallel": true, "self": 0.0008404209947912022 } } } } } } }, "UnityEnvironment.step": { "total": 148.06854886314613, "count": 18203, "is_parallel": true, "self": 3.85487336683218, "children": { "UnityEnvironment._generate_step_input": { "total": 2.3029227340884972, "count": 18203, "is_parallel": true, "self": 2.3029227340884972 }, "communicator.exchange": { "total": 130.36710955726812, "count": 18203, "is_parallel": true, "self": 130.36710955726812 }, "steps_from_proto": { "total": 11.543643204957334, "count": 18203, "is_parallel": true, "self": 2.3872092191268166, "children": { "_process_rank_one_or_two_observation": { "total": 9.156433985830517, "count": 182030, "is_parallel": true, "self": 9.156433985830517 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00011446999997133389, "count": 1, "self": 0.00011446999997133389, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 288.46193014932214, "count": 298085, "is_parallel": true, "self": 4.071494104544399, "children": { "process_trajectory": { "total": 197.49993883978095, "count": 298085, "is_parallel": true, "self": 197.13986476978607, "children": { "RLTrainer._checkpoint": { "total": 0.3600740699948801, "count": 4, "is_parallel": true, "self": 0.3600740699948801 } } }, "_update_policy": { "total": 86.89049720499679, "count": 
36, "is_parallel": true, "self": 24.184545219886786, "children": { "TorchPPOOptimizer.update": { "total": 62.705951985110005, "count": 2268, "is_parallel": true, "self": 62.705951985110005 } } } } } } } } }, "TrainerController._save_models": { "total": 0.08209098099905532, "count": 1, "self": 0.00043880999874090776, "children": { "RLTrainer._checkpoint": { "total": 0.08165217100031441, "count": 1, "self": 0.08165217100031441 } } } } } } }