{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 2.8396008014678955, "min": 2.8396008014678955, "max": 2.8736937046051025, "count": 2 }, "SnowballTarget.Policy.Entropy.sum": { "value": 29361.47265625, "min": 29361.47265625, "max": 29524.328125, "count": 2 }, "SnowballTarget.Step.mean": { "value": 19992.0, "min": 9952.0, "max": 19992.0, "count": 2 }, "SnowballTarget.Step.sum": { "value": 19992.0, "min": 9952.0, "max": 19992.0, "count": 2 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 1.263075828552246, "min": 0.4022206962108612, "max": 1.263075828552246, "count": 2 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 258.9305419921875, "min": 78.03081512451172, "max": 258.9305419921875, "count": 2 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 2 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 10945.0, "min": 8756.0, "max": 10945.0, "count": 2 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06481831646798289, "min": 0.06481831646798289, "max": 0.07193488530630894, "count": 2 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.3240915823399144, "min": 0.28773954122523576, "max": 0.3240915823399144, "count": 2 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.19746566072982902, "min": 0.09731209359255016, "max": 0.19746566072982902, "count": 2 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.987328303649145, "min": 0.38924837437020066, "max": 0.987328303649145, "count": 2 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 7.032007656e-05, "min": 7.032007656e-05, "max": 0.00021882002706000002, "count": 2 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 0.0003516003828, "min": 0.0003516003828, "max": 0.0008752801082400001, "count": 2 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.12344000000000002, "min": 0.12344000000000002, "max": 0.17294, "count": 2 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.6172000000000001, "min": 0.6172000000000001, "max": 0.69176, "count": 2 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0011796560000000003, "min": 0.0011796560000000003, "max": 0.003649706, "count": 2 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.005898280000000001, "min": 0.005898280000000001, "max": 0.014598824, "count": 2 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 5.745454545454545, "min": 2.8636363636363638, "max": 5.745454545454545, "count": 2 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 316.0, "min": 126.0, "max": 316.0, "count": 2 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 5.745454545454545, "min": 2.8636363636363638, "max": 5.745454545454545, "count": 2 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 316.0, "min": 126.0, "max": 316.0, "count": 2 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 2 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 2 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1692016966", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/home/victor/.local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", 
"numpy_version": "1.21.2", "end_time_seconds": "1692017019" }, "total": 53.44478477200005, "count": 1, "self": 1.2209125869999298, "children": { "run_training.setup": { "total": 0.02174069300008341, "count": 1, "self": 0.02174069300008341 }, "TrainerController.start_learning": { "total": 52.202131492000035, "count": 1, "self": 0.07423351099873798, "children": { "TrainerController._reset_env": { "total": 6.655812728000001, "count": 1, "self": 6.655812728000001 }, "TrainerController.advance": { "total": 45.364935828001535, "count": 1879, "self": 0.021415703004322495, "children": { "env_step": { "total": 45.34352012499721, "count": 1879, "self": 29.21571261499139, "children": { "SubprocessEnvManager._take_step": { "total": 16.105558508004606, "count": 1879, "self": 0.1321435220006606, "children": { "TorchPolicy.evaluate": { "total": 15.973414986003945, "count": 1879, "self": 15.973414986003945 } } }, "workers": { "total": 0.022249002001217377, "count": 1879, "self": 0.0, "children": { "worker_root": { "total": 52.006834045000915, "count": 1879, "is_parallel": true, "self": 30.296384265993538, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0020248989999345213, "count": 1, "is_parallel": true, "self": 0.0005751999999574764, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014496989999770449, "count": 10, "is_parallel": true, "self": 0.0014496989999770449 } } }, "UnityEnvironment.step": { "total": 0.019341898999982732, "count": 1, "is_parallel": true, "self": 0.0002630000001317967, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00017419999994672253, "count": 1, "is_parallel": true, "self": 0.00017419999994672253 }, "communicator.exchange": { "total": 0.01802319900002658, "count": 1, "is_parallel": true, "self": 0.01802319900002658 }, "steps_from_proto": { "total": 0.0008814999998776329, "count": 1, "is_parallel": true, "self": 0.00018149999959859997, "children": { "_process_rank_one_or_two_observation": { "total": 0.000700000000279033, "count": 10, "is_parallel": true, "self": 0.000700000000279033 } } } } } } }, "UnityEnvironment.step": { "total": 21.710449779007376, "count": 1878, "is_parallel": true, "self": 0.522955083021543, "children": { "UnityEnvironment._generate_step_input": { "total": 0.3177918349997526, "count": 1878, "is_parallel": true, "self": 0.3177918349997526 }, "communicator.exchange": { "total": 19.193862417992932, "count": 1878, "is_parallel": true, "self": 19.193862417992932 }, "steps_from_proto": { "total": 1.6758404429931488, "count": 1878, "is_parallel": true, "self": 0.36650654699997176, "children": { "_process_rank_one_or_two_observation": { "total": 1.309333895993177, "count": 18780, "is_parallel": true, "self": 1.309333895993177 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00019559999986995535, "count": 1, "self": 0.00019559999986995535, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 45.22216580798954, "count": 31251, "is_parallel": true, "self": 0.5188741819702045, "children": { "process_trajectory": { "total": 26.509791286019208, "count": 31251, "is_parallel": true, "self": 25.827423473019508, "children": { "RLTrainer._checkpoint": { "total": 0.6823678129997006, "count": 4, "is_parallel": true, "self": 0.6823678129997006 } } }, "_update_policy": { "total": 18.193500340000128, "count": 9, "is_parallel": true, "self": 
3.1610006109995084, "children": { "TorchPPOOptimizer.update": { "total": 15.03249972900062, "count": 456, "is_parallel": true, "self": 15.03249972900062 } } } } } } } } }, "TrainerController._save_models": { "total": 0.10695382499989137, "count": 1, "self": 0.0006624999998621206, "children": { "RLTrainer._checkpoint": { "total": 0.10629132500002925, "count": 1, "self": 0.10629132500002925 } } } } } } }
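The blob above is the raw training log emitted by ML-Agents for this run: a set of gauges (each keeping the latest value plus min/max/count for a metric such as SnowballTarget.Environment.CumulativeReward.mean) followed by a hierarchical timer profile ("total"/"self" seconds and call counts, nested under "children"). As a rough illustration only, the Python sketch below shows one way to load and summarize such a file; the path run_logs/timers.json is an assumption based on ML-Agents' usual output layout and is not stated anywhere in the data itself.

# Minimal sketch, assuming the JSON above is saved as run_logs/timers.json
# (hypothetical path) and has the structure shown: "gauges" plus a timer tree.
import json

with open("run_logs/timers.json") as f:
    data = json.load(f)

# Each gauge records the most recent value together with min/max/count.
for name, gauge in data.get("gauges", {}).items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The remaining top-level keys form a timer tree: every node carries
# "total" seconds and a call "count", with nested timers under "children".
def walk(node, label="root", depth=0):
    total = node.get("total")
    if total is not None:
        print("  " * depth + f"{label}: {total:.3f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(data)

Run against this log, the walk would show, for example, that TrainerController.advance accounts for roughly 45 of the 52 seconds spent in TrainerController.start_learning, most of it inside env_step.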