{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.5082602500915527, "min": 1.4549720287322998, "max": 2.265712022781372, "count": 1202 }, "SoccerTwos.Policy.Entropy.sum": { "value": 33398.9140625, "min": 25996.87109375, "max": 47126.80859375, "count": 1202 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 53.064516129032256, "min": 39.63934426229508, "max": 85.66666666666667, "count": 1202 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19740.0, "min": 11676.0, "max": 21748.0, "count": 1202 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1572.202049157151, "min": 1403.3055570745742, "max": 1610.9957334443245, "count": 1202 }, "SoccerTwos.Self-play.ELO.sum": { "value": 292429.5811432301, "min": 173663.35595911706, "max": 380185.3125300383, "count": 1202 }, "SoccerTwos.Step.mean": { "value": 14659958.0, "min": 2649975.0, "max": 14659958.0, "count": 1202 }, "SoccerTwos.Step.sum": { "value": 14659958.0, "min": 2649975.0, "max": 14659958.0, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.04047131538391113, "min": -0.12949728965759277, "max": 0.10559314489364624, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -7.568136215209961, "min": -24.920269012451172, "max": 19.74591827392578, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.038699373602867126, "min": -0.1304614245891571, "max": 0.10752364993095398, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -7.236783027648926, "min": -25.15608787536621, "max": 20.106922149658203, "count": 1202 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 1202 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.07591336710567781, "min": -0.3485661543332613, "max": 0.3920156877804426, "count": 1202 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -14.19579964876175, "min": -73.85320019721985, "max": 59.978400230407715, "count": 1202 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.07591336710567781, "min": -0.3485661543332613, "max": 0.3920156877804426, "count": 1202 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -14.19579964876175, "min": -73.85320019721985, "max": 59.978400230407715, "count": 1202 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 1202 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 1202 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.016982776935522755, "min": 0.010868341186627125, "max": 0.02667792578189013, "count": 583 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.016982776935522755, "min": 0.010868341186627125, "max": 0.02667792578189013, "count": 583 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.11821085462967555, "min": 0.08800648426016172, "max": 0.12883232111732165, "count": 583 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.11821085462967555, "min": 0.08800648426016172, "max": 0.12883232111732165, "count": 583 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.11980451295773188, "min": 0.08937925597031911, "max": 0.13096312458316486, "count": 583 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.11980451295773188, "min": 0.08937925597031911, "max": 0.13096312458316486, "count": 583 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, 
"count": 583 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 583 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 583 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 583 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 583 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 583 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1680912511", "python_version": "3.9.16 (main, Mar 8 2023, 04:29:24) \n[Clang 14.0.6 ]", "command_line_arguments": "/opt/homebrew/Caskroom/miniconda/base/envs/deep-rl-class/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0", "numpy_version": "1.21.2", "end_time_seconds": "1680951275" }, "total": 38764.087656417, "count": 1, "self": 0.09442195900192019, "children": { "run_training.setup": { "total": 0.009670666999999966, "count": 1, "self": 0.009670666999999966 }, "TrainerController.start_learning": { "total": 38763.983563791, "count": 1, "self": 9.121081361445249, "children": { "TrainerController._reset_env": { "total": 3.2734744629951806, "count": 62, "self": 3.2734744629951806 }, "TrainerController.advance": { "total": 38751.51397613357, "count": 840821, "self": 7.283000948016706, "children": { "env_step": { "total": 30646.677439031813, "count": 840821, "self": 29467.50711357281, "children": { "SubprocessEnvManager._take_step": { "total": 1173.7110879960755, "count": 840821, "self": 30.728198681857293, "children": { "TorchPolicy.evaluate": { "total": 1142.9828893142183, "count": 1508960, "self": 1142.9828893142183 } } }, "workers": { "total": 5.4592374629280425, "count": 840821, "self": 0.0, "children": { "worker_root": { "total": 38741.549223342096, "count": 840821, "is_parallel": true, "self": 10345.48275503689, "children": { "steps_from_proto": { "total": 0.08496799695775925, "count": 124, "is_parallel": true, "self": 0.009829328961364903, "children": { "_process_rank_one_or_two_observation": { "total": 0.07513866799639435, "count": 496, "is_parallel": true, "self": 0.07513866799639435 } } }, "UnityEnvironment.step": { "total": 28395.981500308248, "count": 840821, "is_parallel": true, "self": 78.5241393496326, "children": { "UnityEnvironment._generate_step_input": { "total": 499.6481334747981, "count": 840821, "is_parallel": true, "self": 499.6481334747981 }, "communicator.exchange": { "total": 26759.368367345356, "count": 840821, "is_parallel": true, "self": 26759.368367345356 }, "steps_from_proto": { "total": 1058.4408601384594, "count": 1681642, "is_parallel": true, "self": 115.98925291435125, "children": { "_process_rank_one_or_two_observation": { "total": 942.4516072241081, "count": 6726568, "is_parallel": true, "self": 942.4516072241081 } } } } } } } } } } }, "trainer_advance": { "total": 8097.553536153739, "count": 840821, "self": 64.9191855755953, "children": { "process_trajectory": { "total": 2104.6275479220776, "count": 840821, "self": 2102.9520721740782, "children": { "RLTrainer._checkpoint": { "total": 
1.6754757479993714, "count": 24, "self": 1.6754757479993714 } } }, "_update_policy": { "total": 5928.006802656066, "count": 584, "self": 775.4999598093327, "children": { "TorchPOCAOptimizer.update": { "total": 5152.506842846733, "count": 17512, "self": 5152.506842846733 } } } } } } }, "trainer_threads": { "total": 9.57996235229075e-07, "count": 1, "self": 9.57996235229075e-07 }, "TrainerController._save_models": { "total": 0.07503087499935646, "count": 1, "self": 0.0011619170036283322, "children": { "RLTrainer._checkpoint": { "total": 0.07386895799572812, "count": 1, "self": 0.07386895799572812 } } } } } } }