{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.5614609718322754, "min": 1.5191409587860107, "max": 1.993438482284546, "count": 1156 }, "SoccerTwos.Policy.Entropy.sum": { "value": 30829.484375, "min": 2775.701904296875, "max": 42203.4765625, "count": 1156 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 62.80769230769231, "min": 21.8, "max": 93.72727272727273, "count": 1156 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19596.0, "min": 436.0, "max": 21488.0, "count": 1156 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1644.9926584344712, "min": 1450.9584947933884, "max": 1666.5976136065917, "count": 1156 }, "SoccerTwos.Self-play.ELO.sum": { "value": 256618.8547157775, "min": 14675.541357275668, "max": 376430.228927529, "count": 1156 }, "SoccerTwos.Step.mean": { "value": 20609951.0, "min": 9059978.0, "max": 20609951.0, "count": 1156 }, "SoccerTwos.Step.sum": { "value": 20609951.0, "min": 9059978.0, "max": 20609951.0, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.023509476333856583, "min": -0.11572162061929703, "max": 0.10934069007635117, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -3.6909878253936768, "min": -23.880985260009766, "max": 13.756019592285156, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.023259663954377174, "min": -0.11759790033102036, "max": 0.09960361570119858, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -3.6517672538757324, "min": -24.803630828857422, "max": 13.793081283569336, "count": 1156 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 1156 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.014616560784115154, "min": -0.3788945948755419, "max": 0.3006535028196444, "count": 1156 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 2.294800043106079, "min": -56.0764000415802, "max": 47.202599942684174, "count": 1156 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.014616560784115154, "min": -0.3788945948755419, "max": 0.3006535028196444, "count": 1156 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 2.294800043106079, "min": -56.0764000415802, "max": 47.202599942684174, "count": 1156 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 1156 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 1156 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.019130369467893614, "min": 0.008965946462315818, "max": 0.023765867362574983, "count": 560 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.019130369467893614, "min": 0.008965946462315818, "max": 0.023765867362574983, "count": 560 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.10202107181151708, "min": 0.07586363901694616, "max": 0.12432742193341255, "count": 560 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.10202107181151708, "min": 0.07586363901694616, "max": 0.12432742193341255, "count": 560 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.10362946639458338, "min": 0.07638966143131257, "max": 0.12680880700548489, "count": 560 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.10362946639458338, "min": 0.07638966143131257, "max": 0.12680880700548489, "count": 560 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 560 
}, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 560 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 560 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 560 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 560 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 560 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682379432", "python_version": "3.9.16 (main, Dec 7 2022, 01:12:08) \n[GCC 11.3.0]", "command_line_arguments": "/home/nikita/.venvs/py39/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos7 --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1682425862" }, "total": 46430.206777502, "count": 1, "self": 0.07958911496098153, "children": { "run_training.setup": { "total": 0.02251585602061823, "count": 1, "self": 0.02251585602061823 }, "TrainerController.start_learning": { "total": 46430.104672531015, "count": 1, "self": 18.856571377633372, "children": { "TrainerController._reset_env": { "total": 2.2138183759816457, "count": 59, "self": 2.2138183759816457 }, "TrainerController.advance": { "total": 46408.754721603385, "count": 798496, "self": 19.302865586447297, "children": { "env_step": { "total": 14950.792426131899, "count": 798496, "self": 12488.819591601175, "children": { "SubprocessEnvManager._take_step": { "total": 2450.5338709263597, "count": 798496, "self": 114.94879767205566, "children": { "TorchPolicy.evaluate": { "total": 2335.585073254304, "count": 1450430, "self": 2335.585073254304 } } }, "workers": { "total": 11.43896360436338, "count": 798496, "self": 0.0, "children": { "worker_root": { "total": 46343.89149625192, "count": 798496, "is_parallel": true, "self": 35827.00101177563, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.003150269971229136, "count": 2, "is_parallel": true, "self": 0.0012162739876657724, "children": { "_process_rank_one_or_two_observation": { "total": 0.0019339959835633636, "count": 8, "is_parallel": true, "self": 0.0019339959835633636 } } }, "UnityEnvironment.step": { "total": 0.03972226899350062, "count": 1, "is_parallel": true, "self": 0.0021640089980792254, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0009829799819272012, "count": 1, "is_parallel": true, "self": 0.0009829799819272012 }, "communicator.exchange": { "total": 0.033291461993940175, "count": 1, "is_parallel": true, "self": 0.033291461993940175 }, "steps_from_proto": { "total": 0.003283818019554019, "count": 2, "is_parallel": true, "self": 0.0004410270194057375, "children": { "_process_rank_one_or_two_observation": { "total": 0.0028427910001482815, "count": 8, "is_parallel": true, "self": 0.0028427910001482815 } } } } } } }, "steps_from_proto": { "total": 0.11017935519339517, "count": 116, "is_parallel": true, "self": 0.02168753181467764, "children": { "_process_rank_one_or_two_observation": { "total": 
0.08849182337871753, "count": 464, "is_parallel": true, "self": 0.08849182337871753 } } }, "UnityEnvironment.step": { "total": 10516.780305121094, "count": 798495, "is_parallel": true, "self": 693.4802993710618, "children": { "UnityEnvironment._generate_step_input": { "total": 515.5107734003686, "count": 798495, "is_parallel": true, "self": 515.5107734003686 }, "communicator.exchange": { "total": 7235.281225533516, "count": 798495, "is_parallel": true, "self": 7235.281225533516 }, "steps_from_proto": { "total": 2072.508006816148, "count": 1596990, "is_parallel": true, "self": 389.5634693952452, "children": { "_process_rank_one_or_two_observation": { "total": 1682.9445374209026, "count": 6387960, "is_parallel": true, "self": 1682.9445374209026 } } } } } } } } } } }, "trainer_advance": { "total": 31438.65942988504, "count": 798496, "self": 117.7883417828125, "children": { "process_trajectory": { "total": 3157.696970941033, "count": 798496, "self": 3151.923304645985, "children": { "RLTrainer._checkpoint": { "total": 5.773666295048315, "count": 23, "self": 5.773666295048315 } } }, "_update_policy": { "total": 28163.174117161194, "count": 561, "self": 1720.919731839269, "children": { "TorchPOCAOptimizer.update": { "total": 26442.254385321925, "count": 16829, "self": 26442.254385321925 } } } } } } }, "trainer_threads": { "total": 2.0060106180608273e-06, "count": 1, "self": 2.0060106180608273e-06 }, "TrainerController._save_models": { "total": 0.279559168004198, "count": 1, "self": 0.002297809987794608, "children": { "RLTrainer._checkpoint": { "total": 0.2772613580164034, "count": 1, "self": 0.2772613580164034 } } } } } } }