{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.8067095279693604, "min": 1.7812886238098145, "max": 3.2957303524017334, "count": 502 }, "SoccerTwos.Policy.Entropy.sum": { "value": 38909.296875, "min": 27343.130859375, "max": 123741.046875, "count": 502 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 56.93103448275862, "min": 38.44094488188976, "max": 967.6, "count": 502 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19812.0, "min": 13796.0, "max": 26472.0, "count": 502 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1619.4299217928494, "min": 1192.4260417566475, "max": 1619.4299217928494, "count": 502 }, "SoccerTwos.Self-play.ELO.sum": { "value": 281780.8063919558, "min": 2386.5871956459205, "max": 384506.95127893426, "count": 502 }, "SoccerTwos.Step.mean": { "value": 5019918.0, "min": 9252.0, "max": 5019918.0, "count": 502 }, "SoccerTwos.Step.sum": { "value": 5019918.0, "min": 9252.0, "max": 5019918.0, "count": 502 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.034125134348869324, "min": -0.11229801923036575, "max": 0.1661233901977539, "count": 502 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 5.903648376464844, "min": -20.43824005126953, "max": 32.39406204223633, "count": 502 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.03661518543958664, "min": -0.10924917459487915, "max": 0.1797260344028473, "count": 502 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 6.3344268798828125, "min": -19.883350372314453, "max": 35.04657745361328, "count": 502 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 502 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 502 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.10670288725395423, "min": -0.6153846153846154, "max": 0.47377999424934386, "count": 502 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 18.459599494934082, "min": -60.135400235652924, "max": 63.6768000125885, "count": 502 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.10670288725395423, "min": -0.6153846153846154, "max": 0.47377999424934386, "count": 502 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 18.459599494934082, "min": -60.135400235652924, "max": 63.6768000125885, "count": 502 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 502 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 502 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.016927353704037765, "min": 0.010225890326546505, "max": 0.02390647333037729, "count": 242 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.016927353704037765, "min": 0.010225890326546505, "max": 0.02390647333037729, "count": 242 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.11432877431313197, "min": 0.0008438216881283249, "max": 0.12606887916723888, "count": 242 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.11432877431313197, "min": 0.0008438216881283249, "max": 0.12606887916723888, "count": 242 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.11599121515949568, "min": 0.0008475491486024112, "max": 0.12945492565631866, "count": 242 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.11599121515949568, "min": 0.0008475491486024112, "max": 0.12945492565631866, "count": 242 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 242 }, 
"SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 242 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 242 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 242 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 242 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 242 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1706923885", "python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "\\\\?\\C:\\Users\\dhanr\\miniconda3\\envs\\soccer_rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.0+cpu", "numpy_version": "1.23.5", "end_time_seconds": "1706961667" }, "total": 17981.592982500002, "count": 1, "self": 0.203863700004149, "children": { "run_training.setup": { "total": 0.10125999999991109, "count": 1, "self": 0.10125999999991109 }, "TrainerController.start_learning": { "total": 17981.2878588, "count": 1, "self": 8.235047399823088, "children": { "TrainerController._reset_env": { "total": 5.8604551999928844, "count": 26, "self": 5.8604551999928844 }, "TrainerController.advance": { "total": 17967.045158700184, "count": 346859, "self": 7.448338300364412, "children": { "env_step": { "total": 5343.905413099532, "count": 346859, "self": 4049.0789924993824, "children": { "SubprocessEnvManager._take_step": { "total": 1289.675657099881, "count": 346859, "self": 45.56477649991348, "children": { "TorchPolicy.evaluate": { "total": 1244.1108805999675, "count": 633202, "self": 1244.1108805999675 } } }, "workers": { "total": 5.150763500268567, "count": 346858, "self": 0.0, "children": { "worker_root": { "total": 17965.787383900584, "count": 346858, "is_parallel": true, "self": 14859.363311100657, "children": { "steps_from_proto": { "total": 0.04993329999933849, "count": 52, "is_parallel": true, "self": 0.009665499985203496, "children": { "_process_rank_one_or_two_observation": { "total": 0.04026780001413499, "count": 208, "is_parallel": true, "self": 0.04026780001413499 } } }, "UnityEnvironment.step": { "total": 3106.3741394999274, "count": 346858, "is_parallel": true, "self": 188.20646859875797, "children": { "UnityEnvironment._generate_step_input": { "total": 174.9408972012493, "count": 346858, "is_parallel": true, "self": 174.9408972012493 }, "communicator.exchange": { "total": 2149.1243471995704, "count": 346858, "is_parallel": true, "self": 2149.1243471995704 }, "steps_from_proto": { "total": 594.1024265003498, "count": 693716, "is_parallel": true, "self": 120.84388909805921, "children": { "_process_rank_one_or_two_observation": { "total": 473.2585374022906, "count": 2774864, "is_parallel": true, "self": 473.2585374022906 } } } } } } } } } } }, "trainer_advance": { "total": 12615.691407300288, "count": 346858, "self": 65.98170919965924, "children": { "process_trajectory": { "total": 1973.8463515006329, "count": 346858, "self": 1972.5603357006353, "children": { "RLTrainer._checkpoint": { 
"total": 1.2860157999975854, "count": 10, "self": 1.2860157999975854 } } }, "_update_policy": { "total": 10575.863346599996, "count": 242, "self": 775.593047600014, "children": { "TorchPOCAOptimizer.update": { "total": 9800.270298999982, "count": 7260, "self": 9800.270298999982 } } } } } } }, "trainer_threads": { "total": 1.6999983927235007e-06, "count": 1, "self": 1.6999983927235007e-06 }, "TrainerController._save_models": { "total": 0.1471958000001905, "count": 1, "self": 0.01100880000012694, "children": { "RLTrainer._checkpoint": { "total": 0.13618700000006356, "count": 1, "self": 0.13618700000006356 } } } } } } }