{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 2.064441442489624, "min": 2.0444977283477783, "max": 3.233220100402832, "count": 451 }, "SoccerTwos.Policy.Entropy.sum": { "value": 40562.14453125, "min": 15808.7412109375, "max": 126365.359375, "count": 451 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 45.91509433962264, "min": 40.411764705882355, "max": 999.0, "count": 451 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19468.0, "min": 672.0, "max": 23356.0, "count": 451 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1542.167952898653, "min": 1185.6428471034415, "max": 1542.167952898653, "count": 408 }, "SoccerTwos.Self-play.ELO.sum": { "value": 326939.60601451446, "min": 2371.285694206883, "max": 361833.6848822148, "count": 408 }, "SoccerTwos.Step.mean": { "value": 4999964.0, "min": 499930.0, "max": 4999964.0, "count": 451 }, "SoccerTwos.Step.sum": { "value": 4999964.0, "min": 499930.0, "max": 4999964.0, "count": 451 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.0024937931448221207, "min": -0.07931052148342133, "max": 0.2004467248916626, "count": 451 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 0.528684139251709, "min": -14.910377502441406, "max": 36.18144989013672, "count": 451 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.0056453244760632515, "min": -0.07573166489601135, "max": 0.20079714059829712, "count": 451 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 1.1968088150024414, "min": -14.237552642822266, "max": 36.935081481933594, "count": 451 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 451 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 451 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.06183396195465664, "min": -0.6656333333036552, "max": 0.9363999962806702, "count": 451 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -13.108799934387207, "min": -42.22600018978119, "max": 66.73700040578842, "count": 451 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.06183396195465664, "min": -0.6656333333036552, "max": 0.9363999962806702, "count": 451 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -13.108799934387207, "min": -42.22600018978119, "max": 66.73700040578842, "count": 451 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 451 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 451 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.014724057419031548, "min": 0.010694276247522793, "max": 0.023962919188973805, "count": 214 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.014724057419031548, "min": 0.010694276247522793, "max": 0.023962919188973805, "count": 214 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.11216364403565725, "min": 9.579119648606138e-07, "max": 0.12214635908603669, "count": 214 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.11216364403565725, "min": 9.579119648606138e-07, "max": 0.12214635908603669, "count": 214 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.11357599273324012, "min": 1.0092442342587067e-06, "max": 0.12545698160926502, "count": 214 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.11357599273324012, "min": 1.0092442342587067e-06, "max": 0.12545698160926502, "count": 214 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 214 }, 
"SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 214 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 214 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 214 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 214 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 214 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1684335305", "python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\Emmanuel\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.0.1+cpu", "numpy_version": "1.21.2", "end_time_seconds": "1684358224" }, "total": 22919.4503962, "count": 1, "self": 0.4187705000003916, "children": { "run_training.setup": { "total": 0.1581737000000003, "count": 1, "self": 0.1581737000000003 }, "TrainerController.start_learning": { "total": 22918.873452, "count": 1, "self": 11.475763500595349, "children": { "TrainerController._reset_env": { "total": 6.252192300003808, "count": 24, "self": 6.252192300003808 }, "TrainerController.advance": { "total": 22900.862862599406, "count": 302894, "self": 11.053901999028312, "children": { "env_step": { "total": 9182.20118870046, "count": 302894, "self": 7281.1340156005335, "children": { "SubprocessEnvManager._take_step": { "total": 1893.608975199366, "count": 302894, "self": 65.98006409878326, "children": { "TorchPolicy.evaluate": { "total": 1827.6289111005826, "count": 572372, "self": 1827.6289111005826 } } }, "workers": { "total": 7.458197900559716, "count": 302894, "self": 0.0, "children": { "worker_root": { "total": 22897.53063140028, "count": 302894, "is_parallel": true, "self": 16962.63199309952, "children": { "steps_from_proto": { "total": 0.07692339998958442, "count": 48, "is_parallel": true, "self": 0.015386199962286184, "children": { "_process_rank_one_or_two_observation": { "total": 0.06153720002729823, "count": 192, "is_parallel": true, "self": 0.06153720002729823 } } }, "UnityEnvironment.step": { "total": 5934.8217149007705, "count": 302894, "is_parallel": true, "self": 282.4958238001909, "children": { "UnityEnvironment._generate_step_input": { "total": 253.47788129961202, "count": 302894, "is_parallel": true, "self": 253.47788129961202 }, "communicator.exchange": { "total": 4408.640036500394, "count": 302894, "is_parallel": true, "self": 4408.640036500394 }, "steps_from_proto": { "total": 990.2079733005742, "count": 605788, "is_parallel": true, "self": 200.03389900121556, "children": { "_process_rank_one_or_two_observation": { "total": 790.1740742993586, "count": 2423152, "is_parallel": true, "self": 790.1740742993586 } } } } } } } } } } }, "trainer_advance": { "total": 13707.607771899919, "count": 302894, "self": 74.38110829950892, "children": { "process_trajectory": { "total": 3083.850163000402, "count": 302894, "self": 3081.0774407003983, "children": { "RLTrainer._checkpoint": { "total": 2.7727223000039025, "count": 10, "self": 
2.7727223000039025 } } }, "_update_policy": { "total": 10549.376500600007, "count": 214, "self": 1170.0145669998365, "children": { "TorchPOCAOptimizer.update": { "total": 9379.36193360017, "count": 6420, "self": 9379.36193360017 } } } } } } }, "trainer_threads": { "total": 1.2999989849049598e-06, "count": 1, "self": 1.2999989849049598e-06 }, "TrainerController._save_models": { "total": 0.2826322999972035, "count": 1, "self": 0.01060449999931734, "children": { "RLTrainer._checkpoint": { "total": 0.2720277999978862, "count": 1, "self": 0.2720277999978862 } } } } } } }
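The log above is plain JSON: "gauges" holds per-statistic summaries (value/min/max/count) and the rest is a recursive timer tree (total/count/self/children). A minimal sketch of how one might load and summarize it follows; the file path "results/SoccerTwos/run_logs/timers.json" is an assumption based on the --run-id shown in the metadata, not something recorded in the log itself.

import json

# Assumed location of this log; adjust to wherever the run_logs folder actually sits.
with open("results/SoccerTwos/run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge is a {"value", "min", "max", "count"} summary of one training statistic.
for name, gauge in root["gauges"].items():
    print(f'{name}: last={gauge["value"]:.4f} min={gauge["min"]:.4f} max={gauge["max"]:.4f}')

# The timer tree nests {"total", "count", "self", "children"}; walk it to see where time went.
def walk(node, name="root", depth=0):
    print("  " * depth + f'{name}: {node.get("total", 0.0):.1f}s over {node.get("count", 0)} calls')
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)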