{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.6789969205856323, "min": 1.6139636039733887, "max": 3.1792356967926025, "count": 974 }, "SoccerTwos.Policy.Entropy.sum": { "value": 34761.953125, "min": 12923.490234375, "max": 97019.671875, "count": 974 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 53.78651685393258, "min": 39.774193548387096, "max": 704.2857142857143, "count": 974 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19148.0, "min": 500.0, "max": 22936.0, "count": 974 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1617.5174798178518, "min": 1202.124959894051, "max": 1653.4411368174528, "count": 974 }, "SoccerTwos.Self-play.ELO.sum": { "value": 287918.11140757764, "min": 4842.336778710727, "max": 372985.3794988971, "count": 974 }, "SoccerTwos.Step.mean": { "value": 10229995.0, "min": 499984.0, "max": 10229995.0, "count": 974 }, "SoccerTwos.Step.sum": { "value": 10229995.0, "min": 499984.0, "max": 10229995.0, "count": 974 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.03288668766617775, "min": -0.14076904952526093, "max": 0.20489701628684998, "count": 974 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -5.886716842651367, "min": -26.042274475097656, "max": 27.105342864990234, "count": 974 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.03097132220864296, "min": -0.13690495491027832, "max": 0.20059315860271454, "count": 974 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -5.5438666343688965, "min": -26.245258331298828, "max": 28.4521484375, "count": 974 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 974 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 974 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.02111508326823485, "min": -1.0, "max": 0.4292565221371858, "count": 974 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 3.779599905014038, "min": -63.81559979915619, "max": 61.06059980392456, "count": 974 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.02111508326823485, "min": -1.0, "max": 0.4292565221371858, "count": 974 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 3.779599905014038, "min": -63.81559979915619, "max": 61.06059980392456, "count": 974 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 974 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 974 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.014747597804913918, "min": 0.010565502125731048, "max": 0.02409361251241838, "count": 471 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.014747597804913918, "min": 0.010565502125731048, "max": 0.02409361251241838, "count": 471 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.10497961044311524, "min": 0.006592017862324913, "max": 0.12515153388182323, "count": 471 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.10497961044311524, "min": 0.006592017862324913, "max": 0.12515153388182323, "count": 471 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.10647589216629665, "min": 0.006856041541323066, "max": 0.12717673753698666, "count": 471 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.10647589216629665, "min": 0.006856041541323066, "max": 0.12717673753698666, "count": 471 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 471 }, "SoccerTwos.Policy.LearningRate.sum": { 
"value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 471 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 471 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 471 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 471 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 471 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1691562307", "python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\jabril.jacobs\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.0.1+cpu", "numpy_version": "1.21.2", "end_time_seconds": "1691609914" }, "total": 47607.0949432, "count": 1, "self": 0.0294480999946245, "children": { "run_training.setup": { "total": 0.2819004999999999, "count": 1, "self": 0.2819004999999999 }, "TrainerController.start_learning": { "total": 47606.783594600005, "count": 1, "self": 25.0057035989812, "children": { "TrainerController._reset_env": { "total": 7.872740400001082, "count": 50, "self": 7.872740400001082 }, "TrainerController.advance": { "total": 47573.540504901015, "count": 674593, "self": 25.961158303405682, "children": { "env_step": { "total": 19325.817779799414, "count": 674593, "self": 15339.130630399264, "children": { "SubprocessEnvManager._take_step": { "total": 3970.8311628985143, "count": 674593, "self": 145.15546639835065, "children": { "TorchPolicy.evaluate": { "total": 3825.6756965001637, "count": 1222478, "self": 3825.6756965001637 } } }, "workers": { "total": 15.855986501636453, "count": 674593, "self": 0.0, "children": { "worker_root": { "total": 47539.671069898955, "count": 674593, "is_parallel": true, "self": 35073.97468629782, "children": { "steps_from_proto": { "total": 0.15965149998791528, "count": 100, "is_parallel": true, "self": 0.03169919991416492, "children": { "_process_rank_one_or_two_observation": { "total": 0.12795230007375036, "count": 400, "is_parallel": true, "self": 0.12795230007375036 } } }, "UnityEnvironment.step": { "total": 12465.536732101144, "count": 674593, "is_parallel": true, "self": 663.0972624047427, "children": { "UnityEnvironment._generate_step_input": { "total": 582.412609100614, "count": 674593, "is_parallel": true, "self": 582.412609100614 }, "communicator.exchange": { "total": 9052.722603199105, "count": 674593, "is_parallel": true, "self": 9052.722603199105 }, "steps_from_proto": { "total": 2167.3042573966823, "count": 1349186, "is_parallel": true, "self": 419.4124948965766, "children": { "_process_rank_one_or_two_observation": { "total": 1747.8917625001056, "count": 5396744, "is_parallel": true, "self": 1747.8917625001056 } } } } } } } } } } }, "trainer_advance": { "total": 28221.761566798195, "count": 674593, "self": 168.8990020952042, "children": { "process_trajectory": { "total": 4738.423875802926, "count": 674593, "self": 4731.62523170292, "children": { "RLTrainer._checkpoint": { "total": 6.798644100005379, "count": 20, "self": 6.798644100005379 } } }, 
"_update_policy": { "total": 23314.438688900067, "count": 472, "self": 2099.995094800419, "children": { "TorchPOCAOptimizer.update": { "total": 21214.443594099648, "count": 14144, "self": 21214.443594099648 } } } } } } }, "trainer_threads": { "total": 2.9000002541579306e-06, "count": 1, "self": 2.9000002541579306e-06 }, "TrainerController._save_models": { "total": 0.3646428000065498, "count": 1, "self": 0.008172100002411753, "children": { "RLTrainer._checkpoint": { "total": 0.35647070000413805, "count": 1, "self": 0.35647070000413805 } } } } } } }