{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.548988938331604, "min": 1.485166311264038, "max": 1.6092711687088013, "count": 174 }, "SoccerTwos.Policy.Entropy.sum": { "value": 32466.80859375, "min": 4698.88720703125, "max": 34608.0390625, "count": 174 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 51.947916666666664, "min": 35.75, "max": 70.77941176470588, "count": 174 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19948.0, "min": 2288.0, "max": 20400.0, "count": 174 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1708.4938309597394, "min": 1683.4743966082951, "max": 1757.9505260196202, "count": 174 }, "SoccerTwos.Self-play.ELO.sum": { "value": 328030.81554427, "min": 55690.46817514276, "max": 412438.1176888502, "count": 174 }, "SoccerTwos.Step.mean": { "value": 9999982.0, "min": 8269954.0, "max": 9999982.0, "count": 174 }, "SoccerTwos.Step.sum": { "value": 9999982.0, "min": 8269954.0, "max": 9999982.0, "count": 174 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.06604767590761185, "min": -0.18169166147708893, "max": 0.10646546632051468, "count": 174 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -12.681154251098633, "min": -36.88340759277344, "max": 19.445560455322266, "count": 174 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.0673023909330368, "min": -0.1797272264957428, "max": 0.10065256804227829, "count": 174 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -12.922059059143066, "min": -36.48462677001953, "max": 18.721588134765625, "count": 174 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 174 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 174 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.014808334410190582, "min": -0.36431724273512517, "max": 0.44370666742324827, "count": 174 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 2.843200206756592, "min": -73.95640027523041, "max": 49.253599882125854, "count": 174 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.014808334410190582, "min": -0.36431724273512517, "max": 0.44370666742324827, "count": 174 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 2.843200206756592, "min": -73.95640027523041, "max": 49.253599882125854, "count": 174 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 174 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 174 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.0218050555481265, "min": 0.012423456782319894, "max": 0.02350351942780738, "count": 84 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.0218050555481265, "min": 0.012423456782319894, "max": 0.02350351942780738, "count": 84 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.1447503884633382, "min": 0.11635837629437447, "max": 0.156904370089372, "count": 84 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.1447503884633382, "min": 0.11635837629437447, "max": 0.156904370089372, "count": 84 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.1476508542895317, "min": 0.11786224568883578, "max": 0.16039011279741924, "count": 84 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.1476508542895317, "min": 0.11786224568883578, "max": 0.16039011279741924, "count": 84 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0005000000000000001, "min": 0.0005000000000000001, "max": 0.0005000000000000001, "count": 84 
}, "SoccerTwos.Policy.LearningRate.sum": { "value": 0.0005000000000000001, "min": 0.0005000000000000001, "max": 0.0005000000000000001, "count": 84 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 84 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 84 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 84 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 84 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1711947816", "python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]", "command_line_arguments": "/home/diego/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=Soccer-first_try --resume --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.2.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1711952093" }, "total": 4276.596033956, "count": 1, "self": 0.4261898860004294, "children": { "run_training.setup": { "total": 0.03723726899988833, "count": 1, "self": 0.03723726899988833 }, "TrainerController.start_learning": { "total": 4276.132606800999, "count": 1, "self": 3.251940558896422, "children": { "TrainerController._reset_env": { "total": 2.9387764390003213, "count": 7, "self": 2.9387764390003213 }, "TrainerController.advance": { "total": 4269.224948320102, "count": 120951, "self": 3.3547065291595572, "children": { "env_step": { "total": 3105.9626105929783, "count": 120951, "self": 2317.0614197979758, "children": { "SubprocessEnvManager._take_step": { "total": 786.8380504500531, "count": 120951, "self": 18.87109172723399, "children": { "TorchPolicy.evaluate": { "total": 767.9669587228191, "count": 216970, "self": 767.9669587228191 } } }, "workers": { "total": 2.0631403449497157, "count": 120951, "self": 0.0, "children": { "worker_root": { "total": 4270.53373726195, "count": 120951, "is_parallel": true, "self": 2322.9571135547894, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0032374659999732103, "count": 2, "is_parallel": true, "self": 0.0007915249996131024, "children": { "_process_rank_one_or_two_observation": { "total": 0.002445941000360108, "count": 8, "is_parallel": true, "self": 0.002445941000360108 } } }, "UnityEnvironment.step": { "total": 0.046279590000040116, "count": 1, "is_parallel": true, "self": 0.0007429590000356256, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005584170000929589, "count": 1, "is_parallel": true, "self": 0.0005584170000929589 }, "communicator.exchange": { "total": 0.042695803000015076, "count": 1, "is_parallel": true, "self": 0.042695803000015076 }, "steps_from_proto": { "total": 0.002282410999896456, "count": 2, "is_parallel": true, "self": 0.00045278000015969155, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018296309997367644, "count": 8, "is_parallel": true, "self": 0.0018296309997367644 } } } } } } }, "steps_from_proto": { "total": 0.016259086999525607, "count": 12, "is_parallel": true, "self": 0.0030963879994487797, "children": { 
"_process_rank_one_or_two_observation": { "total": 0.013162699000076827, "count": 48, "is_parallel": true, "self": 0.013162699000076827 } } }, "UnityEnvironment.step": { "total": 1947.560364620161, "count": 120950, "is_parallel": true, "self": 101.92931683336406, "children": { "UnityEnvironment._generate_step_input": { "total": 64.88061279077442, "count": 120950, "is_parallel": true, "self": 64.88061279077442 }, "communicator.exchange": { "total": 1485.3248722199667, "count": 120950, "is_parallel": true, "self": 1485.3248722199667 }, "steps_from_proto": { "total": 295.4255627760558, "count": 241900, "is_parallel": true, "self": 55.37362536214073, "children": { "_process_rank_one_or_two_observation": { "total": 240.05193741391508, "count": 967600, "is_parallel": true, "self": 240.05193741391508 } } } } } } } } } } }, "trainer_advance": { "total": 1159.9076311979643, "count": 120951, "self": 20.41215982195149, "children": { "process_trajectory": { "total": 478.2850384120122, "count": 120951, "self": 475.30852306901147, "children": { "RLTrainer._checkpoint": { "total": 2.976515343000756, "count": 4, "self": 2.976515343000756 } } }, "_update_policy": { "total": 661.2104329640006, "count": 84, "self": 282.2376383639901, "children": { "TorchPOCAOptimizer.update": { "total": 378.9727946000105, "count": 2520, "self": 378.9727946000105 } } } } } } }, "trainer_threads": { "total": 1.2119999155402184e-06, "count": 1, "self": 1.2119999155402184e-06 }, "TrainerController._save_models": { "total": 0.716940271000567, "count": 1, "self": 0.01775737600019056, "children": { "RLTrainer._checkpoint": { "total": 0.6991828950003764, "count": 1, "self": 0.6991828950003764 } } } } } } }