{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.3941988945007324,
"min": 2.3131208419799805,
"max": 2.7397079467773438,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 51484.8515625,
"min": 43049.46875,
"max": 61992.546875,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 55.87640449438202,
"min": 44.518181818181816,
"max": 82.40350877192982,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19892.0,
"min": 18704.0,
"max": 20792.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1447.8529248817329,
"min": 1362.6837262167921,
"max": 1452.3680940759016,
"count": 100
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 257717.82062894845,
"min": 156982.38206840446,
"max": 316908.9906318673,
"count": 100
},
"SoccerTwos.Step.mean": {
"value": 4999788.0,
"min": 4009916.0,
"max": 4999788.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 4999788.0,
"min": 4009916.0,
"max": 4999788.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.003941728733479977,
"min": -0.07890141010284424,
"max": 0.12007641047239304,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.7016277313232422,
"min": -10.651690483093262,
"max": 20.533065795898438,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006369381677359343,
"min": -0.074931800365448,
"max": 0.11935584992170334,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.1337499618530273,
"min": -10.115793228149414,
"max": 20.132320404052734,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.027782022283318338,
"min": -0.22990908989539513,
"max": 0.3713333321114381,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.945199966430664,
"min": -35.852399826049805,
"max": 53.47199982404709,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.027782022283318338,
"min": -0.22990908989539513,
"max": 0.3713333321114381,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.945199966430664,
"min": -35.852399826049805,
"max": 53.47199982404709,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018181406586275747,
"min": 0.011711180448764935,
"max": 0.024391871690750123,
"count": 48
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018181406586275747,
"min": 0.011711180448764935,
"max": 0.024391871690750123,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08860972747206688,
"min": 0.06452058975895246,
"max": 0.09899336422483127,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08860972747206688,
"min": 0.06452058975895246,
"max": 0.09899336422483127,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09137522255380949,
"min": 0.06540971485277017,
"max": 0.10181420395771662,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09137522255380949,
"min": 0.06540971485277017,
"max": 0.10181420395771662,
"count": 48
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 48
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 48
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677186599",
"python_version": "3.8.15 | packaged by conda-forge | (default, Nov 22 2022, 08:49:35) \n[GCC 10.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --resume --initialize-from=SoccerTwos --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1677208215"
},
"total": 21616.813935551,
"count": 1,
"self": 0.7073853080000845,
"children": {
"run_training.setup": {
"total": 0.016974316000016643,
"count": 1,
"self": 0.016974316000016643
},
"TrainerController.start_learning": {
"total": 21616.089575927,
"count": 1,
"self": 3.220351229869266,
"children": {
"TrainerController._reset_env": {
"total": 1.9565833170044584,
"count": 6,
"self": 1.9565833170044584
},
"TrainerController.advance": {
"total": 21610.641849352127,
"count": 69432,
"self": 3.5971864697057754,
"children": {
"env_step": {
"total": 14589.285470454291,
"count": 69432,
"self": 14164.850334149665,
"children": {
"SubprocessEnvManager._take_step": {
"total": 422.3507650345132,
"count": 69432,
"self": 15.411872300947664,
"children": {
"TorchPolicy.evaluate": {
"total": 406.9388927335655,
"count": 125404,
"self": 406.9388927335655
}
}
},
"workers": {
"total": 2.0843712701137633,
"count": 69432,
"self": 0.0,
"children": {
"worker_root": {
"total": 21579.579130355,
"count": 69432,
"is_parallel": true,
"self": 7797.377723194233,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005202019000023483,
"count": 2,
"is_parallel": true,
"self": 0.0010196599999972022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004182359000026281,
"count": 8,
"is_parallel": true,
"self": 0.004182359000026281
}
}
},
"UnityEnvironment.step": {
"total": 0.3199886740000011,
"count": 1,
"is_parallel": true,
"self": 0.0028174480000302538,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.003394438999976046,
"count": 1,
"is_parallel": true,
"self": 0.003394438999976046
},
"communicator.exchange": {
"total": 0.304528227999981,
"count": 1,
"is_parallel": true,
"self": 0.304528227999981
},
"steps_from_proto": {
"total": 0.009248559000013756,
"count": 2,
"is_parallel": true,
"self": 0.0008982999999602725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008350259000053484,
"count": 8,
"is_parallel": true,
"self": 0.008350259000053484
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.01767323799941778,
"count": 10,
"is_parallel": true,
"self": 0.0031686419932270837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.014504596006190695,
"count": 40,
"is_parallel": true,
"self": 0.014504596006190695
}
}
},
"UnityEnvironment.step": {
"total": 13782.183733922768,
"count": 69431,
"is_parallel": true,
"self": 68.27179831056492,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 297.5665126266925,
"count": 69431,
"is_parallel": true,
"self": 297.5665126266925
},
"communicator.exchange": {
"total": 12950.69541392179,
"count": 69431,
"is_parallel": true,
"self": 12950.69541392179
},
"steps_from_proto": {
"total": 465.65000906371944,
"count": 138862,
"is_parallel": true,
"self": 81.39833913502974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 384.2516699286897,
"count": 555448,
"is_parallel": true,
"self": 384.2516699286897
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7017.759192428131,
"count": 69432,
"self": 23.725909247148593,
"children": {
"process_trajectory": {
"total": 952.3833110709934,
"count": 69432,
"self": 951.8914804129913,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4918306580020726,
"count": 2,
"self": 0.4918306580020726
}
}
},
"_update_policy": {
"total": 6041.64997210999,
"count": 48,
"self": 204.19424143499418,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5837.455730674996,
"count": 1440,
"self": 5837.455730674996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.499991963617504e-07,
"count": 1,
"self": 8.499991963617504e-07
},
"TrainerController._save_models": {
"total": 0.270791177998035,
"count": 1,
"self": 0.004163938996498473,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2666272390015365,
"count": 1,
"self": 0.2666272390015365
}
}
}
}
}
}
}