SoccerTwos / run_logs /timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.130321979522705,
"min": 3.094003677368164,
"max": 3.196326732635498,
"count": 98
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 49784.640625,
"min": 6640.3564453125,
"max": 101255.59375,
"count": 98
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 622.0,
"max": 999.0,
"count": 98
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 3996.0,
"max": 32272.0,
"count": 98
},
"SoccerTwos.Step.mean": {
"value": 1999172.0,
"min": 1009162.0,
"max": 1999172.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 1999172.0,
"min": 1009162.0,
"max": 1999172.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.004479071591049433,
"min": -0.015565295703709126,
"max": -0.0010307520860806108,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.04479071497917175,
"min": -0.20024868845939636,
"max": -0.013521570712327957,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0040475367568433285,
"min": -0.01636963151395321,
"max": -0.0006929096998646855,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.04047536849975586,
"min": -0.1895117163658142,
"max": -0.006929096765816212,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.35756363651969214,
"max": 0.15663332988818487,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -4.0,
"max": 1.8795999586582184,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.35756363651969214,
"max": 0.15663332988818487,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -4.0,
"max": 1.8795999586582184,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1182.3467215275443,
"min": 1182.3467215275443,
"max": 1188.0745949308694,
"count": 43
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2364.6934430550887,
"min": 2364.6934430550887,
"max": 9496.53895594265,
"count": 43
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0139344495953992,
"min": 0.011848328060780962,
"max": 0.021928426437079908,
"count": 43
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0139344495953992,
"min": 0.011848328060780962,
"max": 0.021928426437079908,
"count": 43
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0007438580030187344,
"min": 6.040429269660308e-07,
"max": 0.0031857129405111527,
"count": 43
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0007438580030187344,
"min": 6.040429269660308e-07,
"max": 0.0031857129405111527,
"count": 43
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0007712062773255942,
"min": 6.05373832248309e-07,
"max": 0.0032253032938266792,
"count": 43
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0007712062773255942,
"min": 6.05373832248309e-07,
"max": 0.0032253032938266792,
"count": 43
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 43
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 43
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 43
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 43
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 43
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 43
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677073283",
"python_version": "3.8.15 | packaged by conda-forge | (default, Nov 22 2022, 08:49:35) \n[GCC 10.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --resume --initialize-from=SoccerTwos --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1677094636"
},
"total": 21352.880546647997,
"count": 1,
"self": 0.7440052459969593,
"children": {
"run_training.setup": {
"total": 0.020469732000492513,
"count": 1,
"self": 0.020469732000492513
},
"TrainerController.start_learning": {
"total": 21352.11607167,
"count": 1,
"self": 2.6825647874793503,
"children": {
"TrainerController._reset_env": {
"total": 2.247523407007975,
"count": 6,
"self": 2.247523407007975
},
"TrainerController.advance": {
"total": 21346.78264748851,
"count": 64676,
"self": 3.2051193225015595,
"children": {
"env_step": {
"total": 17908.125406118248,
"count": 64676,
"self": 17498.108757675865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 408.34013692037115,
"count": 64676,
"self": 18.43215989517921,
"children": {
"TorchPolicy.evaluate": {
"total": 389.90797702519194,
"count": 128558,
"self": 389.90797702519194
}
}
},
"workers": {
"total": 1.676511522011424,
"count": 64676,
"self": 0.0,
"children": {
"worker_root": {
"total": 21316.824138112293,
"count": 64676,
"is_parallel": true,
"self": 4189.692854776771,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005261721998977009,
"count": 2,
"is_parallel": true,
"self": 0.001454508994356729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00380721300462028,
"count": 8,
"is_parallel": true,
"self": 0.00380721300462028
}
}
},
"UnityEnvironment.step": {
"total": 0.22220683799969265,
"count": 1,
"is_parallel": true,
"self": 0.002064692998828832,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.004270829002052778,
"count": 1,
"is_parallel": true,
"self": 0.004270829002052778
},
"communicator.exchange": {
"total": 0.20700765699803014,
"count": 1,
"is_parallel": true,
"self": 0.20700765699803014
},
"steps_from_proto": {
"total": 0.008863659000780899,
"count": 2,
"is_parallel": true,
"self": 0.0010717400000430644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007791919000737835,
"count": 8,
"is_parallel": true,
"self": 0.007791919000737835
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.021267499007080914,
"count": 10,
"is_parallel": true,
"self": 0.003550054996594554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.01771744401048636,
"count": 40,
"is_parallel": true,
"self": 0.01771744401048636
}
}
},
"UnityEnvironment.step": {
"total": 17127.110015836515,
"count": 64675,
"is_parallel": true,
"self": 58.02131571182326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 323.27815109633957,
"count": 64675,
"is_parallel": true,
"self": 323.27815109633957
},
"communicator.exchange": {
"total": 16121.53144544861,
"count": 64675,
"is_parallel": true,
"self": 16121.53144544861
},
"steps_from_proto": {
"total": 624.2791035797418,
"count": 129350,
"is_parallel": true,
"self": 94.20912168586437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 530.0699818938774,
"count": 517400,
"is_parallel": true,
"self": 530.0699818938774
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3435.452122047762,
"count": 64676,
"self": 20.097007352487708,
"children": {
"process_trajectory": {
"total": 427.17622248229964,
"count": 64676,
"self": 426.17536823829505,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0008542440045858,
"count": 2,
"self": 1.0008542440045858
}
}
},
"_update_policy": {
"total": 2988.1788922129745,
"count": 43,
"self": 266.589264026843,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2721.5896281861314,
"count": 1359,
"self": 2721.5896281861314
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3179960660636425e-06,
"count": 1,
"self": 1.3179960660636425e-06
},
"TrainerController._save_models": {
"total": 0.4033346690048347,
"count": 1,
"self": 0.006257740002183709,
"children": {
"RLTrainer._checkpoint": {
"total": 0.397076929002651,
"count": 1,
"self": 0.397076929002651
}
}
}
}
}
}
}
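
A minimal sketch (not part of the original log) for inspecting this file with the Python standard library. It assumes the JSON above is saved locally as run_logs/timers.json; the path and the helper name walk are illustrative only, not part of ML-Agents.

    import json

    # Load the ML-Agents timers log shown above (local path is an assumption).
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    # Print the recorded value/min/max/count for each gauge,
    # e.g. SoccerTwos.Self-play.ELO.mean or SoccerTwos.Losses.PolicyLoss.mean.
    for name, g in timers["gauges"].items():
        print(f"{name}: value={g['value']:.6f} "
              f"min={g['min']:.6f} max={g['max']:.6f} count={g['count']}")

    # Recursively walk the timer tree (root -> children) and report where
    # wall-clock time was spent, indented by nesting depth.
    def walk(name, node, depth=0):
        total = node.get("total", 0.0)
        count = node.get("count", 0)
        print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
        for child_name, child in node.get("children", {}).items():
            walk(child_name, child, depth + 1)

    walk(timers["name"], timers)

Running this against the log above would, for example, show that of the ~21,353 s total, most time sits under TrainerController.advance -> env_step, with communicator.exchange dominating the parallel worker branch.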