{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.0942254066467285,
"min": 3.0505452156066895,
"max": 3.2957489490509033,
"count": 723
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 62478.6015625,
"min": 14568.326171875,
"max": 147598.28125,
"count": 723
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 380.27272727272725,
"max": 999.0,
"count": 723
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 14060.0,
"max": 27524.0,
"count": 723
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1212.4203478670436,
"min": 1188.74567555429,
"max": 1216.2181202744537,
"count": 450
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4849.681391468174,
"min": 2377.49135110858,
"max": 19310.885895802334,
"count": 450
},
"SoccerTwos.Step.mean": {
"value": 7229290.0,
"min": 9780.0,
"max": 7229290.0,
"count": 723
},
"SoccerTwos.Step.sum": {
"value": 7229290.0,
"min": 9780.0,
"max": 7229290.0,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0048225512728095055,
"min": -0.03641282021999359,
"max": 0.047128573060035706,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.048225514590740204,
"min": -0.7282564043998718,
"max": 0.6597805023193359,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008394157513976097,
"min": -0.04248817265033722,
"max": 0.047219887375831604,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.08394157886505127,
"min": -0.7650524377822876,
"max": 0.6610501408576965,
"count": 723
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 723
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5384615384615384,
"max": 0.3671000003814697,
"count": 723
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -10.970800042152405,
"max": 5.873600006103516,
"count": 723
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5384615384615384,
"max": 0.3671000003814697,
"count": 723
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -10.970800042152405,
"max": 5.873600006103516,
"count": 723
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 723
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 723
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.011670061721815728,
"min": 0.011136161518030955,
"max": 0.024067297995012876,
"count": 337
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.011670061721815728,
"min": 0.011136161518030955,
"max": 0.024067297995012876,
"count": 337
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0015443278360180556,
"min": 3.769347968566687e-08,
"max": 0.008818658084298174,
"count": 337
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0015443278360180556,
"min": 3.769347968566687e-08,
"max": 0.008818658084298174,
"count": 337
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0015654605619298916,
"min": 3.895241480478262e-08,
"max": 0.008899103260288636,
"count": 337
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0015654605619298916,
"min": 3.895241480478262e-08,
"max": 0.008899103260288636,
"count": 337
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 337
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 337
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 337
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 337
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 337
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 337
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691592147",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Ben\\anaconda3\\envs\\soccertwos\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1691611633"
},
"total": 19485.494555,
"count": 1,
"self": 4.528668499999185,
"children": {
"run_training.setup": {
"total": 0.10322560000000003,
"count": 1,
"self": 0.10322560000000003
},
"TrainerController.start_learning": {
"total": 19480.862660900002,
"count": 1,
"self": 8.857006300775538,
"children": {
"TrainerController._reset_env": {
"total": 3.6961701000066443,
"count": 36,
"self": 3.6961701000066443
},
"TrainerController.advance": {
"total": 19468.162814199215,
"count": 469557,
"self": 8.724774800652085,
"children": {
"env_step": {
"total": 6153.651952299064,
"count": 469557,
"self": 4888.477995399755,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1259.5305559995495,
"count": 469557,
"self": 47.67138119867741,
"children": {
"TorchPolicy.evaluate": {
"total": 1211.859174800872,
"count": 932600,
"self": 1211.859174800872
}
}
},
"workers": {
"total": 5.643400899759746,
"count": 469556,
"self": 0.0,
"children": {
"worker_root": {
"total": 19466.95047229988,
"count": 469556,
"is_parallel": true,
"self": 15604.234410900384,
"children": {
"steps_from_proto": {
"total": 0.048356199999860516,
"count": 72,
"is_parallel": true,
"self": 0.010500199991285708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03785600000857481,
"count": 288,
"is_parallel": true,
"self": 0.03785600000857481
}
}
},
"UnityEnvironment.step": {
"total": 3862.667705199495,
"count": 469556,
"is_parallel": true,
"self": 198.30306729936274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 151.13009359970232,
"count": 469556,
"is_parallel": true,
"self": 151.13009359970232
},
"communicator.exchange": {
"total": 2892.368963799363,
"count": 469556,
"is_parallel": true,
"self": 2892.368963799363
},
"steps_from_proto": {
"total": 620.8655805010666,
"count": 939112,
"is_parallel": true,
"self": 132.82847609971884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 488.03710440134773,
"count": 3756448,
"is_parallel": true,
"self": 488.03710440134773
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13305.7860870995,
"count": 469556,
"self": 66.61950400023852,
"children": {
"process_trajectory": {
"total": 1073.0624936992708,
"count": 469556,
"self": 1071.0544292992706,
"children": {
"RLTrainer._checkpoint": {
"total": 2.008064400000194,
"count": 14,
"self": 2.008064400000194
}
}
},
"_update_policy": {
"total": 12166.10408939999,
"count": 337,
"self": 911.3280202000242,
"children": {
"TorchPOCAOptimizer.update": {
"total": 11254.776069199967,
"count": 10110,
"self": 11254.776069199967
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 0.14666920000308892,
"count": 1,
"self": 0.006894900001498172,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13977430000159075,
"count": 1,
"self": 0.13977430000159075
}
}
}
}
}
}
}