poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.224320411682129,
"min": 1.1934096813201904,
"max": 3.2957115173339844,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 24525.5859375,
"min": 19798.484375,
"max": 114391.2421875,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.73972602739725,
"min": 43.07142857142857,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20072.0,
"min": 16812.0,
"max": 23752.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1649.6950294373494,
"min": 1193.387519517885,
"max": 1674.1346939885816,
"count": 946
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 240855.474297853,
"min": 2386.77503903577,
"max": 361264.701742583,
"count": 946
},
"SoccerTwos.Step.mean": {
"value": 9999898.0,
"min": 9452.0,
"max": 9999898.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999898.0,
"min": 9452.0,
"max": 9999898.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.013396024703979492,
"min": -0.09649722278118134,
"max": 0.16366739571094513,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.9558196067810059,
"min": -18.913455963134766,
"max": 23.21530532836914,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.015066573396325111,
"min": -0.09596189856529236,
"max": 0.16240698099136353,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.1997196674346924,
"min": -18.80853271484375,
"max": 23.998371124267578,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.07343287500616623,
"min": -0.5249176454894683,
"max": 0.5267454521222548,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -10.721199750900269,
"min": -49.8879998922348,
"max": 57.433600306510925,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.07343287500616623,
"min": -0.5249176454894683,
"max": 0.5267454521222548,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -10.721199750900269,
"min": -49.8879998922348,
"max": 57.433600306510925,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02196032485226169,
"min": 0.01057320363082302,
"max": 0.024185372155625374,
"count": 479
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02196032485226169,
"min": 0.01057320363082302,
"max": 0.024185372155625374,
"count": 479
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09936649103959401,
"min": 9.217846866249601e-07,
"max": 0.11876121511061986,
"count": 479
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09936649103959401,
"min": 9.217846866249601e-07,
"max": 0.11876121511061986,
"count": 479
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10050952831904093,
"min": 1.0755970796102095e-06,
"max": 0.11994767561554909,
"count": 479
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10050952831904093,
"min": 1.0755970796102095e-06,
"max": 0.11994767561554909,
"count": 479
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 4.912999017600248e-07,
"min": 4.912999017600248e-07,
"max": 0.00049889840022032,
"count": 479
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 4.912999017600248e-07,
"min": 4.912999017600248e-07,
"max": 0.00049889840022032,
"count": 479
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10009823999999998,
"min": 0.10009823999999998,
"max": 0.19977967999999996,
"count": 479
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10009823999999998,
"min": 0.10009823999999998,
"max": 0.19977967999999996,
"count": 479
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.4902176000000248e-05,
"min": 1.4902176000000248e-05,
"max": 0.004989006032000001,
"count": 479
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.4902176000000248e-05,
"min": 1.4902176000000248e-05,
"max": 0.004989006032000001,
"count": 479
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719048389",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\E:\\Conda\\install\\envs\\rl\\Scripts\\mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1719092978"
},
"total": 44589.40395359998,
"count": 1,
"self": 0.43065500003285706,
"children": {
"run_training.setup": {
"total": 0.12554799998179078,
"count": 1,
"self": 0.12554799998179078
},
"TrainerController.start_learning": {
"total": 44588.84775059996,
"count": 1,
"self": 24.855890704435296,
"children": {
"TrainerController._reset_env": {
"total": 9.302256700233556,
"count": 40,
"self": 9.302256700233556
},
"TrainerController.advance": {
"total": 44554.49283189536,
"count": 676323,
"self": 26.739062278473284,
"children": {
"env_step": {
"total": 20095.1796489057,
"count": 676323,
"self": 15811.539273703122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4266.221866301785,
"count": 676323,
"self": 139.56137516262243,
"children": {
"TorchPolicy.evaluate": {
"total": 4126.660491139162,
"count": 1262444,
"self": 4126.660491139162
}
}
},
"workers": {
"total": 17.418508900795132,
"count": 676323,
"self": 0.0,
"children": {
"worker_root": {
"total": 44551.766275383125,
"count": 676323,
"is_parallel": true,
"self": 31950.131210963067,
"children": {
"steps_from_proto": {
"total": 0.10991340025793761,
"count": 80,
"is_parallel": true,
"self": 0.02162499987753108,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08828840038040653,
"count": 320,
"is_parallel": true,
"self": 0.08828840038040653
}
}
},
"UnityEnvironment.step": {
"total": 12601.5251510198,
"count": 676323,
"is_parallel": true,
"self": 643.0856418304611,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 459.34943359234603,
"count": 676323,
"is_parallel": true,
"self": 459.34943359234603
},
"communicator.exchange": {
"total": 9458.439011297014,
"count": 676323,
"is_parallel": true,
"self": 9458.439011297014
},
"steps_from_proto": {
"total": 2040.6510642999783,
"count": 1352646,
"is_parallel": true,
"self": 396.5964069997426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1644.0546573002357,
"count": 5410584,
"is_parallel": true,
"self": 1644.0546573002357
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 24432.574120711186,
"count": 676323,
"self": 165.41424257232575,
"children": {
"process_trajectory": {
"total": 4025.447374839103,
"count": 676323,
"self": 4016.751310938911,
"children": {
"RLTrainer._checkpoint": {
"total": 8.696063900191803,
"count": 50,
"self": 8.696063900191803
}
}
},
"_update_policy": {
"total": 20241.712503299757,
"count": 479,
"self": 1764.827305602259,
"children": {
"TorchPOCAOptimizer.update": {
"total": 18476.8851976975,
"count": 14370,
"self": 18476.8851976975
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.7999631129205227e-06,
"count": 1,
"self": 2.7999631129205227e-06
},
"TrainerController._save_models": {
"total": 0.1967684999690391,
"count": 1,
"self": 0.006009599950630218,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1907589000184089,
"count": 1,
"self": 0.1907589000184089
}
}
}
}
}
}
}