poca-SoccerTwos / run_logs /timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5866121053695679,
"min": 1.527618646621704,
"max": 3.2174642086029053,
"count": 961
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32493.81640625,
"min": 6953.41748046875,
"max": 159545.171875,
"count": 961
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.12244897959184,
"min": 40.21666666666667,
"max": 999.0,
"count": 961
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19648.0,
"min": 1252.0,
"max": 31432.0,
"count": 961
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1462.7843713394097,
"min": 1195.6239676801113,
"max": 1534.4058743689864,
"count": 926
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 286705.7367825243,
"min": 2391.2479353602225,
"max": 357247.09576504957,
"count": 926
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 961
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 961
},
"SoccerTwos.Step.mean": {
"value": 10099984.0,
"min": 509840.0,
"max": 10099984.0,
"count": 960
},
"SoccerTwos.Step.sum": {
"value": 10099984.0,
"min": 509840.0,
"max": 10099984.0,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06891822814941406,
"min": -0.14169293642044067,
"max": 0.16339489817619324,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -13.507972717285156,
"min": -26.63827133178711,
"max": 25.03961181640625,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0703696683049202,
"min": -0.13789178431034088,
"max": 0.167164608836174,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -13.792455673217773,
"min": -25.923654556274414,
"max": 25.369762420654297,
"count": 960
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 960
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0867428572810426,
"min": -0.5304761919237319,
"max": 0.4851259258058336,
"count": 960
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -17.00160002708435,
"min": -54.925999879837036,
"max": 56.5091997385025,
"count": 960
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0867428572810426,
"min": -0.5304761919237319,
"max": 0.4851259258058336,
"count": 960
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -17.00160002708435,
"min": -54.925999879837036,
"max": 56.5091997385025,
"count": 960
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01604358012555167,
"min": 0.009785405876270185,
"max": 0.02438918145150334,
"count": 461
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01604358012555167,
"min": 0.009785405876270185,
"max": 0.02438918145150334,
"count": 461
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.12075758377710978,
"min": 8.424399216740616e-07,
"max": 0.12673227315147717,
"count": 461
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12075758377710978,
"min": 8.424399216740616e-07,
"max": 0.12673227315147717,
"count": 461
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12336124951640765,
"min": 8.604714347863288e-07,
"max": 0.12984081829587618,
"count": 461
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12336124951640765,
"min": 8.604714347863288e-07,
"max": 0.12984081829587618,
"count": 461
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 461
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 461
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 461
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 461
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 461
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 461
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684243551",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\ProgramData\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1684307269"
},
"total": 63718.5219284,
"count": 1,
"self": 2.7861281000004965,
"children": {
"run_training.setup": {
"total": 0.21971890000000016,
"count": 1,
"self": 0.21971890000000016
},
"TrainerController.start_learning": {
"total": 63715.5160814,
"count": 1,
"self": 25.563273298765125,
"children": {
"TrainerController._reset_env": {
"total": 23.362592199984995,
"count": 97,
"self": 23.362592199984995
},
"TrainerController.advance": {
"total": 63666.30404140124,
"count": 665938,
"self": 23.20057059924875,
"children": {
"env_step": {
"total": 19977.192617300956,
"count": 665938,
"self": 15309.20797199773,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4650.740881002019,
"count": 665938,
"self": 165.74838670110694,
"children": {
"TorchPolicy.evaluate": {
"total": 4484.992494300912,
"count": 1222556,
"self": 4484.992494300912
}
}
},
"workers": {
"total": 17.243764301206767,
"count": 665938,
"self": 0.0,
"children": {
"worker_root": {
"total": 63602.547887296896,
"count": 665938,
"is_parallel": true,
"self": 51273.08974479824,
"children": {
"steps_from_proto": {
"total": 0.35863439998501434,
"count": 194,
"is_parallel": true,
"self": 0.0657623999876975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.29287199999731683,
"count": 776,
"is_parallel": true,
"self": 0.29287199999731683
}
}
},
"UnityEnvironment.step": {
"total": 12329.099508098669,
"count": 665938,
"is_parallel": true,
"self": 752.6750525024345,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 677.5573044998821,
"count": 665938,
"is_parallel": true,
"self": 677.5573044998821
},
"communicator.exchange": {
"total": 8538.213244295146,
"count": 665938,
"is_parallel": true,
"self": 8538.213244295146
},
"steps_from_proto": {
"total": 2360.6539068012066,
"count": 1331876,
"is_parallel": true,
"self": 438.56148160226144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1922.0924251989452,
"count": 5327504,
"is_parallel": true,
"self": 1922.0924251989452
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 43665.91085350104,
"count": 665938,
"self": 169.67386100112344,
"children": {
"process_trajectory": {
"total": 6299.385353299973,
"count": 665938,
"self": 6294.3278008999905,
"children": {
"RLTrainer._checkpoint": {
"total": 5.057552399982548,
"count": 20,
"self": 5.057552399982548
}
}
},
"_update_policy": {
"total": 37196.851639199944,
"count": 462,
"self": 2144.8734003001955,
"children": {
"TorchPOCAOptimizer.update": {
"total": 35051.97823889975,
"count": 13867,
"self": 35051.97823889975
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.0000010156072676e-06,
"count": 1,
"self": 3.0000010156072676e-06
},
"TrainerController._save_models": {
"total": 0.2861715000035474,
"count": 1,
"self": 0.01669680000486551,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2694746999986819,
"count": 1,
"self": 0.2694746999986819
}
}
}
}
}
}
}