{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.446406364440918,
"min": 1.3144081830978394,
"max": 3.295694589614868,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29900.11328125,
"min": 2531.093505859375,
"max": 120581.984375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.76744186046512,
"min": 40.21138211382114,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19528.0,
"min": 8508.0,
"max": 29024.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1475.320743529146,
"min": 1192.247579527929,
"max": 1638.8461169062796,
"count": 4961
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 253755.1678870131,
"min": 2384.9742369170244,
"max": 372278.2488110581,
"count": 4961
},
"SoccerTwos.Step.mean": {
"value": 49999978.0,
"min": 9528.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999978.0,
"min": 9528.0,
"max": 49999978.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.10765518993139267,
"min": -0.13581587374210358,
"max": 0.2133915275335312,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -18.516693115234375,
"min": -24.990121841430664,
"max": 27.262981414794922,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10555391758680344,
"min": -0.1348966658115387,
"max": 0.2110351175069809,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -18.1552734375,
"min": -26.05246353149414,
"max": 26.826025009155273,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.27730697670648263,
"min": -0.5882352941176471,
"max": 0.4262222243576414,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -47.696799993515015,
"min": -69.46000027656555,
"max": 61.8814001083374,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.27730697670648263,
"min": -0.5882352941176471,
"max": 0.4262222243576414,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -47.696799993515015,
"min": -69.46000027656555,
"max": 61.8814001083374,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.022182459169804738,
"min": 0.010085381534008776,
"max": 0.026134529170424987,
"count": 2422
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.022182459169804738,
"min": 0.010085381534008776,
"max": 0.026134529170424987,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09831546048323313,
"min": 2.3263883501082697e-07,
"max": 0.1319205698867639,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09831546048323313,
"min": 2.3263883501082697e-07,
"max": 0.1319205698867639,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09923548027873039,
"min": 2.540691194023263e-07,
"max": 0.13445422401030857,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09923548027873039,
"min": 2.540691194023263e-07,
"max": 0.13445422401030857,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2422
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2422
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714120417",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\awawrzenczyk\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1714280392"
},
"total": 159973.10677129996,
"count": 1,
"self": 1.7551786999683827,
"children": {
"run_training.setup": {
"total": 0.0960217000101693,
"count": 1,
"self": 0.0960217000101693
},
"TrainerController.start_learning": {
"total": 159971.2555709,
"count": 1,
"self": 104.36469283117913,
"children": {
"TrainerController._reset_env": {
"total": 10.891662000212818,
"count": 250,
"self": 10.891662000212818
},
"TrainerController.advance": {
"total": 159855.81322016864,
"count": 3443779,
"self": 92.14484625018667,
"children": {
"env_step": {
"total": 71070.31975681492,
"count": 3443779,
"self": 55959.180508135,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15046.485411652422,
"count": 3443779,
"self": 530.0668466040515,
"children": {
"TorchPolicy.evaluate": {
"total": 14516.41856504837,
"count": 6282640,
"self": 14516.41856504837
}
}
},
"workers": {
"total": 64.65383702749386,
"count": 3443779,
"self": 0.0,
"children": {
"worker_root": {
"total": 159829.19301099877,
"count": 3443779,
"is_parallel": true,
"self": 115599.1904546995,
"children": {
"steps_from_proto": {
"total": 0.5243781015160494,
"count": 500,
"is_parallel": true,
"self": 0.10216150205815211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.42221659945789725,
"count": 2000,
"is_parallel": true,
"self": 0.42221659945789725
}
}
},
"UnityEnvironment.step": {
"total": 44229.47817819775,
"count": 3443779,
"is_parallel": true,
"self": 2351.9206956436974,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1995.7284042327083,
"count": 3443779,
"is_parallel": true,
"self": 1995.7284042327083
},
"communicator.exchange": {
"total": 32654.10387813067,
"count": 3443779,
"is_parallel": true,
"self": 32654.10387813067
},
"steps_from_proto": {
"total": 7227.7252001906745,
"count": 6887558,
"is_parallel": true,
"self": 1418.7065065652132,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5809.018693625461,
"count": 27550232,
"is_parallel": true,
"self": 5809.018693625461
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 88693.34861710353,
"count": 3443779,
"self": 786.5742838846636,
"children": {
"process_trajectory": {
"total": 17157.408444620203,
"count": 3443779,
"self": 17142.514052320097,
"children": {
"RLTrainer._checkpoint": {
"total": 14.89439230010612,
"count": 100,
"self": 14.89439230010612
}
}
},
"_update_policy": {
"total": 70749.36588859867,
"count": 2422,
"self": 10474.710897096433,
"children": {
"TorchPOCAOptimizer.update": {
"total": 60274.65499150223,
"count": 72678,
"self": 60274.65499150223
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100008375942707e-06,
"count": 1,
"self": 1.100008375942707e-06
},
"TrainerController._save_models": {
"total": 0.18599479994736612,
"count": 1,
"self": 0.03299059986602515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15300420008134097,
"count": 1,
"self": 0.15300420008134097
}
}
}
}
}
}
}