poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4313995838165283,
"min": 1.3370754718780518,
"max": 3.2957136631011963,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28582.1875,
"min": 23355.708984375,
"max": 123306.5625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 71.84057971014492,
"min": 43.944954128440365,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19828.0,
"min": 15972.0,
"max": 26820.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1711.3748529366103,
"min": 1192.1688832781308,
"max": 1757.9048700728965,
"count": 4960
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 236169.72970525222,
"min": 2384.3377665562616,
"max": 372015.2229110092,
"count": 4960
},
"SoccerTwos.Step.mean": {
"value": 49999974.0,
"min": 9428.0,
"max": 49999974.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999974.0,
"min": 9428.0,
"max": 49999974.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0491936057806015,
"min": -0.13646982610225677,
"max": 0.19538433849811554,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.739523887634277,
"min": -23.063400268554688,
"max": 24.145103454589844,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0504918098449707,
"min": -0.1415393054485321,
"max": 0.19853390753269196,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.917377948760986,
"min": -23.920143127441406,
"max": 24.684202194213867,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.14086277249955784,
"min": -0.5440117646666134,
"max": 0.40071194801690446,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -19.298199832439423,
"min": -65.68080008029938,
"max": 63.713199734687805,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.14086277249955784,
"min": -0.5440117646666134,
"max": 0.40071194801690446,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -19.298199832439423,
"min": -65.68080008029938,
"max": 63.713199734687805,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.011858370089779175,
"min": 0.009039412205068705,
"max": 0.025838682233976822,
"count": 2422
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.011858370089779175,
"min": 0.009039412205068705,
"max": 0.025838682233976822,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09803017849723498,
"min": 5.647592672630708e-07,
"max": 0.11775427137811979,
"count": 2422
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09803017849723498,
"min": 5.647592672630708e-07,
"max": 0.11775427137811979,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09895891770720482,
"min": 5.779042766107523e-07,
"max": 0.12086075792709987,
"count": 2422
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09895891770720482,
"min": 5.779042766107523e-07,
"max": 0.12086075792709987,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2422
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2422
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2422
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704357120",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:34:57) [MSC v.1936 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\hanke\\AppData\\Roaming\\Python\\Python310\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1704562718"
},
"total": 205598.4578497,
"count": 1,
"self": 0.4013831000775099,
"children": {
"run_training.setup": {
"total": 0.11664410005323589,
"count": 1,
"self": 0.11664410005323589
},
"TrainerController.start_learning": {
"total": 205597.93982249987,
"count": 1,
"self": 96.3563927647192,
"children": {
"TrainerController._reset_env": {
"total": 10.219013900728896,
"count": 250,
"self": 10.219013900728896
},
"TrainerController.advance": {
"total": 205491.23172393464,
"count": 3434191,
"self": 93.86372776678763,
"children": {
"env_step": {
"total": 75273.94705407321,
"count": 3434191,
"self": 59624.64579544333,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15588.993267453276,
"count": 3434191,
"self": 534.1782296341844,
"children": {
"TorchPolicy.evaluate": {
"total": 15054.815037819091,
"count": 6285676,
"self": 15054.815037819091
}
}
},
"workers": {
"total": 60.30799117661081,
"count": 3434191,
"self": 0.0,
"children": {
"worker_root": {
"total": 205460.83039254998,
"count": 3434191,
"is_parallel": true,
"self": 157265.73885627207,
"children": {
"steps_from_proto": {
"total": 0.5397364990785718,
"count": 500,
"is_parallel": true,
"self": 0.10658559203147888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.4331509070470929,
"count": 2000,
"is_parallel": true,
"self": 0.4331509070470929
}
}
},
"UnityEnvironment.step": {
"total": 48194.55179977883,
"count": 3434191,
"is_parallel": true,
"self": 2551.1347198931035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1917.7697322282474,
"count": 3434191,
"is_parallel": true,
"self": 1917.7697322282474
},
"communicator.exchange": {
"total": 35254.17744936934,
"count": 3434191,
"is_parallel": true,
"self": 35254.17744936934
},
"steps_from_proto": {
"total": 8471.469898288138,
"count": 6868382,
"is_parallel": true,
"self": 1610.608937019715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6860.860961268423,
"count": 27473528,
"is_parallel": true,
"self": 6860.860961268423
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 130123.42094209464,
"count": 3434191,
"self": 627.715604780009,
"children": {
"process_trajectory": {
"total": 16163.501031924505,
"count": 3434191,
"self": 16147.823729424272,
"children": {
"RLTrainer._checkpoint": {
"total": 15.677302500233054,
"count": 100,
"self": 15.677302500233054
}
}
},
"_update_policy": {
"total": 113332.20430539013,
"count": 2422,
"self": 7811.7441535941325,
"children": {
"TorchPOCAOptimizer.update": {
"total": 105520.460151796,
"count": 72660,
"self": 105520.460151796
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0998919606208801e-06,
"count": 1,
"self": 1.0998919606208801e-06
},
"TrainerController._save_models": {
"total": 0.1326907998882234,
"count": 1,
"self": 0.003899799892678857,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12879099999554455,
"count": 1,
"self": 0.12879099999554455
}
}
}
}
}
}
}
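
The JSON above is the hierarchical timer and gauge log that ML-Agents writes at the end of a training run: "gauges" holds per-metric value/min/max/count summaries, while the nested "total"/"count"/"self"/"children" blocks record wall-clock time (in seconds) spent in each stage of the trainer. A minimal sketch for inspecting it, assuming the file is saved locally as run_logs/timers.json as in the path above; the helper print_block is illustrative, not part of ML-Agents:

import json

# Load the timer/gauge log written by mlagents-learn.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Summarise each gauge: last recorded value plus observed min/max and sample count.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} (count={gauge['count']})")

# Walk the hierarchical timer tree; every node carries total/count/self in
# seconds, with nested blocks under "children".
def print_block(name, block, depth=0):
    print(f"{'  ' * depth}{name}: total={block['total']:.1f}s, "
          f"count={block['count']}, self={block['self']:.1f}s")
    for child_name, child in block.get("children", {}).items():
        print_block(child_name, child, depth + 1)

print_block(timers["name"], timers)

Run against this file, the tree view makes it easy to see where the ~205,598 s of wall-clock time went, e.g. that TorchPOCAOptimizer.update and communicator.exchange dominate the training and environment-stepping costs respectively.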