{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.8730628490448,
"min": 2.8617215156555176,
"max": 3.2958219051361084,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 60771.02734375,
"min": 19359.3828125,
"max": 107074.6328125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.82894736842105,
"min": 58.01176470588236,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20012.0,
"min": 11988.0,
"max": 27972.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1597.30028557217,
"min": 1194.3413621361374,
"max": 1606.6213780570185,
"count": 867
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 242789.64340696984,
"min": 2388.6827242722748,
"max": 271941.0045605601,
"count": 867
},
"SoccerTwos.Step.mean": {
"value": 9999988.0,
"min": 9808.0,
"max": 9999988.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999988.0,
"min": 9808.0,
"max": 9999988.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.1200740858912468,
"min": -0.01309108268469572,
"max": 0.37468603253364563,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 18.131187438964844,
"min": -0.7058466672897339,
"max": 47.08662414550781,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.11712533980607986,
"min": -0.0205258596688509,
"max": 0.37982872128486633,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 17.68592643737793,
"min": -1.056337833404541,
"max": 47.34741973876953,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0505668864344919,
"min": -0.6153846153846154,
"max": 0.863523708176367,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 7.635599851608276,
"min": -53.372000098228455,
"max": 115.39520001411438,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0505668864344919,
"min": -0.6153846153846154,
"max": 0.863523708176367,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 7.635599851608276,
"min": -53.372000098228455,
"max": 115.39520001411438,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017426303517192234,
"min": 0.016077206578168746,
"max": 0.019102142048917206,
"count": 48
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017426303517192234,
"min": 0.016077206578168746,
"max": 0.019102142048917206,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.04179534549514453,
"min": 0.00031694915543387954,
"max": 0.04179534549514453,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.04179534549514453,
"min": 0.00031694915543387954,
"max": 0.04179534549514453,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0436076757311821,
"min": 0.00035278027043811257,
"max": 0.0436076757311821,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0436076757311821,
"min": 0.00035278027043811257,
"max": 0.0436076757311821,
"count": 48
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 3.0000000000000004e-05,
"min": 3.0000000000000004e-05,
"max": 3.0000000000000004e-05,
"count": 48
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 3.0000000000000004e-05,
"min": 3.0000000000000004e-05,
"max": 3.0000000000000004e-05,
"count": 48
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.09999999999999999,
"min": 0.09999999999999999,
"max": 0.09999999999999999,
"count": 48
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.09999999999999999,
"min": 0.09999999999999999,
"max": 0.09999999999999999,
"count": 48
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0005000000000000001,
"min": 0.0005000000000000001,
"max": 0.0005000000000000001,
"count": 48
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0005000000000000001,
"min": 0.0005000000000000001,
"max": 0.0005000000000000001,
"count": 48
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697578148",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\jfelg\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos.exe --run-id=SoccerTwosv5 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1697603536"
},
"total": 25387.0109249,
"count": 1,
"self": 0.27513830000680173,
"children": {
"run_training.setup": {
"total": 0.09081249999871943,
"count": 1,
"self": 0.09081249999871943
},
"TrainerController.start_learning": {
"total": 25386.644974099996,
"count": 1,
"self": 14.330496000147832,
"children": {
"TrainerController._reset_env": {
"total": 4.731971600005636,
"count": 2,
"self": 4.731971600005636
},
"TrainerController.advance": {
"total": 25367.419991499846,
"count": 638980,
"self": 15.043644593963108,
"children": {
"env_step": {
"total": 11934.713227302374,
"count": 638980,
"self": 8914.857568500935,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3010.699860498833,
"count": 638980,
"self": 84.67556860102195,
"children": {
"TorchPolicy.evaluate": {
"total": 2926.024291897811,
"count": 1250950,
"self": 2926.024291897811
}
}
},
"workers": {
"total": 9.155798302606854,
"count": 638980,
"self": 0.0,
"children": {
"worker_root": {
"total": 25363.593820000708,
"count": 638980,
"is_parallel": true,
"self": 18273.61071539773,
"children": {
"steps_from_proto": {
"total": 0.0032063999969977885,
"count": 4,
"is_parallel": true,
"self": 0.0006805999873904511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025258000096073374,
"count": 16,
"is_parallel": true,
"self": 0.0025258000096073374
}
}
},
"UnityEnvironment.step": {
"total": 7089.979898202982,
"count": 638980,
"is_parallel": true,
"self": 324.4559530022161,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 250.7997671997582,
"count": 638980,
"is_parallel": true,
"self": 250.7997671997582
},
"communicator.exchange": {
"total": 5480.399172400401,
"count": 638980,
"is_parallel": true,
"self": 5480.399172400401
},
"steps_from_proto": {
"total": 1034.3250056006073,
"count": 1277960,
"is_parallel": true,
"self": 212.83390089090972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 821.4911047096975,
"count": 5111840,
"is_parallel": true,
"self": 821.4911047096975
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 13417.663119603509,
"count": 638980,
"self": 105.40617439909693,
"children": {
"process_trajectory": {
"total": 2081.8678270043747,
"count": 638980,
"self": 2078.001419104381,
"children": {
"RLTrainer._checkpoint": {
"total": 3.8664078999936464,
"count": 20,
"self": 3.8664078999936464
}
}
},
"_update_policy": {
"total": 11230.389118200037,
"count": 48,
"self": 1414.8527947996336,
"children": {
"TorchPOCAOptimizer.update": {
"total": 9815.536323400403,
"count": 14400,
"self": 9815.536323400403
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000003385357559e-06,
"count": 1,
"self": 1.0000003385357559e-06
},
"TrainerController._save_models": {
"total": 0.16251399999600835,
"count": 1,
"self": 0.0019696999952429906,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16054430000076536,
"count": 1,
"self": 0.16054430000076536
}
}
}
}
}
}
}