{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.886430263519287,
"min": 2.806196928024292,
"max": 3.2956626415252686,
"count": 124
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 57636.23828125,
"min": 14634.5126953125,
"max": 109185.234375,
"count": 124
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 86.41071428571429,
"min": 79.98412698412699,
"max": 999.0,
"count": 124
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19356.0,
"min": 8380.0,
"max": 27000.0,
"count": 124
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1345.4823567514406,
"min": 1198.9970947669865,
"max": 1345.4823567514406,
"count": 121
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 150694.02395616134,
"min": 2397.9941895339734,
"max": 168202.87468894984,
"count": 121
},
"SoccerTwos.Step.mean": {
"value": 1239923.0,
"min": 9196.0,
"max": 1239923.0,
"count": 124
},
"SoccerTwos.Step.sum": {
"value": 1239923.0,
"min": 9196.0,
"max": 1239923.0,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.11026978492736816,
"min": -0.08389207720756531,
"max": 0.16095934808254242,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 12.460485458374023,
"min": -1.0905958414077759,
"max": 15.958245277404785,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.11049355566501617,
"min": -0.08388417214155197,
"max": 0.16049960255622864,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 12.485772132873535,
"min": -1.0904635190963745,
"max": 15.856868743896484,
"count": 124
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 124
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.32790796419160556,
"min": -0.6259076916254483,
"max": 0.4461333420541551,
"count": 124
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 37.05359995365143,
"min": -27.90720009803772,
"max": 37.05359995365143,
"count": 124
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.32790796419160556,
"min": -0.6259076916254483,
"max": 0.4461333420541551,
"count": 124
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 37.05359995365143,
"min": -27.90720009803772,
"max": 37.05359995365143,
"count": 124
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 124
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 124
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017933064362053604,
"min": 0.010807520801123852,
"max": 0.02106685910645562,
"count": 58
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017933064362053604,
"min": 0.010807520801123852,
"max": 0.02106685910645562,
"count": 58
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.059848784531156225,
"min": 0.0005380973469679399,
"max": 0.059848784531156225,
"count": 58
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.059848784531156225,
"min": 0.0005380973469679399,
"max": 0.059848784531156225,
"count": 58
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0615954947968324,
"min": 0.0005495274681986,
"max": 0.0615954947968324,
"count": 58
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0615954947968324,
"min": 0.0005495274681986,
"max": 0.0615954947968324,
"count": 58
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 58
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 58
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 58
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 58
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 58
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 58
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717444594",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717447320"
},
"total": 2725.934264701,
"count": 1,
"self": 0.13758261400016636,
"children": {
"run_training.setup": {
"total": 0.06685330499999509,
"count": 1,
"self": 0.06685330499999509
},
"TrainerController.start_learning": {
"total": 2725.729828782,
"count": 1,
"self": 1.9225073879970296,
"children": {
"TrainerController._reset_env": {
"total": 3.656940650000138,
"count": 7,
"self": 3.656940650000138
},
"TrainerController.advance": {
"total": 2719.7633354740024,
"count": 81661,
"self": 2.082915705114374,
"children": {
"env_step": {
"total": 2200.55965069901,
"count": 81661,
"self": 1676.967132833107,
"children": {
"SubprocessEnvManager._take_step": {
"total": 522.4249811439598,
"count": 81661,
"self": 16.175533431941176,
"children": {
"TorchPolicy.evaluate": {
"total": 506.24944771201865,
"count": 159978,
"self": 506.24944771201865
}
}
},
"workers": {
"total": 1.1675367219431791,
"count": 81661,
"self": 0.0,
"children": {
"worker_root": {
"total": 2720.472810753042,
"count": 81661,
"is_parallel": true,
"self": 1312.1720663330736,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0061693979999972726,
"count": 2,
"is_parallel": true,
"self": 0.003932343000087712,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00223705499990956,
"count": 8,
"is_parallel": true,
"self": 0.00223705499990956
}
}
},
"UnityEnvironment.step": {
"total": 0.039695700000038414,
"count": 1,
"is_parallel": true,
"self": 0.0011881120001362433,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008654870000555093,
"count": 1,
"is_parallel": true,
"self": 0.0008654870000555093
},
"communicator.exchange": {
"total": 0.03424549599992588,
"count": 1,
"is_parallel": true,
"self": 0.03424549599992588
},
"steps_from_proto": {
"total": 0.0033966049999207826,
"count": 2,
"is_parallel": true,
"self": 0.000648830999580241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027477740003405415,
"count": 8,
"is_parallel": true,
"self": 0.0027477740003405415
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1408.2878612979682,
"count": 81660,
"is_parallel": true,
"self": 86.80044462888077,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 60.041113616990174,
"count": 81660,
"is_parallel": true,
"self": 60.041113616990174
},
"communicator.exchange": {
"total": 989.0573466660942,
"count": 81660,
"is_parallel": true,
"self": 989.0573466660942
},
"steps_from_proto": {
"total": 272.3889563860031,
"count": 163320,
"is_parallel": true,
"self": 44.932211983203956,
"children": {
"_process_rank_one_or_two_observation": {
"total": 227.45674440279913,
"count": 653280,
"is_parallel": true,
"self": 227.45674440279913
}
}
}
}
},
"steps_from_proto": {
"total": 0.012883121999948344,
"count": 12,
"is_parallel": true,
"self": 0.0027552960009415983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.010127825999006745,
"count": 48,
"is_parallel": true,
"self": 0.010127825999006745
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 517.120769069878,
"count": 81661,
"self": 16.27377930387945,
"children": {
"process_trajectory": {
"total": 159.97673368699827,
"count": 81661,
"self": 159.34977788699837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6269557999999051,
"count": 2,
"self": 0.6269557999999051
}
}
},
"_update_policy": {
"total": 340.8702560790002,
"count": 58,
"self": 203.80017319900503,
"children": {
"TorchPOCAOptimizer.update": {
"total": 137.0700828799952,
"count": 1749,
"self": 137.0700828799952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.362000148219522e-06,
"count": 1,
"self": 1.362000148219522e-06
},
"TrainerController._save_models": {
"total": 0.38704390800012334,
"count": 1,
"self": 0.002530480000132229,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3845134279999911,
"count": 1,
"self": 0.3845134279999911
}
}
}
}
}
}
}