{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.273096799850464,
"min": 3.2668933868408203,
"max": 3.295724868774414,
"count": 10
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37077.640625,
"min": 37077.640625,
"max": 107257.7421875,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 772.8333333333334,
"min": 409.1666666666667,
"max": 999.0,
"count": 10
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18548.0,
"min": 15132.0,
"max": 27536.0,
"count": 10
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.1048419078513,
"min": 1197.3351400713882,
"max": 1202.1048419078513,
"count": 9
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7212.629051447108,
"min": 2394.6702801427764,
"max": 16781.732370544243,
"count": 9
},
"SoccerTwos.Step.mean": {
"value": 99835.0,
"min": 9788.0,
"max": 99835.0,
"count": 10
},
"SoccerTwos.Step.sum": {
"value": 99835.0,
"min": 9788.0,
"max": 99835.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0485982783138752,
"min": -0.0969996526837349,
"max": -0.0485982783138752,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.5831793546676636,
"min": -1.7221587896347046,
"max": -0.5617256164550781,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04743114486336708,
"min": -0.09698408842086792,
"max": -0.04743114486336708,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.5691737532615662,
"min": -1.7347235679626465,
"max": -0.5584996342658997,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.22946666677792868,
"min": -0.4254250004887581,
"max": 0.20239999890327454,
"count": 10
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.753600001335144,
"min": -6.806800007820129,
"max": 3.7979999780654907,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.22946666677792868,
"min": -0.4254250004887581,
"max": 0.20239999890327454,
"count": 10
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.753600001335144,
"min": -6.806800007820129,
"max": 3.7979999780654907,
"count": 10
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018837112501690473,
"min": 0.011768401923472994,
"max": 0.018837112501690473,
"count": 4
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018837112501690473,
"min": 0.011768401923472994,
"max": 0.018837112501690473,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.001745747074407215,
"min": 0.0013953256459596256,
"max": 0.007921716229369243,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.001745747074407215,
"min": 0.0013953256459596256,
"max": 0.007921716229369243,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0017536110477522015,
"min": 0.0013969194435048849,
"max": 0.007526869854579369,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0017536110477522015,
"min": 0.0013969194435048849,
"max": 0.007526869854579369,
"count": 4
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 4
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686402403",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./SoccerTwos.yaml --env ./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id SoccerTwo --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686402973"
},
"total": 570.6112363320001,
"count": 1,
"self": 0.5389801770002123,
"children": {
"run_training.setup": {
"total": 0.06431903599991529,
"count": 1,
"self": 0.06431903599991529
},
"TrainerController.start_learning": {
"total": 570.007937119,
"count": 1,
"self": 0.28256211299753886,
"children": {
"TrainerController._reset_env": {
"total": 1.8835971869996229,
"count": 2,
"self": 1.8835971869996229
},
"TrainerController.advance": {
"total": 567.4930995030027,
"count": 7225,
"self": 0.2787904750009602,
"children": {
"env_step": {
"total": 258.1835296370007,
"count": 7225,
"self": 212.23751650999384,
"children": {
"SubprocessEnvManager._take_step": {
"total": 45.76937661199872,
"count": 7225,
"self": 1.6032072839859666,
"children": {
"TorchPolicy.evaluate": {
"total": 44.16616932801276,
"count": 14350,
"self": 44.16616932801276
}
}
},
"workers": {
"total": 0.17663651500811284,
"count": 7225,
"self": 0.0,
"children": {
"worker_root": {
"total": 567.7389741700101,
"count": 7225,
"is_parallel": true,
"self": 392.33588427904556,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0084156069999608,
"count": 2,
"is_parallel": true,
"self": 0.005412873999603107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030027330003576935,
"count": 8,
"is_parallel": true,
"self": 0.0030027330003576935
}
}
},
"UnityEnvironment.step": {
"total": 0.07044513400001051,
"count": 1,
"is_parallel": true,
"self": 0.0013436229999115312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0014680570000109583,
"count": 1,
"is_parallel": true,
"self": 0.0014680570000109583
},
"communicator.exchange": {
"total": 0.06282397099994341,
"count": 1,
"is_parallel": true,
"self": 0.06282397099994341
},
"steps_from_proto": {
"total": 0.00480948300014461,
"count": 2,
"is_parallel": true,
"self": 0.0009365330001855909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003872949999959019,
"count": 8,
"is_parallel": true,
"self": 0.003872949999959019
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 175.40078959896482,
"count": 7224,
"is_parallel": true,
"self": 9.913494993955737,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.261065245018699,
"count": 7224,
"is_parallel": true,
"self": 6.261065245018699
},
"communicator.exchange": {
"total": 124.5604742050109,
"count": 7224,
"is_parallel": true,
"self": 124.5604742050109
},
"steps_from_proto": {
"total": 34.66575515497948,
"count": 14448,
"is_parallel": true,
"self": 6.376640015944076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.289115139035403,
"count": 57792,
"is_parallel": true,
"self": 28.289115139035403
}
}
}
}
},
"steps_from_proto": {
"total": 0.002300291999745241,
"count": 2,
"is_parallel": true,
"self": 0.0004718130007859145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018284789989593264,
"count": 8,
"is_parallel": true,
"self": 0.0018284789989593264
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 309.03077939100103,
"count": 7225,
"self": 1.7174269240124431,
"children": {
"process_trajectory": {
"total": 40.6562813089879,
"count": 7225,
"self": 40.6562813089879
},
"_update_policy": {
"total": 266.6570711580007,
"count": 4,
"self": 23.40013348399816,
"children": {
"TorchPOCAOptimizer.update": {
"total": 243.25693767400253,
"count": 120,
"self": 243.25693767400253
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3059998309472576e-06,
"count": 1,
"self": 1.3059998309472576e-06
},
"TrainerController._save_models": {
"total": 0.3486770100003014,
"count": 1,
"self": 0.0019965860005868308,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34668042399971455,
"count": 1,
"self": 0.34668042399971455
}
}
}
}
}
}
}