{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2717387676239014,
"min": 3.2706680297851562,
"max": 3.295699119567871,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 72030.6015625,
"min": 21255.86328125,
"max": 115515.96875,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 959.6,
"min": 436.09090909090907,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19192.0,
"min": 16696.0,
"max": 23492.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.1142323684549,
"min": 1194.3428968907385,
"max": 1202.7057443496806,
"count": 45
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2396.2284647369097,
"min": 2393.110416278334,
"max": 16771.890075551113,
"count": 45
},
"SoccerTwos.Step.mean": {
"value": 499284.0,
"min": 9100.0,
"max": 499284.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499284.0,
"min": 9100.0,
"max": 499284.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.009632928296923637,
"min": -0.018809394910931587,
"max": 0.01007391419261694,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09632927924394608,
"min": -0.29331517219543457,
"max": 0.1309608817100525,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007784527726471424,
"min": -0.020163726061582565,
"max": -0.0017211500089615583,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07784527540206909,
"min": -0.321410596370697,
"max": -0.01721150055527687,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.05684000253677368,
"min": -0.5,
"max": 0.19935293933924506,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.5684000253677368,
"min": -8.0,
"max": 3.388999968767166,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.05684000253677368,
"min": -0.5,
"max": 0.19935293933924506,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.5684000253677368,
"min": -8.0,
"max": 3.388999968767166,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01252923917490989,
"min": 0.009265263719134964,
"max": 0.023902750527486204,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01252923917490989,
"min": 0.009265263719134964,
"max": 0.023902750527486204,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0009007092099636793,
"min": 2.652021994435927e-05,
"max": 0.004625796945765615,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0009007092099636793,
"min": 2.652021994435927e-05,
"max": 0.004625796945765615,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0009099281451199204,
"min": 2.2246412845561282e-05,
"max": 0.004623155016452074,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0009099281451199204,
"min": 2.2246412845561282e-05,
"max": 0.004623155016452074,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.2,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713443452",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ml-agents/config/poca/SoccerTwos.yaml --env ml-agents/training-env-executables/SoccerTwos.x86_64 --run-id SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713445200"
},
"total": 1747.641900863,
"count": 1,
"self": 0.5623029670000506,
"children": {
"run_training.setup": {
"total": 0.06831463600002508,
"count": 1,
"self": 0.06831463600002508
},
"TrainerController.start_learning": {
"total": 1747.01128326,
"count": 1,
"self": 1.150616828980219,
"children": {
"TrainerController._reset_env": {
"total": 3.712776445000202,
"count": 3,
"self": 3.712776445000202
},
"TrainerController.advance": {
"total": 1741.85666664102,
"count": 32834,
"self": 1.3255909720230648,
"children": {
"env_step": {
"total": 1100.9859284589975,
"count": 32834,
"self": 891.0796303019877,
"children": {
"SubprocessEnvManager._take_step": {
"total": 209.1213839550187,
"count": 32834,
"self": 7.866984353005762,
"children": {
"TorchPolicy.evaluate": {
"total": 201.25439960201294,
"count": 65238,
"self": 201.25439960201294
}
}
},
"workers": {
"total": 0.7849142019910573,
"count": 32834,
"self": 0.0,
"children": {
"worker_root": {
"total": 1742.4318820689796,
"count": 32834,
"is_parallel": true,
"self": 1011.4445486370073,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00716412299999547,
"count": 2,
"is_parallel": true,
"self": 0.004172961999984182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029911610000112887,
"count": 8,
"is_parallel": true,
"self": 0.0029911610000112887
}
}
},
"UnityEnvironment.step": {
"total": 0.05136752800001432,
"count": 1,
"is_parallel": true,
"self": 0.001393323999934637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009983710000369683,
"count": 1,
"is_parallel": true,
"self": 0.0009983710000369683
},
"communicator.exchange": {
"total": 0.044745535999993535,
"count": 1,
"is_parallel": true,
"self": 0.044745535999993535
},
"steps_from_proto": {
"total": 0.004230297000049177,
"count": 2,
"is_parallel": true,
"self": 0.000737669000045571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003492628000003606,
"count": 8,
"is_parallel": true,
"self": 0.003492628000003606
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 730.9823270119725,
"count": 32833,
"is_parallel": true,
"self": 46.991319441982,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.60093899799483,
"count": 32833,
"is_parallel": true,
"self": 28.60093899799483
},
"communicator.exchange": {
"total": 512.1773981309882,
"count": 32833,
"is_parallel": true,
"self": 512.1773981309882
},
"steps_from_proto": {
"total": 143.2126704410075,
"count": 65666,
"is_parallel": true,
"self": 25.112212906075058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 118.10045753493245,
"count": 262664,
"is_parallel": true,
"self": 118.10045753493245
}
}
}
}
},
"steps_from_proto": {
"total": 0.005006419999745049,
"count": 4,
"is_parallel": true,
"self": 0.001064964999386575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003941455000358474,
"count": 16,
"is_parallel": true,
"self": 0.003941455000358474
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 639.5451472099995,
"count": 32834,
"self": 9.06511179998256,
"children": {
"process_trajectory": {
"total": 189.13191706501658,
"count": 32834,
"self": 188.8667908120163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2651262530002896,
"count": 1,
"self": 0.2651262530002896
}
}
},
"_update_policy": {
"total": 441.3481183450004,
"count": 23,
"self": 35.52136787600023,
"children": {
"TorchPOCAOptimizer.update": {
"total": 405.82675046900016,
"count": 230,
"self": 405.82675046900016
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0339999789721332e-06,
"count": 1,
"self": 1.0339999789721332e-06
},
"TrainerController._save_models": {
"total": 0.2912223109997285,
"count": 1,
"self": 0.00551674699954674,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28570556400018177,
"count": 1,
"self": 0.28570556400018177
}
}
}
}
}
}
}