{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.782020092010498,
"min": 2.7408828735351562,
"max": 3.148832321166992,
"count": 149
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53147.7109375,
"min": 18819.92578125,
"max": 103040.1640625,
"count": 149
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 79.75806451612904,
"min": 66.62162162162163,
"max": 931.6,
"count": 149
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19780.0,
"min": 16048.0,
"max": 33996.0,
"count": 149
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1323.1592803467115,
"min": 1182.5733001608478,
"max": 1323.1592803467115,
"count": 149
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 164071.75076299222,
"min": 2386.5580859722763,
"max": 193010.27497535327,
"count": 149
},
"SoccerTwos.Step.mean": {
"value": 4999933.0,
"min": 3499420.0,
"max": 4999933.0,
"count": 151
},
"SoccerTwos.Step.sum": {
"value": 4999933.0,
"min": 3499420.0,
"max": 4999933.0,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.12314624339342117,
"min": -0.03092990815639496,
"max": 0.1394692361354828,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 15.270133972167969,
"min": -1.051616907119751,
"max": 16.178430557250977,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.13183137774467468,
"min": -0.036932479590177536,
"max": 0.1436532437801361,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 16.347091674804688,
"min": -1.2557042837142944,
"max": 16.663776397705078,
"count": 151
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 151
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.2843935489654541,
"min": -0.5425764716723386,
"max": 0.9333999752998352,
"count": 151
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 35.26480007171631,
"min": -18.76040017604828,
"max": 35.26480007171631,
"count": 151
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.2843935489654541,
"min": -0.5425764716723386,
"max": 0.9333999752998352,
"count": 151
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 35.26480007171631,
"min": -18.76040017604828,
"max": 35.26480007171631,
"count": 151
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 151
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 151
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01601744956812278,
"min": 0.012989187633502297,
"max": 0.022939226923820874,
"count": 70
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01601744956812278,
"min": 0.012989187633502297,
"max": 0.022939226923820874,
"count": 70
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06345115937292575,
"min": 0.0014499741160155584,
"max": 0.0641418262074391,
"count": 70
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06345115937292575,
"min": 0.0014499741160155584,
"max": 0.0641418262074391,
"count": 70
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06464051492512227,
"min": 0.0014544832248551151,
"max": 0.0654796672364076,
"count": 70
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06464051492512227,
"min": 0.0014544832248551151,
"max": 0.0654796672364076,
"count": 70
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 70
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 70
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 70
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 70
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 70
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 70
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682768356",
"python_version": "3.8.16 (default, Mar 2 2023, 03:21:46) \n[GCC 11.2.0]",
"command_line_arguments": "/root/miniconda3/envs/mlagent_env/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.2+cu111",
"numpy_version": "1.21.2",
"end_time_seconds": "1682771627"
},
"total": 3271.2887740740553,
"count": 1,
"self": 0.5259581981226802,
"children": {
"run_training.setup": {
"total": 0.02622733684256673,
"count": 1,
"self": 0.02622733684256673
},
"TrainerController.start_learning": {
"total": 3270.73658853909,
"count": 1,
"self": 2.9680194517131895,
"children": {
"TrainerController._reset_env": {
"total": 7.201641643652692,
"count": 9,
"self": 7.201641643652692
},
"TrainerController.advance": {
"total": 3260.3147750664502,
"count": 98240,
"self": 3.3056213797535747,
"children": {
"env_step": {
"total": 2625.632222956512,
"count": 98240,
"self": 1997.8606757714879,
"children": {
"SubprocessEnvManager._take_step": {
"total": 625.7929681187961,
"count": 98240,
"self": 19.36032558279112,
"children": {
"TorchPolicy.evaluate": {
"total": 606.432642536005,
"count": 191892,
"self": 606.432642536005
}
}
},
"workers": {
"total": 1.9785790662281215,
"count": 98240,
"self": 0.0,
"children": {
"worker_root": {
"total": 3263.9388357799035,
"count": 98240,
"is_parallel": true,
"self": 1606.7479629141744,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029094689525663853,
"count": 2,
"is_parallel": true,
"self": 0.0007654514629393816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021440174896270037,
"count": 8,
"is_parallel": true,
"self": 0.0021440174896270037
}
}
},
"UnityEnvironment.step": {
"total": 0.03594986000098288,
"count": 1,
"is_parallel": true,
"self": 0.0006534920539706945,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007200029212981462,
"count": 1,
"is_parallel": true,
"self": 0.0007200029212981462
},
"communicator.exchange": {
"total": 0.03261819598264992,
"count": 1,
"is_parallel": true,
"self": 0.03261819598264992
},
"steps_from_proto": {
"total": 0.0019581690430641174,
"count": 2,
"is_parallel": true,
"self": 0.00040725315921008587,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015509158838540316,
"count": 8,
"is_parallel": true,
"self": 0.0015509158838540316
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.016640503890812397,
"count": 16,
"is_parallel": true,
"self": 0.003318921197205782,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.013321582693606615,
"count": 64,
"is_parallel": true,
"self": 0.013321582693606615
}
}
},
"UnityEnvironment.step": {
"total": 1657.1742323618382,
"count": 98239,
"is_parallel": true,
"self": 69.25188956130296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 44.359899832634255,
"count": 98239,
"is_parallel": true,
"self": 44.359899832634255
},
"communicator.exchange": {
"total": 1348.2308604989666,
"count": 98239,
"is_parallel": true,
"self": 1348.2308604989666
},
"steps_from_proto": {
"total": 195.33158246893436,
"count": 196478,
"is_parallel": true,
"self": 39.18618612224236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 156.145396346692,
"count": 785912,
"is_parallel": true,
"self": 156.145396346692
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.3769307301845,
"count": 98240,
"self": 24.519971968838945,
"children": {
"process_trajectory": {
"total": 197.50691253738478,
"count": 98240,
"self": 196.52816882007755,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9787437173072249,
"count": 4,
"self": 0.9787437173072249
}
}
},
"_update_policy": {
"total": 409.3500462239608,
"count": 70,
"self": 247.93217604560778,
"children": {
"TorchPOCAOptimizer.update": {
"total": 161.41787017835304,
"count": 2115,
"self": 161.41787017835304
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.55717134475708e-06,
"count": 1,
"self": 1.55717134475708e-06
},
"TrainerController._save_models": {
"total": 0.25215082010254264,
"count": 1,
"self": 0.007733169011771679,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24441765109077096,
"count": 1,
"self": 0.24441765109077096
}
}
}
}
}
}
}