{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4636728763580322,
"min": 1.3435970544815063,
"max": 3.295746088027954,
"count": 1972
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28196.193359375,
"min": 24718.8515625,
"max": 148478.953125,
"count": 1972
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.56521739130435,
"min": 39.048780487804876,
"max": 999.0,
"count": 1972
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19712.0,
"min": 16364.0,
"max": 25464.0,
"count": 1972
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1647.1849509263236,
"min": 1190.986795050281,
"max": 1696.400452141114,
"count": 1924
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 303082.03097044356,
"min": 2385.8947458905805,
"max": 392999.6386615653,
"count": 1924
},
"SoccerTwos.Step.mean": {
"value": 19719988.0,
"min": 9754.0,
"max": 19719988.0,
"count": 1972
},
"SoccerTwos.Step.sum": {
"value": 19719988.0,
"min": 9754.0,
"max": 19719988.0,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.09327541291713715,
"min": -0.13546991348266602,
"max": 0.15362755954265594,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -17.162675857543945,
"min": -26.958511352539062,
"max": 26.16107177734375,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09281494468450546,
"min": -0.1330975443124771,
"max": 0.15069669485092163,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -17.07794952392578,
"min": -26.48641014099121,
"max": 27.064729690551758,
"count": 1972
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1972
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17539999731208966,
"min": -0.5836235284805298,
"max": 0.4452646099604093,
"count": 1972
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -32.2735995054245,
"min": -63.24500036239624,
"max": 68.44459998607635,
"count": 1972
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17539999731208966,
"min": -0.5836235284805298,
"max": 0.4452646099604093,
"count": 1972
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -32.2735995054245,
"min": -63.24500036239624,
"max": 68.44459998607635,
"count": 1972
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1972
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1972
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019298918750913194,
"min": 0.011196109272229174,
"max": 0.027086789696477355,
"count": 952
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019298918750913194,
"min": 0.011196109272229174,
"max": 0.027086789696477355,
"count": 952
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10475120668609937,
"min": 3.4986561028442037e-06,
"max": 0.133472091704607,
"count": 952
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10475120668609937,
"min": 3.4986561028442037e-06,
"max": 0.133472091704607,
"count": 952
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10603421678145726,
"min": 3.4707499215376933e-06,
"max": 0.13618898292382559,
"count": 952
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10603421678145726,
"min": 3.4707499215376933e-06,
"max": 0.13618898292382559,
"count": 952
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 952
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 952
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 952
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 952
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 952
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 952
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680040037",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:24) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/anthony/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680117636"
},
"total": 77598.180075375,
"count": 1,
"self": 0.4255744170077378,
"children": {
"run_training.setup": {
"total": 0.01587929099999985,
"count": 1,
"self": 0.01587929099999985
},
"TrainerController.start_learning": {
"total": 77597.738621667,
"count": 1,
"self": 13.890024387903395,
"children": {
"TrainerController._reset_env": {
"total": 6.029232918988157,
"count": 99,
"self": 6.029232918988157
},
"TrainerController.advance": {
"total": 77577.66544098512,
"count": 1355828,
"self": 12.091036097030155,
"children": {
"env_step": {
"total": 63107.72229606026,
"count": 1355828,
"self": 61311.28999381982,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1785.9540610652211,
"count": 1355828,
"self": 58.361526692224516,
"children": {
"TorchPolicy.evaluate": {
"total": 1727.5925343729966,
"count": 2483800,
"self": 1727.5925343729966
}
}
},
"workers": {
"total": 10.478241175224287,
"count": 1355827,
"self": 0.0,
"children": {
"worker_root": {
"total": 77569.61498527639,
"count": 1355827,
"is_parallel": true,
"self": 17842.019668536588,
"children": {
"steps_from_proto": {
"total": 0.14007716891832733,
"count": 198,
"is_parallel": true,
"self": 0.016171200886962334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.123905968031365,
"count": 792,
"is_parallel": true,
"self": 0.123905968031365
}
}
},
"UnityEnvironment.step": {
"total": 59727.45523957089,
"count": 1355827,
"is_parallel": true,
"self": 164.76067077563494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1061.2439661229068,
"count": 1355827,
"is_parallel": true,
"self": 1061.2439661229068
},
"communicator.exchange": {
"total": 56219.48631231125,
"count": 1355827,
"is_parallel": true,
"self": 56219.48631231125
},
"steps_from_proto": {
"total": 2281.9642903610975,
"count": 2711654,
"is_parallel": true,
"self": 246.62761488650222,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2035.3366754745953,
"count": 10846616,
"is_parallel": true,
"self": 2035.3366754745953
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 14457.852108827828,
"count": 1355827,
"self": 131.49974984878463,
"children": {
"process_trajectory": {
"total": 2574.730039186065,
"count": 1355827,
"self": 2569.0579456890728,
"children": {
"RLTrainer._checkpoint": {
"total": 5.67209349699192,
"count": 39,
"self": 5.67209349699192
}
}
},
"_update_policy": {
"total": 11751.62231979298,
"count": 952,
"self": 1308.9996271409964,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10442.622692651983,
"count": 28560,
"self": 10442.622692651983
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.089984137564898e-07,
"count": 1,
"self": 7.089984137564898e-07
},
"TrainerController._save_models": {
"total": 0.15392266599519644,
"count": 1,
"self": 0.0007123329996829852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15321033299551345,
"count": 1,
"self": 0.15321033299551345
}
}
}
}
}
}
}