{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7437447309494019,
"min": 1.7315653562545776,
"max": 3.29567289352417,
"count": 601
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33312.5,
"min": 13444.439453125,
"max": 119655.625,
"count": 601
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.860759493670884,
"min": 45.76635514018692,
"max": 999.0,
"count": 601
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19864.0,
"min": 13656.0,
"max": 26348.0,
"count": 601
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1601.8699573878819,
"min": 1191.1123378177726,
"max": 1607.603739333349,
"count": 571
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 253095.45326728534,
"min": 2384.6432361123093,
"max": 339585.8386923352,
"count": 571
},
"SoccerTwos.Step.mean": {
"value": 6009996.0,
"min": 9192.0,
"max": 6009996.0,
"count": 601
},
"SoccerTwos.Step.sum": {
"value": 6009996.0,
"min": 9192.0,
"max": 6009996.0,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.029794463887810707,
"min": -0.1359105408191681,
"max": 0.1818428784608841,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.707525253295898,
"min": -22.42523956298828,
"max": 25.78728675842285,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.030805815011262894,
"min": -0.1358196884393692,
"max": 0.1897372156381607,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.867318630218506,
"min": -22.410249710083008,
"max": 25.02996826171875,
"count": 601
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 601
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.29110885949074466,
"min": -0.626007409007461,
"max": 0.6455857178994587,
"count": 601
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -45.99519979953766,
"min": -50.74680018424988,
"max": 60.02239990234375,
"count": 601
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.29110885949074466,
"min": -0.626007409007461,
"max": 0.6455857178994587,
"count": 601
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -45.99519979953766,
"min": -50.74680018424988,
"max": 60.02239990234375,
"count": 601
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 601
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 601
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016154999113981224,
"min": 0.010843643702294988,
"max": 0.024398589048845072,
"count": 287
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016154999113981224,
"min": 0.010843643702294988,
"max": 0.024398589048845072,
"count": 287
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09558494711915652,
"min": 8.604709250903397e-06,
"max": 0.11457380279898643,
"count": 287
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09558494711915652,
"min": 8.604709250903397e-06,
"max": 0.11457380279898643,
"count": 287
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09721433917681375,
"min": 8.349429746582852e-06,
"max": 0.11660631770888964,
"count": 287
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09721433917681375,
"min": 8.349429746582852e-06,
"max": 0.11660631770888964,
"count": 287
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 287
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 287
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 287
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 287
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 287
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 287
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688192759",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/falguni/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688206113"
},
"total": 13353.832674457999,
"count": 1,
"self": 0.0563971319970733,
"children": {
"run_training.setup": {
"total": 0.012138932001107605,
"count": 1,
"self": 0.012138932001107605
},
"TrainerController.start_learning": {
"total": 13353.764138394,
"count": 1,
"self": 9.482302090986195,
"children": {
"TrainerController._reset_env": {
"total": 6.079826735000097,
"count": 31,
"self": 6.079826735000097
},
"TrainerController.advance": {
"total": 13337.937720122014,
"count": 405092,
"self": 9.082247380691115,
"children": {
"env_step": {
"total": 8359.656683257941,
"count": 405092,
"self": 6573.244519398053,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1780.567025279055,
"count": 405092,
"self": 53.82817040557711,
"children": {
"TorchPolicy.evaluate": {
"total": 1726.738854873478,
"count": 762768,
"self": 1726.738854873478
}
}
},
"workers": {
"total": 5.845138580833009,
"count": 405091,
"self": 0.0,
"children": {
"worker_root": {
"total": 13335.899672741258,
"count": 405091,
"is_parallel": true,
"self": 7821.509282957135,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0036038180005562026,
"count": 2,
"is_parallel": true,
"self": 0.001695593000476947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019082250000792556,
"count": 8,
"is_parallel": true,
"self": 0.0019082250000792556
}
}
},
"UnityEnvironment.step": {
"total": 0.03019148400017002,
"count": 1,
"is_parallel": true,
"self": 0.0006591580004169373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005102590002934448,
"count": 1,
"is_parallel": true,
"self": 0.0005102590002934448
},
"communicator.exchange": {
"total": 0.027050381000663037,
"count": 1,
"is_parallel": true,
"self": 0.027050381000663037
},
"steps_from_proto": {
"total": 0.0019716859987966018,
"count": 2,
"is_parallel": true,
"self": 0.00040098399585986044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015707020029367413,
"count": 8,
"is_parallel": true,
"self": 0.0015707020029367413
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5514.323992152129,
"count": 405090,
"is_parallel": true,
"self": 292.5599745391628,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 175.87191238900596,
"count": 405090,
"is_parallel": true,
"self": 175.87191238900596
},
"communicator.exchange": {
"total": 4247.232349760192,
"count": 405090,
"is_parallel": true,
"self": 4247.232349760192
},
"steps_from_proto": {
"total": 798.6597554637683,
"count": 810180,
"is_parallel": true,
"self": 151.70362268575263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 646.9561327780157,
"count": 3240720,
"is_parallel": true,
"self": 646.9561327780157
}
}
}
}
},
"steps_from_proto": {
"total": 0.06639763199382287,
"count": 60,
"is_parallel": true,
"self": 0.012711123001281521,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05368650899254135,
"count": 240,
"is_parallel": true,
"self": 0.05368650899254135
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4969.198789483382,
"count": 405091,
"self": 64.48212545998103,
"children": {
"process_trajectory": {
"total": 918.0633754954124,
"count": 405091,
"self": 914.5493576114113,
"children": {
"RLTrainer._checkpoint": {
"total": 3.5140178840010776,
"count": 12,
"self": 3.5140178840010776
}
}
},
"_update_policy": {
"total": 3986.6532885279885,
"count": 287,
"self": 962.6635599140955,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3023.989728613893,
"count": 8619,
"self": 3023.989728613893
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1859992810059339e-06,
"count": 1,
"self": 1.1859992810059339e-06
},
"TrainerController._save_models": {
"total": 0.26428826000119443,
"count": 1,
"self": 0.0017454880035074893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26254277199768694,
"count": 1,
"self": 0.26254277199768694
}
}
}
}
}
}
}