poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.382171869277954,
"min": 1.3406294584274292,
"max": 1.5941110849380493,
"count": 1844
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 24901.208984375,
"min": 21594.4140625,
"max": 36567.2734375,
"count": 1844
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 72.20588235294117,
"min": 44.31818181818182,
"max": 104.61702127659575,
"count": 1844
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 14520.0,
"max": 21788.0,
"count": 1844
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1880.319441975431,
"min": 1797.6161456333896,
"max": 1892.5716559389089,
"count": 1844
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 255723.4441086586,
"min": 175502.58311787687,
"max": 410920.08707172686,
"count": 1844
},
"SoccerTwos.Step.mean": {
"value": 49999984.0,
"min": 31569976.0,
"max": 49999984.0,
"count": 1844
},
"SoccerTwos.Step.sum": {
"value": 49999984.0,
"min": 31569976.0,
"max": 49999984.0,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008061565458774567,
"min": -0.13645006716251373,
"max": 0.1033400148153305,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.0963728427886963,
"min": -24.922229766845703,
"max": 16.017702102661133,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008827511221170425,
"min": -0.13706254959106445,
"max": 0.10023332387208939,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.2005414962768555,
"min": -25.208498001098633,
"max": 16.848989486694336,
"count": 1844
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1844
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06327352979603935,
"min": -0.4349043472953465,
"max": 0.2952662448214877,
"count": 1844
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.605200052261353,
"min": -77.33240008354187,
"max": 62.98039984703064,
"count": 1844
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06327352979603935,
"min": -0.4349043472953465,
"max": 0.2952662448214877,
"count": 1844
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.605200052261353,
"min": -77.33240008354187,
"max": 62.98039984703064,
"count": 1844
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1844
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1844
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.021826821127615404,
"min": 0.011657302830280969,
"max": 0.02609102944843471,
"count": 894
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.021826821127615404,
"min": 0.011657302830280969,
"max": 0.02609102944843471,
"count": 894
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08605065693457921,
"min": 0.07169382013380528,
"max": 0.12169351130723953,
"count": 894
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08605065693457921,
"min": 0.07169382013380528,
"max": 0.12169351130723953,
"count": 894
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0873677298426628,
"min": 0.07212473005056382,
"max": 0.12342467854420344,
"count": 894
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0873677298426628,
"min": 0.07212473005056382,
"max": 0.12342467854420344,
"count": 894
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 894
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 894
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 894
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 894
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 894
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 894
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733752631",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/zhangsz/anaconda3/envs/deep_rl_course/bin/mlagents-learn ./SoccerTwos.yaml --env=../ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos-debug --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1733779195"
},
"total": 26564.170061824,
"count": 1,
"self": 5.735672844002693,
"children": {
"run_training.setup": {
"total": 0.016341089999997394,
"count": 1,
"self": 0.016341089999997394
},
"TrainerController.start_learning": {
"total": 26558.418047889998,
"count": 1,
"self": 16.27938580256523,
"children": {
"TrainerController._reset_env": {
"total": 2.4120728419996027,
"count": 38,
"self": 2.4120728419996027
},
"TrainerController.advance": {
"total": 26539.586466513432,
"count": 1267380,
"self": 14.695409102725534,
"children": {
"env_step": {
"total": 20610.8784505561,
"count": 1267380,
"self": 13519.42253980608,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7080.059960516522,
"count": 1267380,
"self": 98.50023091266303,
"children": {
"TorchPolicy.evaluate": {
"total": 6981.559729603859,
"count": 2308482,
"self": 6981.559729603859
}
}
},
"workers": {
"total": 11.395950233499804,
"count": 1267380,
"self": 0.0,
"children": {
"worker_root": {
"total": 26523.549686335948,
"count": 1267380,
"is_parallel": true,
"self": 14694.419936251274,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017620039999926007,
"count": 2,
"is_parallel": true,
"self": 0.0007527659999979619,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010092379999946388,
"count": 8,
"is_parallel": true,
"self": 0.0010092379999946388
}
}
},
"UnityEnvironment.step": {
"total": 0.017917139000005022,
"count": 1,
"is_parallel": true,
"self": 0.00029563500001472676,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002221390000016754,
"count": 1,
"is_parallel": true,
"self": 0.0002221390000016754
},
"communicator.exchange": {
"total": 0.016483568999994702,
"count": 1,
"is_parallel": true,
"self": 0.016483568999994702
},
"steps_from_proto": {
"total": 0.000915795999993918,
"count": 2,
"is_parallel": true,
"self": 0.00022799099998849215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006878050000054259,
"count": 8,
"is_parallel": true,
"self": 0.0006878050000054259
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.0303977620057978,
"count": 74,
"is_parallel": true,
"self": 0.006590180996589368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.023807581009208434,
"count": 296,
"is_parallel": true,
"self": 0.023807581009208434
}
}
},
"UnityEnvironment.step": {
"total": 11829.099352322668,
"count": 1267379,
"is_parallel": true,
"self": 328.7603502342481,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 203.86634577828423,
"count": 1267379,
"is_parallel": true,
"self": 203.86634577828423
},
"communicator.exchange": {
"total": 10340.955320573892,
"count": 1267379,
"is_parallel": true,
"self": 10340.955320573892
},
"steps_from_proto": {
"total": 955.517335736244,
"count": 2534758,
"is_parallel": true,
"self": 207.19493583769452,
"children": {
"_process_rank_one_or_two_observation": {
"total": 748.3223998985495,
"count": 10139032,
"is_parallel": true,
"self": 748.3223998985495
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5914.012606854603,
"count": 1267380,
"self": 122.59324357371406,
"children": {
"process_trajectory": {
"total": 3067.5393345179223,
"count": 1267380,
"self": 3062.31642538492,
"children": {
"RLTrainer._checkpoint": {
"total": 5.222909133002304,
"count": 37,
"self": 5.222909133002304
}
}
},
"_update_policy": {
"total": 2723.8800287629674,
"count": 894,
"self": 1398.6412164982567,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1325.2388122647108,
"count": 26820,
"self": 1325.2388122647108
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.6800050768069923e-07,
"count": 1,
"self": 4.6800050768069923e-07
},
"TrainerController._save_models": {
"total": 0.1401222640015476,
"count": 1,
"self": 0.0008723770006326959,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1392498870009149,
"count": 1,
"self": 0.1392498870009149
}
}
}
}
}
}
}
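For reference, a minimal Python sketch for reading this log (assuming the file is saved locally as run_logs/timers.json; the path and the chosen gauge keys are illustrative, taken from the entries above). It loads the "gauges" block and prints the final ELO, step, and policy-loss statistics:

import json

# Load the ML-Agents timers.json shown above (path is an assumption).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

gauges = timers["gauges"]

# Print value/min/max/count for a few gauges recorded in this run.
for key in (
    "SoccerTwos.Self-play.ELO.mean",
    "SoccerTwos.Step.mean",
    "SoccerTwos.Losses.PolicyLoss.mean",
):
    g = gauges[key]
    print(
        f"{key}: value={g['value']:.4f} "
        f"(min={g['min']:.4f}, max={g['max']:.4f}, count={g['count']})"
    )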