{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9021496772766113,
"min": 1.8278286457061768,
"max": 2.0336482524871826,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39808.1875,
"min": 31856.9765625,
"max": 49968.15234375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.15909090909091,
"min": 44.46363636363636,
"max": 103.45833333333333,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19768.0,
"min": 18588.0,
"max": 20676.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1555.385077202667,
"min": 1494.539175487284,
"max": 1576.1815326925348,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 273747.77358766936,
"min": 148491.1755175332,
"max": 336654.3063271615,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 24999978.0,
"min": 20009991.0,
"max": 24999978.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 24999978.0,
"min": 20009991.0,
"max": 24999978.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01763874664902687,
"min": -0.10776752978563309,
"max": 0.07479347288608551,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.086780548095703,
"min": -17.547164916992188,
"max": 12.565303802490234,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01813901588320732,
"min": -0.11023017019033432,
"max": 0.0698384940624237,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.174327850341797,
"min": -18.204742431640625,
"max": 11.732867240905762,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0786628542627607,
"min": -0.34376484986507533,
"max": 0.4665142857298559,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 13.765999495983124,
"min": -59.32819986343384,
"max": 47.49799942970276,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0786628542627607,
"min": -0.34376484986507533,
"max": 0.4665142857298559,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 13.765999495983124,
"min": -59.32819986343384,
"max": 47.49799942970276,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01652876395867982,
"min": 0.013891844534009578,
"max": 0.023010420791979413,
"count": 121
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01652876395867982,
"min": 0.013891844534009578,
"max": 0.023010420791979413,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10607224243382612,
"min": 0.08422205423315367,
"max": 0.1170943199346463,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10607224243382612,
"min": 0.08422205423315367,
"max": 0.1170943199346463,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10775665591160456,
"min": 0.08507357910275459,
"max": 0.11834435512622198,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10775665591160456,
"min": 0.08507357910275459,
"max": 0.11834435512622198,
"count": 121
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00010000000000000003,
"min": 0.00010000000000000003,
"max": 0.00010000000000000003,
"count": 121
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00010000000000000003,
"min": 0.00010000000000000003,
"max": 0.00010000000000000003,
"count": 121
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999993,
"min": 0.19999999999999993,
"max": 0.19999999999999993,
"count": 121
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999993,
"min": 0.19999999999999993,
"max": 0.19999999999999993,
"count": 121
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 121
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 121
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675995123",
"python_version": "3.9.16 (main, Feb 6 2023, 20:06:40) \n[GCC 9.3.0]",
"command_line_arguments": "/usr/bin/mlagents-learn ./config/poca/SoccerTwos_v4.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1676016142"
},
"total": 21019.160699600005,
"count": 1,
"self": 0.4239419000223279,
"children": {
"run_training.setup": {
"total": 0.02791030000662431,
"count": 1,
"self": 0.02791030000662431
},
"TrainerController.start_learning": {
"total": 21018.708847399976,
"count": 1,
"self": 12.252090293797664,
"children": {
"TrainerController._reset_env": {
"total": 1.578472400084138,
"count": 21,
"self": 1.578472400084138
},
"TrainerController.advance": {
"total": 21004.07154440612,
"count": 346217,
"self": 13.339027916430496,
"children": {
"env_step": {
"total": 11195.647434503247,
"count": 346217,
"self": 9608.978213496332,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1579.37136202713,
"count": 346217,
"self": 65.21483358863043,
"children": {
"TorchPolicy.evaluate": {
"total": 1514.1565284384997,
"count": 627082,
"self": 1514.1565284384997
}
}
},
"workers": {
"total": 7.297858979785815,
"count": 346217,
"self": 0.0,
"children": {
"worker_root": {
"total": 20991.833144892298,
"count": 346217,
"is_parallel": true,
"self": 12627.482296909031,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004215100023429841,
"count": 2,
"is_parallel": true,
"self": 0.0011841999948956072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030309000285342336,
"count": 8,
"is_parallel": true,
"self": 0.0030309000285342336
}
}
},
"UnityEnvironment.step": {
"total": 0.034806600015144795,
"count": 1,
"is_parallel": true,
"self": 0.0005694999708794057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045980000868439674,
"count": 1,
"is_parallel": true,
"self": 0.00045980000868439674
},
"communicator.exchange": {
"total": 0.032166399993002415,
"count": 1,
"is_parallel": true,
"self": 0.032166399993002415
},
"steps_from_proto": {
"total": 0.001610900042578578,
"count": 2,
"is_parallel": true,
"self": 0.00036669994005933404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001244200102519244,
"count": 8,
"is_parallel": true,
"self": 0.001244200102519244
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.04493440012447536,
"count": 40,
"is_parallel": true,
"self": 0.009977000067010522,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03495740005746484,
"count": 160,
"is_parallel": true,
"self": 0.03495740005746484
}
}
},
"UnityEnvironment.step": {
"total": 8364.305913583143,
"count": 346216,
"is_parallel": true,
"self": 273.51644306909293,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 217.13113000715384,
"count": 346216,
"is_parallel": true,
"self": 217.13113000715384
},
"communicator.exchange": {
"total": 7046.747987709532,
"count": 346216,
"is_parallel": true,
"self": 7046.747987709532
},
"steps_from_proto": {
"total": 826.9103527973639,
"count": 692432,
"is_parallel": true,
"self": 182.80608186731115,
"children": {
"_process_rank_one_or_two_observation": {
"total": 644.1042709300527,
"count": 2769728,
"is_parallel": true,
"self": 644.1042709300527
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9795.085081986443,
"count": 346217,
"self": 69.57300218037562,
"children": {
"process_trajectory": {
"total": 1763.8578488061903,
"count": 346217,
"self": 1761.022810106224,
"children": {
"RLTrainer._checkpoint": {
"total": 2.8350386999663897,
"count": 10,
"self": 2.8350386999663897
}
}
},
"_update_policy": {
"total": 7961.654230999877,
"count": 121,
"self": 920.4508470981964,
"children": {
"TorchPOCAOptimizer.update": {
"total": 7041.20338390168,
"count": 7260,
"self": 7041.20338390168
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.4999608285725117e-06,
"count": 1,
"self": 2.4999608285725117e-06
},
"TrainerController._save_models": {
"total": 0.8067378000123426,
"count": 1,
"self": 0.00272019999101758,
"children": {
"RLTrainer._checkpoint": {
"total": 0.804017600021325,
"count": 1,
"self": 0.804017600021325
}
}
}
}
}
}
}