{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0664291381835938,
"min": 1.6211071014404297,
"max": 2.1140940189361572,
"count": 138
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41460.8359375,
"min": 25437.884765625,
"max": 46805.5625,
"count": 138
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 59.18072289156626,
"min": 48.633663366336634,
"max": 84.25,
"count": 138
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19648.0,
"min": 13268.0,
"max": 21008.0,
"count": 138
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1361.1027575669505,
"min": 1332.2418060752768,
"max": 1381.5848843896038,
"count": 138
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 225943.0577561138,
"min": 157443.04723610973,
"max": 274144.9441959259,
"count": 138
},
"SoccerTwos.Step.mean": {
"value": 9999992.0,
"min": 8629944.0,
"max": 9999992.0,
"count": 138
},
"SoccerTwos.Step.sum": {
"value": 9999992.0,
"min": 8629944.0,
"max": 9999992.0,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.009169833734631538,
"min": -0.11076157540082932,
"max": 0.05555163696408272,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.5313621759414673,
"min": -18.71530532836914,
"max": 10.888120651245117,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.008346023969352245,
"min": -0.11172157526016235,
"max": 0.05500708892941475,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.393786072731018,
"min": -18.769224166870117,
"max": 10.781389236450195,
"count": 138
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 138
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.10444311360399167,
"min": -0.2846628572259631,
"max": 0.3103567264233416,
"count": 138
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 17.441999971866608,
"min": -43.6040004491806,
"max": 53.07100021839142,
"count": 138
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.10444311360399167,
"min": -0.2846628572259631,
"max": 0.3103567264233416,
"count": 138
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 17.441999971866608,
"min": -43.6040004491806,
"max": 53.07100021839142,
"count": 138
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 138
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 138
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01308733964106068,
"min": 0.01295044506744792,
"max": 0.023534431812974314,
"count": 66
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01308733964106068,
"min": 0.01295044506744792,
"max": 0.023534431812974314,
"count": 66
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.17252361327409743,
"min": 0.14690864831209183,
"max": 0.19969933728377023,
"count": 66
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.17252361327409743,
"min": 0.14690864831209183,
"max": 0.19969933728377023,
"count": 66
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.17274378736813864,
"min": 0.1474159265557925,
"max": 0.19963415463765463,
"count": 66
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.17274378736813864,
"min": 0.1474159265557925,
"max": 0.19963415463765463,
"count": 66
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.9999999999999994e-05,
"min": 2.9999999999999994e-05,
"max": 2.9999999999999994e-05,
"count": 66
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.9999999999999994e-05,
"min": 2.9999999999999994e-05,
"max": 2.9999999999999994e-05,
"count": 66
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 66
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 66
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 66
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675371904",
"python_version": "3.8.16 (default, Jan 17 2023, 22:25:28) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "D:\\anaconda3\\envs\\hf_rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675376841"
},
"total": 4937.0180866,
"count": 1,
"self": 0.24833760000092298,
"children": {
"run_training.setup": {
"total": 0.11877829999999978,
"count": 1,
"self": 0.11877829999999978
},
"TrainerController.start_learning": {
"total": 4936.6509707,
"count": 1,
"self": 2.6368469999506488,
"children": {
"TrainerController._reset_env": {
"total": 33.563538600000435,
"count": 19,
"self": 33.563538600000435
},
"TrainerController.advance": {
"total": 4900.301972200048,
"count": 95593,
"self": 2.201746299995648,
"children": {
"env_step": {
"total": 1734.293331699992,
"count": 95593,
"self": 1373.3938976000522,
"children": {
"SubprocessEnvManager._take_step": {
"total": 359.1492142999836,
"count": 95593,
"self": 11.475502200033532,
"children": {
"TorchPolicy.evaluate": {
"total": 347.6737120999501,
"count": 173690,
"self": 347.6737120999501
}
}
},
"workers": {
"total": 1.7502197999561844,
"count": 95593,
"self": 0.0,
"children": {
"worker_root": {
"total": 4899.762037900035,
"count": 95593,
"is_parallel": true,
"self": 3840.197846800068,
"children": {
"steps_from_proto": {
"total": 0.04261309999918694,
"count": 38,
"is_parallel": true,
"self": 0.008200999998777547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.034412100000409396,
"count": 152,
"is_parallel": true,
"self": 0.034412100000409396
}
}
},
"UnityEnvironment.step": {
"total": 1059.5215779999673,
"count": 95593,
"is_parallel": true,
"self": 54.76419309988353,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 56.236806500021856,
"count": 95593,
"is_parallel": true,
"self": 56.236806500021856
},
"communicator.exchange": {
"total": 757.0124340999611,
"count": 95593,
"is_parallel": true,
"self": 757.0124340999611
},
"steps_from_proto": {
"total": 191.5081443001009,
"count": 191186,
"is_parallel": true,
"self": 37.52843190006166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 153.97971240003923,
"count": 764744,
"is_parallel": true,
"self": 153.97971240003923
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3163.80689420006,
"count": 95593,
"self": 18.614615900091394,
"children": {
"process_trajectory": {
"total": 510.7507506999694,
"count": 95593,
"self": 510.1938222999698,
"children": {
"RLTrainer._checkpoint": {
"total": 0.556928399999606,
"count": 3,
"self": 0.556928399999606
}
}
},
"_update_policy": {
"total": 2634.441527599999,
"count": 66,
"self": 325.25447209998447,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2309.1870555000146,
"count": 1980,
"self": 2309.1870555000146
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.14861180000025342,
"count": 1,
"self": 0.013107599999784725,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1355042000004687,
"count": 1,
"self": 0.1355042000004687
}
}
}
}
}
}
}