{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.954332947731018,
"min": 1.3264840841293335,
"max": 3.295761823654175,
"count": 1863
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37022.8828125,
"min": 13968.203125,
"max": 119658.234375,
"count": 1863
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 407.6666666666667,
"max": 999.0,
"count": 1863
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 4120.0,
"max": 29364.0,
"count": 1863
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1193.01010444665,
"min": 1183.7567493884217,
"max": 1211.300011561075,
"count": 221
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2386.0202088933,
"min": 2368.8649997466773,
"max": 14529.666804347418,
"count": 221
},
"SoccerTwos.Step.mean": {
"value": 18629702.0,
"min": 9616.0,
"max": 18629702.0,
"count": 1863
},
"SoccerTwos.Step.sum": {
"value": 18629702.0,
"min": 9616.0,
"max": 18629702.0,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -1.111352389671083e-06,
"min": -0.044144924730062485,
"max": 0.018055004999041557,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.1113524124084506e-05,
"min": -0.662173867225647,
"max": 0.25595420598983765,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -3.35992376676586e-06,
"min": -0.04418877512216568,
"max": 0.01754818670451641,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.359923721291125e-05,
"min": -0.6628316044807434,
"max": 0.2481795847415924,
"count": 1863
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1863
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.3054000011512211,
"count": 1863
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 4.275600016117096,
"count": 1863
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.3054000011512211,
"count": 1863
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 4.275600016117096,
"count": 1863
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1863
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1863
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01567243464984737,
"min": 0.010860774454098039,
"max": 0.027968196836688244,
"count": 851
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01567243464984737,
"min": 0.010860774454098039,
"max": 0.027968196836688244,
"count": 851
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 3.425246914804762e-11,
"min": 2.3595142737790433e-11,
"max": 0.005436938970039288,
"count": 851
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 3.425246914804762e-11,
"min": 2.3595142737790433e-11,
"max": 0.005436938970039288,
"count": 851
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 4.56346413204208e-11,
"min": 3.366520314832903e-11,
"max": 0.005439961049705744,
"count": 851
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 4.56346413204208e-11,
"min": 3.366520314832903e-11,
"max": 0.005439961049705744,
"count": 851
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 851
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 851
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 851
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 851
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 851
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 851
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684099442",
"python_version": "3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\johbu\\AppData\\Local\\Programs\\Python\\Python37\\Scripts\\mlagents-learn .\\config\\poca\\SoccerTwos.yaml --env=.\\training-envs-executables\\SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.6",
"end_time_seconds": "1684141538"
},
"total": 42095.9810893,
"count": 1,
"self": 0.40295310000510653,
"children": {
"run_training.setup": {
"total": 0.07414120000000013,
"count": 1,
"self": 0.07414120000000013
},
"TrainerController.start_learning": {
"total": 42095.503995,
"count": 1,
"self": 14.704631397704361,
"children": {
"TrainerController._reset_env": {
"total": 8.012174899989207,
"count": 93,
"self": 8.012174899989207
},
"TrainerController.advance": {
"total": 42072.67401290231,
"count": 1217474,
"self": 15.07942959882348,
"children": {
"env_step": {
"total": 13986.828992203496,
"count": 1217474,
"self": 11024.74603840562,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2952.9800428989524,
"count": 1217474,
"self": 85.42414060606143,
"children": {
"TorchPolicy.evaluate": {
"total": 2867.555902292891,
"count": 2417620,
"self": 391.39378209354527,
"children": {
"TorchPolicy.sample_actions": {
"total": 2476.1621201993457,
"count": 2417620,
"self": 2476.1621201993457
}
}
}
}
},
"workers": {
"total": 9.102910898924563,
"count": 1217474,
"self": 0.0,
"children": {
"worker_root": {
"total": 42069.48677810002,
"count": 1217474,
"is_parallel": true,
"self": 33182.724791397326,
"children": {
"steps_from_proto": {
"total": 0.13034849997127318,
"count": 186,
"is_parallel": true,
"self": 0.029093799915881124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.10125470005539206,
"count": 744,
"is_parallel": true,
"self": 0.10125470005539206
}
}
},
"UnityEnvironment.step": {
"total": 8886.63163820272,
"count": 1217474,
"is_parallel": true,
"self": 504.76745889585254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 474.3629797996917,
"count": 1217474,
"is_parallel": true,
"self": 474.3629797996917
},
"communicator.exchange": {
"total": 6278.15898090307,
"count": 1217474,
"is_parallel": true,
"self": 6278.15898090307
},
"steps_from_proto": {
"total": 1629.3422186041057,
"count": 2434948,
"is_parallel": true,
"self": 348.5505115046576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1280.791707099448,
"count": 9739792,
"is_parallel": true,
"self": 1280.791707099448
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 28070.76559109999,
"count": 1217474,
"self": 157.3080017994107,
"children": {
"process_trajectory": {
"total": 3583.9044986006,
"count": 1217474,
"self": 3580.662047500574,
"children": {
"RLTrainer._checkpoint": {
"total": 3.2424511000260736,
"count": 37,
"self": 3.2424511000260736
}
}
},
"_update_policy": {
"total": 24329.553090699977,
"count": 851,
"self": 1843.207980099789,
"children": {
"TorchPOCAOptimizer.update": {
"total": 22486.34511060019,
"count": 25542,
"self": 22486.34511060019
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999988156370819e-07,
"count": 1,
"self": 7.999988156370819e-07
},
"TrainerController._save_models": {
"total": 0.11317499999859137,
"count": 1,
"self": 0.029603699993458577,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0835713000051328,
"count": 1,
"self": 0.0835713000051328
}
}
}
}
}
}
}