{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2092037200927734,
"min": 2.051318645477295,
"max": 3.2957499027252197,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36690.45703125,
"min": 24398.72265625,
"max": 136212.0625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 440.3,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 14552.0,
"max": 26372.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1209.2733784559198,
"min": 1199.9657602336108,
"max": 1217.731692958234,
"count": 114
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2418.5467569118396,
"min": 2399.9315204672216,
"max": 16927.944803220005,
"count": 114
},
"SoccerTwos.Step.mean": {
"value": 4999372.0,
"min": 9202.0,
"max": 4999372.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999372.0,
"min": 9202.0,
"max": 4999372.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 1.6234687791438773e-05,
"min": -0.030659424141049385,
"max": 0.009298032149672508,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.00016234687063843012,
"min": -0.4846147894859314,
"max": 0.10227835178375244,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.1326356798235793e-05,
"min": -0.030494512990117073,
"max": 0.008839435875415802,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.00011326356616336852,
"min": -0.49055200815200806,
"max": 0.10443681478500366,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.625,
"max": 0.27061538512890154,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -10.0,
"max": 3.51800000667572,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.625,
"max": 0.27061538512890154,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -10.0,
"max": 3.51800000667572,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012361842540364402,
"min": 0.010362317136847802,
"max": 0.02537267734587658,
"count": 229
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012361842540364402,
"min": 0.010362317136847802,
"max": 0.02537267734587658,
"count": 229
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 2.0773467982830124e-09,
"min": 7.400037268142275e-10,
"max": 0.005619573569856584,
"count": 229
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 2.0773467982830124e-09,
"min": 7.400037268142275e-10,
"max": 0.005619573569856584,
"count": 229
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 2.3037821613354953e-09,
"min": 1.2671702761165211e-09,
"max": 0.005684552737511695,
"count": 229
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 2.3037821613354953e-09,
"min": 1.2671702761165211e-09,
"max": 0.005684552737511695,
"count": 229
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 229
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 229
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 229
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 229
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 229
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 229
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684499777",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/poca/SoccerTwos.yaml --env=./ml-agents/training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684530809"
},
"total": 31032.533391075,
"count": 1,
"self": 0.7350055900024017,
"children": {
"run_training.setup": {
"total": 0.031214870000439987,
"count": 1,
"self": 0.031214870000439987
},
"TrainerController.start_learning": {
"total": 31031.767170615,
"count": 1,
"self": 14.559213626569544,
"children": {
"TrainerController._reset_env": {
"total": 9.670684499004892,
"count": 40,
"self": 9.670684499004892
},
"TrainerController.advance": {
"total": 31007.186300560425,
"count": 332808,
"self": 17.09762029835474,
"children": {
"env_step": {
"total": 12545.260505879905,
"count": 332808,
"self": 10354.12334213098,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2181.882826378487,
"count": 332808,
"self": 98.38380518860777,
"children": {
"TorchPolicy.evaluate": {
"total": 2083.4990211898794,
"count": 661106,
"self": 2083.4990211898794
}
}
},
"workers": {
"total": 9.254337370436588,
"count": 332808,
"self": 0.0,
"children": {
"worker_root": {
"total": 30960.71375693547,
"count": 332808,
"is_parallel": true,
"self": 22398.571599457577,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008940710999922885,
"count": 2,
"is_parallel": true,
"self": 0.005808138000247709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003132572999675176,
"count": 8,
"is_parallel": true,
"self": 0.003132572999675176
}
}
},
"UnityEnvironment.step": {
"total": 0.054130450999764435,
"count": 1,
"is_parallel": true,
"self": 0.0013673729999936768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0010414919997856487,
"count": 1,
"is_parallel": true,
"self": 0.0010414919997856487
},
"communicator.exchange": {
"total": 0.04755411699989054,
"count": 1,
"is_parallel": true,
"self": 0.04755411699989054
},
"steps_from_proto": {
"total": 0.00416746900009457,
"count": 2,
"is_parallel": true,
"self": 0.0007446389990946045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0034228300009999657,
"count": 8,
"is_parallel": true,
"self": 0.0034228300009999657
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 8562.0241995759,
"count": 332807,
"is_parallel": true,
"self": 467.8820066879216,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 277.2070800379197,
"count": 332807,
"is_parallel": true,
"self": 277.2070800379197
},
"communicator.exchange": {
"total": 6364.978493571819,
"count": 332807,
"is_parallel": true,
"self": 6364.978493571819
},
"steps_from_proto": {
"total": 1451.9566192782404,
"count": 665614,
"is_parallel": true,
"self": 278.78211306334924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1173.1745062148912,
"count": 2662456,
"is_parallel": true,
"self": 1173.1745062148912
}
}
}
}
},
"steps_from_proto": {
"total": 0.11795790199266776,
"count": 78,
"is_parallel": true,
"self": 0.023725785991700832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.09423211600096693,
"count": 312,
"is_parallel": true,
"self": 0.09423211600096693
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 18444.828174382164,
"count": 332808,
"self": 134.22638580460625,
"children": {
"process_trajectory": {
"total": 2303.1936924115444,
"count": 332808,
"self": 2299.786625567537,
"children": {
"RLTrainer._checkpoint": {
"total": 3.407066844007204,
"count": 10,
"self": 3.407066844007204
}
}
},
"_update_policy": {
"total": 16007.408096166013,
"count": 229,
"self": 1320.1081568170794,
"children": {
"TorchPOCAOptimizer.update": {
"total": 14687.299939348934,
"count": 6870,
"self": 14687.299939348934
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.252999936696142e-06,
"count": 1,
"self": 1.252999936696142e-06
},
"TrainerController._save_models": {
"total": 0.3509706760014524,
"count": 1,
"self": 0.005031965003581718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34593871099787066,
"count": 1,
"self": 0.34593871099787066
}
}
}
}
}
}
}