{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.276331901550293,
"min": 3.2658324241638184,
"max": 3.2957749366760254,
"count": 8650
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 10484.26171875,
"min": 104.81745910644531,
"max": 15789.125,
"count": 8650
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 158.5,
"min": 0.0,
"max": 999.0,
"count": 2652
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 1268.0,
"min": 0.0,
"max": 19980.0,
"count": 2652
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1164.2205715235966,
"min": 1159.2077927993414,
"max": 1204.7435239037027,
"count": 1051
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2328.4411430471932,
"min": 2318.885447365601,
"max": 7116.464726252681,
"count": 1051
},
"SoccerTwos.Step.mean": {
"value": 4999966.0,
"min": 458.0,
"max": 4999966.0,
"count": 10000
},
"SoccerTwos.Step.sum": {
"value": 4999966.0,
"min": 458.0,
"max": 4999966.0,
"count": 10000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.017738109454512596,
"min": -0.30164986848831177,
"max": 0.03233775869011879,
"count": 10000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.08869054913520813,
"min": -1.8090074062347412,
"max": 0.19402655959129333,
"count": 10000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01838485710322857,
"min": -0.30160006880760193,
"max": 0.033794302493333817,
"count": 10000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.091924287378788,
"min": -1.8077348470687866,
"max": 0.19572553038597107,
"count": 10000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2987
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2987
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.5,
"min": -1.0,
"max": 0.9711999893188477,
"count": 2987
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.0,
"min": -4.0,
"max": 4.129000008106232,
"count": 2987
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.5,
"min": -1.0,
"max": 0.9711999893188477,
"count": 2987
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.0,
"min": -4.0,
"max": 4.129000008106232,
"count": 2987
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.07197124875065955,
"min": 0.016312858080849625,
"max": 0.09011630510742014,
"count": 1783
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.07197124875065955,
"min": 0.016312858080849625,
"max": 0.09011630510742014,
"count": 1783
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 2.1567379421453964e-05,
"min": 2.608480258459167e-06,
"max": 0.02774455361068249,
"count": 1783
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 2.1567379421453964e-05,
"min": 2.608480258459167e-06,
"max": 0.02774455361068249,
"count": 1783
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 1.626949779165443e-05,
"min": 2.6999044848707854e-06,
"max": 0.027782930806279183,
"count": 1783
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 1.626949779165443e-05,
"min": 2.6999044848707854e-06,
"max": 0.027782930806279183,
"count": 1783
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 2.5e-05,
"min": 2.5e-05,
"max": 2.5000000000000005e-05,
"count": 1783
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 2.5e-05,
"min": 2.5e-05,
"max": 2.5000000000000005e-05,
"count": 1783
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2,
"min": 0.2,
"max": 0.20000000000000007,
"count": 1783
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2,
"min": 0.2,
"max": 0.20000000000000007,
"count": 1783
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.005,
"count": 1783
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.005,
"count": 1783
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687852430",
"python_version": "3.10.2 (tags/v3.10.2:a58ebcc, Jan 17 2022, 14:12:15) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\MaiDo\\AppData\\Local\\Programs\\Python\\Python310\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.12.1+cpu",
"numpy_version": "1.23.4",
"end_time_seconds": "1687861892"
},
"total": 9462.54121310002,
"count": 1,
"self": 0.36523850000230595,
"children": {
"run_training.setup": {
"total": 0.10421369998948649,
"count": 1,
"self": 0.10421369998948649
},
"TrainerController.start_learning": {
"total": 9462.071760900028,
"count": 1,
"self": 9.755972094717436,
"children": {
"TrainerController._reset_env": {
"total": 9.259877300064545,
"count": 200,
"self": 9.259877300064545
},
"TrainerController.advance": {
"total": 9442.898137405224,
"count": 323130,
"self": 9.681490987772122,
"children": {
"env_step": {
"total": 7450.867933301255,
"count": 323130,
"self": 6143.621739796363,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1300.730649602483,
"count": 323130,
"self": 55.70470830902923,
"children": {
"TorchPolicy.evaluate": {
"total": 1245.0259412934538,
"count": 641272,
"self": 1245.0259412934538
}
}
},
"workers": {
"total": 6.515543902409263,
"count": 323130,
"self": 0.0,
"children": {
"worker_root": {
"total": 9444.722208496823,
"count": 323130,
"is_parallel": true,
"self": 4412.559179384203,
"children": {
"steps_from_proto": {
"total": 0.44778919965028763,
"count": 400,
"is_parallel": true,
"self": 0.0883305004099384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.35945869924034923,
"count": 1600,
"is_parallel": true,
"self": 0.35945869924034923
}
}
},
"UnityEnvironment.step": {
"total": 5031.71523991297,
"count": 323130,
"is_parallel": true,
"self": 244.5766650448204,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 184.72282387921587,
"count": 323130,
"is_parallel": true,
"self": 184.72282387921587
},
"communicator.exchange": {
"total": 3853.434067182534,
"count": 323130,
"is_parallel": true,
"self": 3853.434067182534
},
"steps_from_proto": {
"total": 748.9816838063998,
"count": 646260,
"is_parallel": true,
"self": 142.99139347008895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 605.9902903363109,
"count": 2585040,
"is_parallel": true,
"self": 605.9902903363109
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1982.3487131161964,
"count": 323130,
"self": 75.70392960589379,
"children": {
"process_trajectory": {
"total": 1011.022233310272,
"count": 323130,
"self": 1008.8699188103201,
"children": {
"RLTrainer._checkpoint": {
"total": 2.152314499951899,
"count": 10,
"self": 2.152314499951899
}
}
},
"_update_policy": {
"total": 895.6225502000307,
"count": 1783,
"self": 307.28713700175285,
"children": {
"TorchPOCAOptimizer.update": {
"total": 588.3354131982778,
"count": 18880,
"self": 588.3354131982778
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.00006853044033e-07,
"count": 1,
"self": 9.00006853044033e-07
},
"TrainerController._save_models": {
"total": 0.15777320001507178,
"count": 1,
"self": 0.004239100031554699,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15353409998351708,
"count": 1,
"self": 0.15353409998351708
}
}
}
}
}
}
}