poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.822566032409668,
"min": 1.7924662828445435,
"max": 3.295607328414917,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35809.77734375,
"min": 22087.16796875,
"max": 118000.0625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.7125,
"min": 41.279661016949156,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19428.0,
"min": 11916.0,
"max": 28128.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1566.5653456806922,
"min": 1193.855767723958,
"max": 1578.835491219127,
"count": 488
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 250650.45530891075,
"min": 2390.585739003281,
"max": 347165.07309874066,
"count": 488
},
"SoccerTwos.Step.mean": {
"value": 4999990.0,
"min": 9082.0,
"max": 4999990.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999990.0,
"min": 9082.0,
"max": 4999990.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.010300255380570889,
"min": -0.10910031944513321,
"max": 0.17050983011722565,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.6480408906936646,
"min": -20.40176010131836,
"max": 25.61324691772461,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.009193485602736473,
"min": -0.10899746417999268,
"max": 0.16702477633953094,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.4709577560424805,
"min": -20.382526397705078,
"max": 25.9049072265625,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09465125054121018,
"min": -0.5777411741368911,
"max": 0.51398621756455,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -15.144200086593628,
"min": -57.36560010910034,
"max": 62.688000202178955,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09465125054121018,
"min": -0.5777411741368911,
"max": 0.51398621756455,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -15.144200086593628,
"min": -57.36560010910034,
"max": 62.688000202178955,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012749818619340658,
"min": 0.011668509912366669,
"max": 0.022843901254236697,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012749818619340658,
"min": 0.011668509912366669,
"max": 0.022843901254236697,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09577699949344,
"min": 6.519637172459624e-05,
"max": 0.11873191570242246,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09577699949344,
"min": 6.519637172459624e-05,
"max": 0.11873191570242246,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09710122446219126,
"min": 6.327543972777979e-05,
"max": 0.12073325614134471,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09710122446219126,
"min": 6.327543972777979e-05,
"max": 0.12073325614134471,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680246955",
"python_version": "3.9.16 (main, Mar 1 2023, 18:22:10) \n[GCC 11.2.0]",
"command_line_arguments": "ml-agents/mlagents/trainers/learn.py ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680257145"
},
"total": 10189.94834217,
"count": 1,
"self": 0.44507462399997166,
"children": {
"run_training.setup": {
"total": 0.05366480899999715,
"count": 1,
"self": 0.05366480899999715
},
"TrainerController.start_learning": {
"total": 10189.449602737,
"count": 1,
"self": 7.733716013133744,
"children": {
"TrainerController._reset_env": {
"total": 4.038401389997603,
"count": 25,
"self": 4.038401389997603
},
"TrainerController.advance": {
"total": 10177.428206385868,
"count": 341700,
"self": 8.054457054160594,
"children": {
"env_step": {
"total": 7882.810816224602,
"count": 341700,
"self": 6405.537656605702,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1472.6588848649405,
"count": 341700,
"self": 50.20280604509912,
"children": {
"TorchPolicy.evaluate": {
"total": 1422.4560788198414,
"count": 630824,
"self": 1422.4560788198414
}
}
},
"workers": {
"total": 4.614274753958853,
"count": 341700,
"self": 0.0,
"children": {
"worker_root": {
"total": 10169.498118034508,
"count": 341700,
"is_parallel": true,
"self": 4725.862021528441,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003004491999945458,
"count": 2,
"is_parallel": true,
"self": 0.0008788069998217907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002125685000123667,
"count": 8,
"is_parallel": true,
"self": 0.002125685000123667
}
}
},
"UnityEnvironment.step": {
"total": 0.04222155900004054,
"count": 1,
"is_parallel": true,
"self": 0.0012354030001233696,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008995069999855332,
"count": 1,
"is_parallel": true,
"self": 0.0008995069999855332
},
"communicator.exchange": {
"total": 0.03673596299995552,
"count": 1,
"is_parallel": true,
"self": 0.03673596299995552
},
"steps_from_proto": {
"total": 0.003350685999976122,
"count": 2,
"is_parallel": true,
"self": 0.000635942000144496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002714743999831626,
"count": 8,
"is_parallel": true,
"self": 0.002714743999831626
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5443.591706503065,
"count": 341699,
"is_parallel": true,
"self": 357.7160711808456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 249.56928493024895,
"count": 341699,
"is_parallel": true,
"self": 249.56928493024895
},
"communicator.exchange": {
"total": 3801.1887028055644,
"count": 341699,
"is_parallel": true,
"self": 3801.1887028055644
},
"steps_from_proto": {
"total": 1035.1176475864063,
"count": 683398,
"is_parallel": true,
"self": 197.87877202872846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 837.2388755576778,
"count": 2733592,
"is_parallel": true,
"self": 837.2388755576778
}
}
}
}
},
"steps_from_proto": {
"total": 0.044390003000899014,
"count": 48,
"is_parallel": true,
"self": 0.009421320011597345,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03496868298930167,
"count": 192,
"is_parallel": true,
"self": 0.03496868298930167
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2286.5629331071054,
"count": 341700,
"self": 59.07281249302605,
"children": {
"process_trajectory": {
"total": 845.6958294960875,
"count": 341700,
"self": 843.1716562320873,
"children": {
"RLTrainer._checkpoint": {
"total": 2.524173264000183,
"count": 10,
"self": 2.524173264000183
}
}
},
"_update_policy": {
"total": 1381.7942911179916,
"count": 240,
"self": 935.5972647249785,
"children": {
"TorchPOCAOptimizer.update": {
"total": 446.1970263930132,
"count": 7209,
"self": 446.1970263930132
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3810004020342603e-06,
"count": 1,
"self": 1.3810004020342603e-06
},
"TrainerController._save_models": {
"total": 0.2492775669998082,
"count": 1,
"self": 0.0019719709998753387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24730559599993285,
"count": 1,
"self": 0.24730559599993285
}
}
}
}
}
}
}