{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2512972354888916,
"min": 2.2299022674560547,
"max": 3.2957186698913574,
"count": 562
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 45602.27734375,
"min": 210.48744201660156,
"max": 117762.34375,
"count": 562
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 51.55789473684211,
"min": 43.482142857142854,
"max": 999.0,
"count": 562
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19592.0,
"min": 3112.0,
"max": 31968.0,
"count": 562
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1431.894787408498,
"min": 1177.37739805464,
"max": 1441.8620239325535,
"count": 449
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 272060.0096076146,
"min": 2354.75479610928,
"max": 306835.3105804471,
"count": 449
},
"SoccerTwos.Step.mean": {
"value": 5619995.0,
"min": 9840.0,
"max": 5619995.0,
"count": 562
},
"SoccerTwos.Step.sum": {
"value": 5619995.0,
"min": 9840.0,
"max": 5619995.0,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.008535241708159447,
"min": -0.07643411308526993,
"max": 0.1327708512544632,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.6216959953308105,
"min": -12.117111206054688,
"max": 25.359233856201172,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.006365294102579355,
"min": -0.07697104662656784,
"max": 0.13355323672294617,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.2094058990478516,
"min": -12.854165077209473,
"max": 25.508668899536133,
"count": 562
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 562
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.06011052790441011,
"min": -0.5714285714285714,
"max": 0.4621249958872795,
"count": 562
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.421000301837921,
"min": -40.99080008268356,
"max": 45.73100024461746,
"count": 562
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.06011052790441011,
"min": -0.5714285714285714,
"max": 0.4621249958872795,
"count": 562
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.421000301837921,
"min": -40.99080008268356,
"max": 45.73100024461746,
"count": 562
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 562
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 562
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018468388903420417,
"min": 0.010478435871967425,
"max": 0.024917255611702178,
"count": 264
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018468388903420417,
"min": 0.010478435871967425,
"max": 0.024917255611702178,
"count": 264
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08638501664002736,
"min": 4.068811290854531e-08,
"max": 0.1062912255525589,
"count": 264
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08638501664002736,
"min": 4.068811290854531e-08,
"max": 0.1062912255525589,
"count": 264
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08870537256201108,
"min": 4.0923209236135943e-08,
"max": 0.10939665660262107,
"count": 264
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08870537256201108,
"min": 4.0923209236135943e-08,
"max": 0.10939665660262107,
"count": 264
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 264
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 264
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 264
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 264
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 264
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 264
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702049216",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/home/dbailleu/.local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.22.3",
"end_time_seconds": "1702070490"
},
"total": 21274.4086999,
"count": 1,
"self": 0.16768681899702642,
"children": {
"run_training.setup": {
"total": 0.01036884600034682,
"count": 1,
"self": 0.01036884600034682
},
"TrainerController.start_learning": {
"total": 21274.230644235,
"count": 1,
"self": 7.4248600445753254,
"children": {
"TrainerController._reset_env": {
"total": 2.4546099879880785,
"count": 29,
"self": 2.4546099879880785
},
"TrainerController.advance": {
"total": 21264.258678789443,
"count": 372560,
"self": 9.100830649873387,
"children": {
"env_step": {
"total": 8574.15538607199,
"count": 372560,
"self": 7400.381202805122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1168.4869949782988,
"count": 372560,
"self": 51.36865540101644,
"children": {
"TorchPolicy.evaluate": {
"total": 1117.1183395772823,
"count": 717920,
"self": 1117.1183395772823
}
}
},
"workers": {
"total": 5.287188288570178,
"count": 372560,
"self": 0.0,
"children": {
"worker_root": {
"total": 21231.55430940061,
"count": 372560,
"is_parallel": true,
"self": 14756.326627073799,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002743323999311542,
"count": 2,
"is_parallel": true,
"self": 0.0005649750019074418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021783489974041004,
"count": 8,
"is_parallel": true,
"self": 0.0021783489974041004
}
}
},
"UnityEnvironment.step": {
"total": 0.029804314999637427,
"count": 1,
"is_parallel": true,
"self": 0.00036474899388849735,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009311610010627192,
"count": 1,
"is_parallel": true,
"self": 0.0009311610010627192
},
"communicator.exchange": {
"total": 0.027411934002884664,
"count": 1,
"is_parallel": true,
"self": 0.027411934002884664
},
"steps_from_proto": {
"total": 0.0010964710018015467,
"count": 2,
"is_parallel": true,
"self": 0.00023595700622536242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008605139955761842,
"count": 8,
"is_parallel": true,
"self": 0.0008605139955761842
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6475.1886665398015,
"count": 372559,
"is_parallel": true,
"self": 176.72478310902443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 253.02448767446913,
"count": 372559,
"is_parallel": true,
"self": 253.02448767446913
},
"communicator.exchange": {
"total": 5509.755959868915,
"count": 372559,
"is_parallel": true,
"self": 5509.755959868915
},
"steps_from_proto": {
"total": 535.683435887393,
"count": 745118,
"is_parallel": true,
"self": 113.83367876522607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 421.8497571221669,
"count": 2980472,
"is_parallel": true,
"self": 421.8497571221669
}
}
}
}
},
"steps_from_proto": {
"total": 0.03901578700970276,
"count": 56,
"is_parallel": true,
"self": 0.00824886504051392,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.030766921969188843,
"count": 224,
"is_parallel": true,
"self": 0.030766921969188843
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12681.00246206758,
"count": 372560,
"self": 65.80724746843407,
"children": {
"process_trajectory": {
"total": 1194.1847808191087,
"count": 372560,
"self": 1193.088690232118,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0960905869906128,
"count": 11,
"self": 1.0960905869906128
}
}
},
"_update_policy": {
"total": 11421.010433780037,
"count": 265,
"self": 551.5973732608181,
"children": {
"TorchPOCAOptimizer.update": {
"total": 10869.413060519219,
"count": 7958,
"self": 10869.413060519219
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1609954526647925e-06,
"count": 1,
"self": 1.1609954526647925e-06
},
"TrainerController._save_models": {
"total": 0.0924942519995966,
"count": 1,
"self": 0.0024630129992146976,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0900312390003819,
"count": 1,
"self": 0.0900312390003819
}
}
}
}
}
}
}