poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8880484104156494,
"min": 1.8722083568572998,
"max": 3.295706272125244,
"count": 658
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37640.1328125,
"min": 16156.5458984375,
"max": 129450.4296875,
"count": 658
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 51.350515463917525,
"min": 43.8018018018018,
"max": 999.0,
"count": 658
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19924.0,
"min": 12552.0,
"max": 27672.0,
"count": 658
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1569.4211476285388,
"min": 1193.541211126307,
"max": 1603.5013753562646,
"count": 629
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 304467.7026399365,
"min": 2393.0391837766865,
"max": 346934.84204839927,
"count": 629
},
"SoccerTwos.Step.mean": {
"value": 6579974.0,
"min": 9852.0,
"max": 6579974.0,
"count": 658
},
"SoccerTwos.Step.sum": {
"value": 6579974.0,
"min": 9852.0,
"max": 6579974.0,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00906392838805914,
"min": -0.10840822756290436,
"max": 0.1630936861038208,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.749338150024414,
"min": -22.00687026977539,
"max": 32.29254913330078,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.009686826728284359,
"min": -0.10873053967952728,
"max": 0.16325582563877106,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.869557499885559,
"min": -22.07229995727539,
"max": 32.32465362548828,
"count": 658
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 658
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.07653367581145133,
"min": -0.6875,
"max": 0.46086666981379193,
"count": 658
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 14.770999431610107,
"min": -49.34479993581772,
"max": 48.02979975938797,
"count": 658
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.07653367581145133,
"min": -0.6875,
"max": 0.46086666981379193,
"count": 658
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 14.770999431610107,
"min": -49.34479993581772,
"max": 48.02979975938797,
"count": 658
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 658
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 658
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01758310463167921,
"min": 0.010413085353987602,
"max": 0.023296837213759622,
"count": 313
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01758310463167921,
"min": 0.010413085353987602,
"max": 0.023296837213759622,
"count": 313
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11528517901897431,
"min": 4.9890405837989725e-05,
"max": 0.12334234490990639,
"count": 313
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11528517901897431,
"min": 4.9890405837989725e-05,
"max": 0.12334234490990639,
"count": 313
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11799983779589335,
"min": 4.931844996463042e-05,
"max": 0.1262499362230301,
"count": 313
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11799983779589335,
"min": 4.931844996463042e-05,
"max": 0.1262499362230301,
"count": 313
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 313
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 313
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 313
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 313
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 313
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 313
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683547966",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:24) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/jovisaib/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1683573486"
},
"total": 25519.760386834,
"count": 1,
"self": 0.21833533400058514,
"children": {
"run_training.setup": {
"total": 0.08148787500000054,
"count": 1,
"self": 0.08148787500000054
},
"TrainerController.start_learning": {
"total": 25519.460563625,
"count": 1,
"self": 4.096770783682587,
"children": {
"TrainerController._reset_env": {
"total": 2.912471209002332,
"count": 33,
"self": 2.912471209002332
},
"TrainerController.advance": {
"total": 25512.368457882316,
"count": 441051,
"self": 4.251637718884012,
"children": {
"env_step": {
"total": 20981.41589841332,
"count": 441051,
"self": 20308.931328215007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 669.0785221537078,
"count": 441051,
"self": 19.03499032753257,
"children": {
"TorchPolicy.evaluate": {
"total": 650.0435318261752,
"count": 837704,
"self": 650.0435318261752
}
}
},
"workers": {
"total": 3.4060480446074965,
"count": 441050,
"self": 0.0,
"children": {
"worker_root": {
"total": 25509.549095601204,
"count": 441050,
"is_parallel": true,
"self": 5781.137212207312,
"children": {
"steps_from_proto": {
"total": 0.04613553898625544,
"count": 66,
"is_parallel": true,
"self": 0.0054039940053833835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04073154498087206,
"count": 264,
"is_parallel": true,
"self": 0.04073154498087206
}
}
},
"UnityEnvironment.step": {
"total": 19728.365747854907,
"count": 441050,
"is_parallel": true,
"self": 53.23034296190235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 352.1141099562241,
"count": 441050,
"is_parallel": true,
"self": 352.1141099562241
},
"communicator.exchange": {
"total": 18574.619126274312,
"count": 441050,
"is_parallel": true,
"self": 18574.619126274312
},
"steps_from_proto": {
"total": 748.4021686624687,
"count": 882100,
"is_parallel": true,
"self": 80.83071414853873,
"children": {
"_process_rank_one_or_two_observation": {
"total": 667.57145451393,
"count": 3528400,
"is_parallel": true,
"self": 667.57145451393
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4526.70092175011,
"count": 441050,
"self": 35.48097765252987,
"children": {
"process_trajectory": {
"total": 830.5695127995796,
"count": 441050,
"self": 829.5063930085789,
"children": {
"RLTrainer._checkpoint": {
"total": 1.063119791000645,
"count": 13,
"self": 1.063119791000645
}
}
},
"_update_policy": {
"total": 3660.6504312980005,
"count": 313,
"self": 423.46463323612033,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3237.18579806188,
"count": 9393,
"self": 3237.18579806188
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.920025382190943e-07,
"count": 1,
"self": 7.920025382190943e-07
},
"TrainerController._save_models": {
"total": 0.08286295799916843,
"count": 1,
"self": 0.0011063329984608572,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08175662500070757,
"count": 1,
"self": 0.08175662500070757
}
}
}
}
}
}
}