poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6738388538360596,
"min": 1.5268455743789673,
"max": 3.2957651615142822,
"count": 994
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34065.96875,
"min": 10568.92578125,
"max": 179269.328125,
"count": 994
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.295918367346935,
"min": 44.486486486486484,
"max": 999.0,
"count": 994
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19716.0,
"min": 9416.0,
"max": 31968.0,
"count": 994
},
"SoccerTwos.Step.mean": {
"value": 9939997.0,
"min": 9000.0,
"max": 9939997.0,
"count": 994
},
"SoccerTwos.Step.sum": {
"value": 9939997.0,
"min": 9000.0,
"max": 9939997.0,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.008323772810399532,
"min": -0.1177525669336319,
"max": 0.14752517640590668,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.6314594745635986,
"min": -18.065879821777344,
"max": 27.73473358154297,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.010034768842160702,
"min": -0.11607915163040161,
"max": 0.14519061148166656,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.9668147563934326,
"min": -17.73145294189453,
"max": 27.295835494995117,
"count": 994
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 994
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05571224494856231,
"min": -0.5,
"max": 0.5506909069689837,
"count": 994
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -10.919600009918213,
"min": -49.316800117492676,
"max": 50.817000329494476,
"count": 994
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05571224494856231,
"min": -0.5,
"max": 0.5506909069689837,
"count": 994
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -10.919600009918213,
"min": -49.316800117492676,
"max": 50.817000329494476,
"count": 994
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 994
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 994
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1615.109088210085,
"min": 1194.1441202528824,
"max": 1645.2772938662745,
"count": 980
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 316561.38128917664,
"min": 2388.7842148185537,
"max": 358954.22048489144,
"count": 980
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019522495680333426,
"min": 0.010394203113780047,
"max": 0.023615345258925422,
"count": 480
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019522495680333426,
"min": 0.010394203113780047,
"max": 0.023615345258925422,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10989386513829232,
"min": 2.9581445293539824e-05,
"max": 0.12241931185126305,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10989386513829232,
"min": 2.9581445293539824e-05,
"max": 0.12241931185126305,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11195614089568456,
"min": 3.129964231144792e-05,
"max": 0.12525529265403748,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11195614089568456,
"min": 3.129964231144792e-05,
"max": 0.12525529265403748,
"count": 480
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 480
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 480
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691251004",
"python_version": "3.9.9 (main, Dec 22 2021, 19:41:22) \n[GCC 10.3.0]",
"command_line_arguments": "/home/aphi/.pyenv/versions/3.9.9/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691273859"
},
"total": 22854.404714304,
"count": 1,
"self": 0.022190077001141617,
"children": {
"run_training.setup": {
"total": 0.039864002999820514,
"count": 1,
"self": 0.039864002999820514
},
"TrainerController.start_learning": {
"total": 22854.342660224,
"count": 1,
"self": 15.971010732457216,
"children": {
"TrainerController._reset_env": {
"total": 2.375991888993667,
"count": 50,
"self": 2.375991888993667
},
"TrainerController.advance": {
"total": 22835.767046819547,
"count": 680503,
"self": 17.05574680085192,
"children": {
"env_step": {
"total": 11915.415566760114,
"count": 680503,
"self": 10043.211442329506,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1861.7858870874934,
"count": 680503,
"self": 82.98120876659232,
"children": {
"TorchPolicy.evaluate": {
"total": 1778.804678320901,
"count": 1253148,
"self": 1778.804678320901
}
}
},
"workers": {
"total": 10.418237343114015,
"count": 680503,
"self": 0.0,
"children": {
"worker_root": {
"total": 22822.412832047936,
"count": 680503,
"is_parallel": true,
"self": 14637.976740783537,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001420128000972909,
"count": 2,
"is_parallel": true,
"self": 0.0003433679994486738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010767600015242351,
"count": 8,
"is_parallel": true,
"self": 0.0010767600015242351
}
}
},
"UnityEnvironment.step": {
"total": 0.019359092999366112,
"count": 1,
"is_parallel": true,
"self": 0.00039651199767831713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000708682000549743,
"count": 1,
"is_parallel": true,
"self": 0.000708682000549743
},
"communicator.exchange": {
"total": 0.016968126999927335,
"count": 1,
"is_parallel": true,
"self": 0.016968126999927335
},
"steps_from_proto": {
"total": 0.0012857720012107166,
"count": 2,
"is_parallel": true,
"self": 0.0002926259967352962,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009931460044754203,
"count": 8,
"is_parallel": true,
"self": 0.0009931460044754203
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 8184.351418703425,
"count": 680502,
"is_parallel": true,
"self": 477.54881321897665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 388.385160443986,
"count": 680502,
"is_parallel": true,
"self": 388.385160443986
},
"communicator.exchange": {
"total": 5951.407338663861,
"count": 680502,
"is_parallel": true,
"self": 5951.407338663861
},
"steps_from_proto": {
"total": 1367.0101063766015,
"count": 1361004,
"is_parallel": true,
"self": 270.387928177237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1096.6221781993645,
"count": 5444016,
"is_parallel": true,
"self": 1096.6221781993645
}
}
}
}
},
"steps_from_proto": {
"total": 0.08467256097355857,
"count": 98,
"is_parallel": true,
"self": 0.016923518991461606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06774904198209697,
"count": 392,
"is_parallel": true,
"self": 0.06774904198209697
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10903.29573325858,
"count": 680503,
"self": 114.85782065303101,
"children": {
"process_trajectory": {
"total": 2064.1165530945327,
"count": 680503,
"self": 2060.324406969532,
"children": {
"RLTrainer._checkpoint": {
"total": 3.792146125000727,
"count": 19,
"self": 3.792146125000727
}
}
},
"_update_policy": {
"total": 8724.321359511016,
"count": 481,
"self": 1749.8782984696263,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6974.443061041389,
"count": 14411,
"self": 6974.443061041389
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8889986677095294e-06,
"count": 1,
"self": 1.8889986677095294e-06
},
"TrainerController._save_models": {
"total": 0.22860889400180895,
"count": 1,
"self": 0.0014931770056136884,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22711571699619526,
"count": 1,
"self": 0.22711571699619526
}
}
}
}
}
}
}