SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1499600410461426,
"min": 3.131804943084717,
"max": 3.2054200172424316,
"count": 75
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 62394.40625,
"min": 23642.75390625,
"max": 107458.1953125,
"count": 75
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 898.3333333333334,
"min": 374.61538461538464,
"max": 999.0,
"count": 75
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21560.0,
"min": 12860.0,
"max": 24332.0,
"count": 75
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1180.009652463314,
"min": 1175.3909421129385,
"max": 1195.0985680196525,
"count": 74
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4720.038609853256,
"min": 2353.013531603022,
"max": 21287.174162322914,
"count": 74
},
"SoccerTwos.Step.mean": {
"value": 1749370.0,
"min": 1009872.0,
"max": 1749370.0,
"count": 75
},
"SoccerTwos.Step.sum": {
"value": 1749370.0,
"min": 1009872.0,
"max": 1749370.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.006868889555335045,
"min": -0.012761061079800129,
"max": 0.00510790292173624,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.07555778324604034,
"min": -0.20322734117507935,
"max": 0.06640274077653885,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007158401422202587,
"min": -0.01327992882579565,
"max": 0.003901140997186303,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07874241471290588,
"min": -0.20052023231983185,
"max": 0.05071483179926872,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.45454545454545453,
"min": -0.8571428571428571,
"max": 0.2841142937541008,
"count": 75
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.0,
"min": -18.0,
"max": 3.977600112557411,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.45454545454545453,
"min": -0.8571428571428571,
"max": 0.2841142937541008,
"count": 75
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.0,
"min": -18.0,
"max": 3.977600112557411,
"count": 75
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01296827703966604,
"min": 0.01035859460826032,
"max": 0.020192706960369834,
"count": 35
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01296827703966604,
"min": 0.01035859460826032,
"max": 0.020192706960369834,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003657562037309011,
"min": 0.00014975956655689516,
"max": 0.005848652799613774,
"count": 35
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003657562037309011,
"min": 0.00014975956655689516,
"max": 0.005848652799613774,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003660974717543771,
"min": 0.00014970306795779228,
"max": 0.005846056481823325,
"count": 35
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003660974717543771,
"min": 0.00014970306795779228,
"max": 0.005846056481823325,
"count": 35
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0006,
"min": 0.0006,
"max": 0.0006,
"count": 35
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0006,
"min": 0.0006,
"max": 0.0006,
"count": 35
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 35
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 35
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690802700",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690804337"
},
"total": 1637.1034265750004,
"count": 1,
"self": 0.43743649599946366,
"children": {
"run_training.setup": {
"total": 0.03295709200028796,
"count": 1,
"self": 0.03295709200028796
},
"TrainerController.start_learning": {
"total": 1636.6330329870007,
"count": 1,
"self": 1.2384253270329282,
"children": {
"TrainerController._reset_env": {
"total": 4.411428540999623,
"count": 5,
"self": 4.411428540999623
},
"TrainerController.advance": {
"total": 1630.7821262879688,
"count": 48986,
"self": 1.2025667649395473,
"children": {
"env_step": {
"total": 1315.8824931099662,
"count": 48986,
"self": 1041.419592850044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.7847717199202,
"count": 48986,
"self": 8.683908176870318,
"children": {
"TorchPolicy.evaluate": {
"total": 265.10086354304985,
"count": 97116,
"self": 265.10086354304985
}
}
},
"workers": {
"total": 0.6781285400020352,
"count": 48986,
"self": 0.0,
"children": {
"worker_root": {
"total": 1632.950228529011,
"count": 48986,
"is_parallel": true,
"self": 746.0876349919381,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030742639996788057,
"count": 2,
"is_parallel": true,
"self": 0.0008186979998754396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002255565999803366,
"count": 8,
"is_parallel": true,
"self": 0.002255565999803366
}
}
},
"UnityEnvironment.step": {
"total": 0.04315224199990553,
"count": 1,
"is_parallel": true,
"self": 0.0011571050004022254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000832507999803056,
"count": 1,
"is_parallel": true,
"self": 0.000832507999803056
},
"communicator.exchange": {
"total": 0.0375344529998074,
"count": 1,
"is_parallel": true,
"self": 0.0375344529998074
},
"steps_from_proto": {
"total": 0.0036281759998928464,
"count": 2,
"is_parallel": true,
"self": 0.0006268569991334516,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003001319000759395,
"count": 8,
"is_parallel": true,
"self": 0.003001319000759395
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.009482113999638386,
"count": 8,
"is_parallel": true,
"self": 0.0016659079992678016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007816206000370585,
"count": 32,
"is_parallel": true,
"self": 0.007816206000370585
}
}
},
"UnityEnvironment.step": {
"total": 886.8531114230732,
"count": 48985,
"is_parallel": true,
"self": 54.70159705391279,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.29807094397165,
"count": 48985,
"is_parallel": true,
"self": 34.29807094397165
},
"communicator.exchange": {
"total": 623.1118497290272,
"count": 48985,
"is_parallel": true,
"self": 623.1118497290272
},
"steps_from_proto": {
"total": 174.74159369616154,
"count": 97970,
"is_parallel": true,
"self": 28.404607733481043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 146.3369859626805,
"count": 391880,
"is_parallel": true,
"self": 146.3369859626805
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 313.6970664130631,
"count": 48986,
"self": 9.754627132033875,
"children": {
"process_trajectory": {
"total": 77.68253720902749,
"count": 48986,
"self": 77.46875197402778,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2137852349997047,
"count": 1,
"self": 0.2137852349997047
}
}
},
"_update_policy": {
"total": 226.25990207200175,
"count": 35,
"self": 158.34996710201494,
"children": {
"TorchPOCAOptimizer.update": {
"total": 67.90993496998681,
"count": 1050,
"self": 67.90993496998681
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4509996617562138e-06,
"count": 1,
"self": 1.4509996617562138e-06
},
"TrainerController._save_models": {
"total": 0.20105137999962608,
"count": 1,
"self": 0.0015652529991712072,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19948612700045487,
"count": 1,
"self": 0.19948612700045487
}
}
}
}
}
}
}
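
The JSON above is the raw timers.json that mlagents-learn (0.31.0.dev0, per the metadata block) writes at the end of a run. The "gauges" map holds one entry per logged statistic, each with its most recent value plus the running min, max, and count of summary writes. A minimal sketch for pulling those numbers out with the standard library follows; the relative path is an assumption, so point it at wherever this run's run_logs directory lives.

import json

# Path is an assumption; adjust it to the run_logs folder of your run.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value together with its running min/max/count.
for name, gauge in timers["gauges"].items():
    print(f"{name:55s} value={gauge['value']:>14.6f} "
          f"min={gauge['min']:>14.6f} max={gauge['max']:>14.6f} "
          f"count={gauge['count']}")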
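
Everything below "metadata" is the hierarchical timer tree: each node carries total (wall-clock seconds), count (invocations), self (time not attributed to children), an optional is_parallel flag, and a nested children map. The short recursive walk below, again only a sketch under the same file-path assumption, prints each node's share of the roughly 1637 s this run took; in this log the single largest chunk sits under communicator.exchange (about 623 s), i.e. time spent waiting on the Unity executable.

import json

with open("run_logs/timers.json") as f:  # path is an assumption
    timers = json.load(f)

root_total = timers["total"]

def walk(name, node, depth=0):
    """Print a timer node and its share of the root wall-clock time, then recurse."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    flag = " (parallel)" if node.get("is_parallel") else ""
    share = 100.0 * total / root_total
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} calls ({share:.1f}%){flag}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)

Note that nodes flagged is_parallel (the worker_root subtree here) accumulate time across the parallel environment workers, so their totals can legitimately add up to more than the wall-clock time of the node that owns them.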