First Push
7ca0697
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9555612802505493,
"min": 0.936212956905365,
"max": 2.877584218978882,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9123.69921875,
"min": 9123.69921875,
"max": 29564.30078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.15113639831543,
"min": 0.37244194746017456,
"max": 12.15113639831543,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2369.4716796875,
"min": 72.25373840332031,
"max": 2476.76953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06519780296309595,
"min": 0.06279386983392304,
"max": 0.07301818262694368,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2607912118523838,
"min": 0.2511754793356922,
"max": 0.36509091313471836,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.191707533729427,
"min": 0.11889657997480575,
"max": 0.2851898031491859,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.766830134917708,
"min": 0.475586319899223,
"max": 1.4259490157459296,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.931818181818183,
"min": 3.477272727272727,
"max": 23.931818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1053.0,
"min": 153.0,
"max": 1307.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.931818181818183,
"min": 3.477272727272727,
"max": 23.931818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1053.0,
"min": 153.0,
"max": 1307.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689112603",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689113068"
},
"total": 464.9478828230001,
"count": 1,
"self": 0.43510287600008724,
"children": {
"run_training.setup": {
"total": 0.041160321999996086,
"count": 1,
"self": 0.041160321999996086
},
"TrainerController.start_learning": {
"total": 464.471619625,
"count": 1,
"self": 0.5489255670117927,
"children": {
"TrainerController._reset_env": {
"total": 4.08380502600005,
"count": 1,
"self": 4.08380502600005
},
"TrainerController.advance": {
"total": 459.69681944198817,
"count": 18201,
"self": 0.25222234597720217,
"children": {
"env_step": {
"total": 459.44459709601097,
"count": 18201,
"self": 337.48616796501244,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.70053765199475,
"count": 18201,
"self": 1.8650579739810382,
"children": {
"TorchPolicy.evaluate": {
"total": 119.83547967801371,
"count": 18201,
"self": 119.83547967801371
}
}
},
"workers": {
"total": 0.2578914790037743,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 462.85615569500396,
"count": 18201,
"is_parallel": true,
"self": 218.62783515600108,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005298880999930589,
"count": 1,
"is_parallel": true,
"self": 0.0036629259999472197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001635954999983369,
"count": 10,
"is_parallel": true,
"self": 0.001635954999983369
}
}
},
"UnityEnvironment.step": {
"total": 0.03508686900011071,
"count": 1,
"is_parallel": true,
"self": 0.0006429830001479786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003344539999261542,
"count": 1,
"is_parallel": true,
"self": 0.0003344539999261542
},
"communicator.exchange": {
"total": 0.03200260000005528,
"count": 1,
"is_parallel": true,
"self": 0.03200260000005528
},
"steps_from_proto": {
"total": 0.0021068319999812957,
"count": 1,
"is_parallel": true,
"self": 0.0003583020001087789,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017485299998725168,
"count": 10,
"is_parallel": true,
"self": 0.0017485299998725168
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.22832053900288,
"count": 18200,
"is_parallel": true,
"self": 10.225537699014694,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.07230876299343,
"count": 18200,
"is_parallel": true,
"self": 5.07230876299343
},
"communicator.exchange": {
"total": 193.84522547299912,
"count": 18200,
"is_parallel": true,
"self": 193.84522547299912
},
"steps_from_proto": {
"total": 35.08524860399564,
"count": 18200,
"is_parallel": true,
"self": 6.351173738004945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.734074865990692,
"count": 182000,
"is_parallel": true,
"self": 28.734074865990692
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015494699994178518,
"count": 1,
"self": 0.00015494699994178518,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 456.152562381043,
"count": 442630,
"is_parallel": true,
"self": 9.702451761070847,
"children": {
"process_trajectory": {
"total": 247.9258669009722,
"count": 442630,
"is_parallel": true,
"self": 246.83381302397208,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0920538770001258,
"count": 4,
"is_parallel": true,
"self": 1.0920538770001258
}
}
},
"_update_policy": {
"total": 198.52424371899997,
"count": 90,
"is_parallel": true,
"self": 80.11556589400027,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.4086778249997,
"count": 4587,
"is_parallel": true,
"self": 118.4086778249997
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14191464300006373,
"count": 1,
"self": 0.0008432220000713642,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14107142099999237,
"count": 1,
"self": 0.14107142099999237
}
}
}
}
}
}
}
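The JSON above is an ML-Agents run log in the timers.json style: each entry under "gauges" records a metric value together with its min, max, and count over the run, "metadata" captures the run environment (mlagents version, command line, PyTorch/numpy versions, start and end times), and the remaining "total"/"count"/"self"/"children" keys form the hierarchical profiling timer tree. Below is a minimal sketch of inspecting such a file with the Python standard library; the local path run_logs/timers.json is an assumption, not something stated in the file.

# Minimal sketch: summarize the gauges and total wall-clock time from an
# ML-Agents timers-style JSON log. The file path below is assumed.
import json

with open("run_logs/timers.json") as f:
    run_log = json.load(f)

# One line per metric: recorded value plus the min/max/count seen during the run.
for name, gauge in run_log["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# Total seconds spent in the root timer node.
print("total seconds:", run_log["total"])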