{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9305263161659241,
"min": 0.9305263161659241,
"max": 2.865267276763916,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8905.13671875,
"min": 8905.13671875,
"max": 29406.23828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.175753593444824,
"min": 0.23729316890239716,
"max": 13.175753593444824,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2569.27197265625,
"min": 46.034873962402344,
"max": 2654.335205078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.028572979965247214,
"min": 0.023970683904887363,
"max": 0.03743098277300305,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.11429191986098886,
"min": 0.09588273561954945,
"max": 0.18197435707164308,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22108894120901823,
"min": 0.14424635174994668,
"max": 0.3848553809026877,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8843557648360729,
"min": 0.5769854069997867,
"max": 1.7357314663628738,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.694009730600002e-05,
"min": 2.694009730600002e-05,
"max": 0.000972940002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00010776038922400007,
"min": 0.00010776038922400007,
"max": 0.00461720003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102694,
"min": 0.102694,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.410776,
"min": 0.410776,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014443060000000012,
"min": 0.00014443060000000012,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000005,
"min": 0.0005777224000000005,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.181818181818183,
"min": 3.227272727272727,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1152.0,
"min": 142.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.181818181818183,
"min": 3.227272727272727,
"max": 26.181818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1152.0,
"min": 142.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718819462",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718819810"
},
"total": 348.404677684,
"count": 1,
"self": 0.3220852630000195,
"children": {
"run_training.setup": {
"total": 0.05577927899997803,
"count": 1,
"self": 0.05577927899997803
},
"TrainerController.start_learning": {
"total": 348.026813142,
"count": 1,
"self": 0.465913246005357,
"children": {
"TrainerController._reset_env": {
"total": 2.7305560730000025,
"count": 1,
"self": 2.7305560730000025
},
"TrainerController.advance": {
"total": 344.7425435839945,
"count": 18204,
"self": 0.22380590000290113,
"children": {
"env_step": {
"total": 344.5187376839916,
"count": 18204,
"self": 213.31865222097338,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.97832079100743,
"count": 18204,
"self": 1.210344158015232,
"children": {
"TorchPolicy.evaluate": {
"total": 129.7679766329922,
"count": 18204,
"self": 129.7679766329922
}
}
},
"workers": {
"total": 0.2217646720108064,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 347.3915572850024,
"count": 18204,
"is_parallel": true,
"self": 162.29836631699862,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021836739999798738,
"count": 1,
"is_parallel": true,
"self": 0.0006545129999153687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001529161000064505,
"count": 10,
"is_parallel": true,
"self": 0.001529161000064505
}
}
},
"UnityEnvironment.step": {
"total": 0.02658194100001765,
"count": 1,
"is_parallel": true,
"self": 0.00045644500005437294,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00027316599999949176,
"count": 1,
"is_parallel": true,
"self": 0.00027316599999949176
},
"communicator.exchange": {
"total": 0.024529020999978002,
"count": 1,
"is_parallel": true,
"self": 0.024529020999978002
},
"steps_from_proto": {
"total": 0.0013233089999857839,
"count": 1,
"is_parallel": true,
"self": 0.00027185200002577403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010514569999600099,
"count": 10,
"is_parallel": true,
"self": 0.0010514569999600099
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 185.0931909680038,
"count": 18203,
"is_parallel": true,
"self": 6.968714000008788,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.7116508169955864,
"count": 18203,
"is_parallel": true,
"self": 3.7116508169955864
},
"communicator.exchange": {
"total": 151.7768035519968,
"count": 18203,
"is_parallel": true,
"self": 151.7768035519968
},
"steps_from_proto": {
"total": 22.636022599002615,
"count": 18203,
"is_parallel": true,
"self": 4.333983037982591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 18.302039561020024,
"count": 182030,
"is_parallel": true,
"self": 18.302039561020024
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015310200001295016,
"count": 1,
"self": 0.00015310200001295016,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 343.6965139989527,
"count": 264364,
"is_parallel": true,
"self": 3.6482737999588153,
"children": {
"process_trajectory": {
"total": 225.63215340299365,
"count": 264364,
"is_parallel": true,
"self": 224.9334394839936,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6987139190000562,
"count": 4,
"is_parallel": true,
"self": 0.6987139190000562
}
}
},
"_update_policy": {
"total": 114.41608679600023,
"count": 90,
"is_parallel": true,
"self": 37.705977276997544,
"children": {
"TorchPPOOptimizer.update": {
"total": 76.71010951900269,
"count": 1080,
"is_parallel": true,
"self": 76.71010951900269
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08764713700008997,
"count": 1,
"self": 0.0008755590001783276,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08677157799991164,
"count": 1,
"self": 0.08677157799991164
}
}
}
}
}
}
}
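
The JSON above appears to be the timers file that ML-Agents writes alongside a training run (here a SnowballTarget PPO run, per the command line in "metadata"): "gauges" holds value/min/max/count summaries for each logged statistic, and the nested timer tree records total/self/count wall-clock seconds per profiled call. Below is a minimal sketch of reading that structure back; the local file name "timers.json" is an assumption, and only the standard-library json module is used.

# Minimal sketch (not part of the original file): load the JSON above and
# summarize one gauge plus one branch of the timer tree.
import json

with open("timers.json") as f:  # assumed local copy of the file above
    data = json.load(f)

# Each gauge stores value/min/max/count for one logged statistic.
reward = data["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"mean cumulative reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f}, {reward['count']} summaries)")

# The timer tree nests children per profiled call; walk down to
# communicator.exchange to see how much time was spent waiting on Unity.
node = data
for key in ("TrainerController.start_learning", "TrainerController.advance",
            "env_step", "workers", "worker_root",
            "UnityEnvironment.step", "communicator.exchange"):
    node = node["children"][key]
print(f"communicator.exchange: {node['total']:.1f}s of {data['total']:.1f}s "
      f"({100 * node['total'] / data['total']:.1f}%)")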