{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9412683844566345,
"min": 0.9412683844566345,
"max": 2.868903875350952,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9007.9384765625,
"min": 9007.9384765625,
"max": 29412.001953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.920598030090332,
"min": 0.5027452111244202,
"max": 12.920598030090332,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2519.5166015625,
"min": 97.5325698852539,
"max": 2604.86767578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07330537104281855,
"min": 0.06327827169037596,
"max": 0.07330537104281855,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2932214841712742,
"min": 0.25311308676150385,
"max": 0.36003723643108404,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19159745592887722,
"min": 0.09708070110725969,
"max": 0.26526275242076197,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7663898237155089,
"min": 0.38832280442903877,
"max": 1.32631376210381,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.130097290000004e-06,
"min": 8.130097290000004e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.2520389160000015e-05,
"min": 3.2520389160000015e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10271,
"min": 0.10271,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41084,
"min": 0.41084,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014522900000000006,
"min": 0.00014522900000000006,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005809160000000002,
"min": 0.0005809160000000002,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 2.7954545454545454,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 123.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 2.7954545454545454,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 123.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723457852",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723458301"
},
"total": 448.73903989099995,
"count": 1,
"self": 0.7324942400000509,
"children": {
"run_training.setup": {
"total": 0.0610516669999015,
"count": 1,
"self": 0.0610516669999015
},
"TrainerController.start_learning": {
"total": 447.945493984,
"count": 1,
"self": 0.5581625700076529,
"children": {
"TrainerController._reset_env": {
"total": 2.7789906699999847,
"count": 1,
"self": 2.7789906699999847
},
"TrainerController.advance": {
"total": 444.4669274349924,
"count": 18207,
"self": 0.26121752602057313,
"children": {
"env_step": {
"total": 444.2057099089718,
"count": 18207,
"self": 286.8381953039793,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.09187040098755,
"count": 18207,
"self": 1.4797179219874579,
"children": {
"TorchPolicy.evaluate": {
"total": 155.6121524790001,
"count": 18207,
"self": 155.6121524790001
}
}
},
"workers": {
"total": 0.2756442040049478,
"count": 18207,
"self": 0.0,
"children": {
"worker_root": {
"total": 446.7098390810022,
"count": 18207,
"is_parallel": true,
"self": 227.23136646600165,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0053915280000182975,
"count": 1,
"is_parallel": true,
"self": 0.0037873050002872333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016042229997310642,
"count": 10,
"is_parallel": true,
"self": 0.0016042229997310642
}
}
},
"UnityEnvironment.step": {
"total": 0.03766029899998102,
"count": 1,
"is_parallel": true,
"self": 0.0006264639998789789,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003738309999334888,
"count": 1,
"is_parallel": true,
"self": 0.0003738309999334888
},
"communicator.exchange": {
"total": 0.03406527300012385,
"count": 1,
"is_parallel": true,
"self": 0.03406527300012385
},
"steps_from_proto": {
"total": 0.0025947310000447033,
"count": 1,
"is_parallel": true,
"self": 0.0010973319997447106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014973990002999926,
"count": 10,
"is_parallel": true,
"self": 0.0014973990002999926
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 219.47847261500056,
"count": 18206,
"is_parallel": true,
"self": 10.055651971015777,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.5208270689709025,
"count": 18206,
"is_parallel": true,
"self": 5.5208270689709025
},
"communicator.exchange": {
"total": 169.79333737600064,
"count": 18206,
"is_parallel": true,
"self": 169.79333737600064
},
"steps_from_proto": {
"total": 34.10865619901324,
"count": 18206,
"is_parallel": true,
"self": 6.529487703983477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.579168495029762,
"count": 182060,
"is_parallel": true,
"self": 27.579168495029762
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016161600001396437,
"count": 1,
"self": 0.00016161600001396437,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 438.73597889594976,
"count": 668829,
"is_parallel": true,
"self": 14.143739439912451,
"children": {
"process_trajectory": {
"total": 242.79635187003714,
"count": 668829,
"is_parallel": true,
"self": 241.42847312303752,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3678787469996223,
"count": 4,
"is_parallel": true,
"self": 1.3678787469996223
}
}
},
"_update_policy": {
"total": 181.79588758600016,
"count": 90,
"is_parallel": true,
"self": 58.06028061899087,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.73560696700929,
"count": 4587,
"is_parallel": true,
"self": 123.73560696700929
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14125169299995832,
"count": 1,
"self": 0.0012442720001217822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14000742099983654,
"count": 1,
"self": 0.14000742099983654
}
}
}
}
}
}
}