{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7498764395713806,
"min": 0.7498764395713806,
"max": 2.853954792022705,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7126.82568359375,
"min": 7126.82568359375,
"max": 29133.169921875,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.129326820373535,
"min": 0.21694935858249664,
"max": 13.302552223205566,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2547.08935546875,
"min": 42.08817672729492,
"max": 2713.720703125,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06237361735796131,
"min": 0.06102633918549948,
"max": 0.076506828447272,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24949446943184525,
"min": 0.24410535674199793,
"max": 0.38087669455340156,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15757101412643404,
"min": 0.09607985840343378,
"max": 0.2495065285879023,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6302840565057362,
"min": 0.3843194336137351,
"max": 1.2475326429395115,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.7456980848e-06,
"min": 5.7456980848e-06,
"max": 0.0002935056021648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.29827923392e-05,
"min": 2.29827923392e-05,
"max": 0.0014081280306239997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 3.7045454545454546,
"max": 26.09090909090909,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 163.0,
"max": 1435.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 3.7045454545454546,
"max": 26.09090909090909,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 163.0,
"max": 1435.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1769894145",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1769894671"
},
"total": 525.4305488040001,
"count": 1,
"self": 0.43517932300017037,
"children": {
"run_training.setup": {
"total": 0.026357548999953906,
"count": 1,
"self": 0.026357548999953906
},
"TrainerController.start_learning": {
"total": 524.969011932,
"count": 1,
"self": 0.4063689640138364,
"children": {
"TrainerController._reset_env": {
"total": 2.943450650999921,
"count": 1,
"self": 2.943450650999921
},
"TrainerController.advance": {
"total": 521.545380152986,
"count": 22728,
"self": 0.4219052990054024,
"children": {
"env_step": {
"total": 375.5976816529925,
"count": 22728,
"self": 294.5825825719918,
"children": {
"SubprocessEnvManager._take_step": {
"total": 80.7671016880114,
"count": 22728,
"self": 1.460853781005767,
"children": {
"TorchPolicy.evaluate": {
"total": 79.30624790700563,
"count": 22728,
"self": 79.30624790700563
}
}
},
"workers": {
"total": 0.2479973929893049,
"count": 22728,
"self": 0.0,
"children": {
"worker_root": {
"total": 522.802646034005,
"count": 22728,
"is_parallel": true,
"self": 263.63541238800747,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005017296999994869,
"count": 1,
"is_parallel": true,
"self": 0.0036315639999884297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013857330000064394,
"count": 10,
"is_parallel": true,
"self": 0.0013857330000064394
}
}
},
"UnityEnvironment.step": {
"total": 0.0364463920000162,
"count": 1,
"is_parallel": true,
"self": 0.0005731820000391963,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038880099998550577,
"count": 1,
"is_parallel": true,
"self": 0.00038880099998550577
},
"communicator.exchange": {
"total": 0.03368637799997032,
"count": 1,
"is_parallel": true,
"self": 0.03368637799997032
},
"steps_from_proto": {
"total": 0.001798031000021183,
"count": 1,
"is_parallel": true,
"self": 0.0003431369999589151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014548940000622679,
"count": 10,
"is_parallel": true,
"self": 0.0014548940000622679
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 259.1672336459975,
"count": 22727,
"is_parallel": true,
"self": 12.50310607400661,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.524809306989027,
"count": 22727,
"is_parallel": true,
"self": 6.524809306989027
},
"communicator.exchange": {
"total": 197.82578640198744,
"count": 22727,
"is_parallel": true,
"self": 197.82578640198744
},
"steps_from_proto": {
"total": 42.31353186301442,
"count": 22727,
"is_parallel": true,
"self": 7.6822490900301545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.631282772984264,
"count": 227270,
"is_parallel": true,
"self": 34.631282772984264
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 145.52579320098812,
"count": 22728,
"self": 0.5145760540027595,
"children": {
"process_trajectory": {
"total": 30.967017710985942,
"count": 22728,
"self": 30.40582623298576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5611914780001825,
"count": 5,
"self": 0.5611914780001825
}
}
},
"_update_policy": {
"total": 114.04419943599942,
"count": 113,
"self": 47.25833948199977,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.78585995399965,
"count": 5760,
"self": 66.78585995399965
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0220001058769412e-06,
"count": 1,
"self": 1.0220001058769412e-06
},
"TrainerController._save_models": {
"total": 0.07381114200006778,
"count": 1,
"self": 0.0006677950002540456,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07314334699981373,
"count": 1,
"self": 0.07314334699981373
}
}
}
}
}
}
}