ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4055194854736328,
"min": 1.4055194854736328,
"max": 1.4258497953414917,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69553.5390625,
"min": 61744.63671875,
"max": 72821.015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 104.0798319327731,
"min": 83.2546689303905,
"max": 425.59550561797755,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49542.0,
"min": 37878.0,
"max": 50178.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999940.0,
"min": 49403.0,
"max": 1999940.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999940.0,
"min": 49403.0,
"max": 1999940.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.399080514907837,
"min": 0.05756046622991562,
"max": 2.4195656776428223,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1141.9622802734375,
"min": 5.06532096862793,
"max": 1382.817138671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6377756694535246,
"min": 1.6894428567452864,
"max": 3.916169394155869,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1731.5812186598778,
"min": 148.6709713935852,
"max": 2220.4680464863777,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6377756694535246,
"min": 1.6894428567452864,
"max": 3.916169394155869,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1731.5812186598778,
"min": 148.6709713935852,
"max": 2220.4680464863777,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014223136986765895,
"min": 0.012394313208157353,
"max": 0.021061497884632747,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.02844627397353179,
"min": 0.015538538176527558,
"max": 0.05649299843159194,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05152300428599119,
"min": 0.021355834872358374,
"max": 0.05509958720455567,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10304600857198239,
"min": 0.03286679244289795,
"max": 0.15847746431827547,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.118073960675005e-06,
"min": 3.118073960675005e-06,
"max": 0.00029509875163375,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 6.23614792135001e-06,
"min": 6.23614792135001e-06,
"max": 0.0008666023611325498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103932500000004,
"min": 0.10103932500000004,
"max": 0.19836625000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20207865000000008,
"min": 0.19836625000000005,
"max": 0.5888674500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.18623175000001e-05,
"min": 6.18623175000001e-05,
"max": 0.004918475874999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001237246350000002,
"min": 0.0001237246350000002,
"max": 0.014444485754999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710290032",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710292388"
},
"total": 2356.3989663189996,
"count": 1,
"self": 0.43089623700006996,
"children": {
"run_training.setup": {
"total": 0.05109205899998415,
"count": 1,
"self": 0.05109205899998415
},
"TrainerController.start_learning": {
"total": 2355.9169780229995,
"count": 1,
"self": 4.272239820980758,
"children": {
"TrainerController._reset_env": {
"total": 2.5495407000000796,
"count": 1,
"self": 2.5495407000000796
},
"TrainerController.advance": {
"total": 2348.977194847019,
"count": 231023,
"self": 4.555003559106808,
"children": {
"env_step": {
"total": 1877.8115208439694,
"count": 231023,
"self": 1556.7170761880395,
"children": {
"SubprocessEnvManager._take_step": {
"total": 318.4044720369393,
"count": 231023,
"self": 16.23419057807564,
"children": {
"TorchPolicy.evaluate": {
"total": 302.17028145886366,
"count": 221634,
"self": 302.17028145886366
}
}
},
"workers": {
"total": 2.6899726189906232,
"count": 231023,
"self": 0.0,
"children": {
"worker_root": {
"total": 2348.7963789159717,
"count": 231023,
"is_parallel": true,
"self": 1089.4035788659423,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009623540001939546,
"count": 1,
"is_parallel": true,
"self": 0.00025948600023184554,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000702867999962109,
"count": 2,
"is_parallel": true,
"self": 0.000702867999962109
}
}
},
"UnityEnvironment.step": {
"total": 0.0286996069999077,
"count": 1,
"is_parallel": true,
"self": 0.0004402339995976945,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021557600007326982,
"count": 1,
"is_parallel": true,
"self": 0.00021557600007326982
},
"communicator.exchange": {
"total": 0.02731145200004903,
"count": 1,
"is_parallel": true,
"self": 0.02731145200004903
},
"steps_from_proto": {
"total": 0.0007323450001877063,
"count": 1,
"is_parallel": true,
"self": 0.00019246000033490418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005398849998528021,
"count": 2,
"is_parallel": true,
"self": 0.0005398849998528021
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1259.3928000500293,
"count": 231022,
"is_parallel": true,
"self": 39.44292867305853,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.1017973998878,
"count": 231022,
"is_parallel": true,
"self": 80.1017973998878
},
"communicator.exchange": {
"total": 1050.4378642560866,
"count": 231022,
"is_parallel": true,
"self": 1050.4378642560866
},
"steps_from_proto": {
"total": 89.41020972099636,
"count": 231022,
"is_parallel": true,
"self": 31.87067957210911,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.53953014888725,
"count": 462044,
"is_parallel": true,
"self": 57.53953014888725
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 466.6106704439428,
"count": 231023,
"self": 6.5133141167546,
"children": {
"process_trajectory": {
"total": 149.71370519318975,
"count": 231023,
"self": 148.50735107919013,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2063541139996232,
"count": 10,
"self": 1.2063541139996232
}
}
},
"_update_policy": {
"total": 310.38365113399846,
"count": 96,
"self": 248.6322679219868,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.75138321201166,
"count": 2880,
"self": 61.75138321201166
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.189999789465219e-07,
"count": 1,
"self": 8.189999789465219e-07
},
"TrainerController._save_models": {
"total": 0.11800183600007585,
"count": 1,
"self": 0.0025561440006640623,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11544569199941179,
"count": 1,
"self": 0.11544569199941179
}
}
}
}
}
}
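
For reference, below is a minimal sketch of how a log like this can be summarized offline. It assumes only the Python standard library and that the file has been downloaded locally as run_logs/timers.json; the path and the helper function name are assumptions for illustration, not part of the ML-Agents tooling.

import json

# Load the run log shown above (local path is an assumption).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the last value plus min/max/count over the run.
for name, g in sorted(timers["gauges"].items()):
    print(f"{name}: value={g['value']:.4f} "
          f"(min={g['min']:.4f}, max={g['max']:.4f}, count={g['count']})")

# The rest of the file is a nested timer tree: every node carries
# total wall-clock seconds, a call count, and optional children.
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

Run against this file, the tree printout makes it easy to see that most of the ~2356 s of wall-clock time sits under TrainerController.advance, split between env_step (environment stepping and inference) and trainer_advance (trajectory processing and PPO updates).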