{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401413917541504,
"min": 1.401413917541504,
"max": 1.4267528057098389,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71957.0,
"min": 68400.125,
"max": 77161.5234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.66868381240545,
"min": 74.4434250764526,
"max": 402.73387096774195,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49356.0,
"min": 48686.0,
"max": 49990.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999997.0,
"min": 49650.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999997.0,
"min": 49650.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.492232084274292,
"min": 0.21966251730918884,
"max": 2.5303807258605957,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1647.365478515625,
"min": 27.018489837646484,
"max": 1647.365478515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.907749623646354,
"min": 1.7168242907136437,
"max": 3.972880187368548,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2583.02250123024,
"min": 211.16938775777817,
"max": 2583.02250123024,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.907749623646354,
"min": 1.7168242907136437,
"max": 3.972880187368548,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2583.02250123024,
"min": 211.16938775777817,
"max": 2583.02250123024,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015930659174692764,
"min": 0.013345793463668088,
"max": 0.020638795191674334,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047791977524078294,
"min": 0.026691586927336176,
"max": 0.061916385575023006,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06527987892429034,
"min": 0.02048669494688511,
"max": 0.06527987892429034,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19583963677287103,
"min": 0.04097338989377022,
"max": 0.19583963677287103,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5557488147833318e-06,
"min": 3.5557488147833318e-06,
"max": 0.00029534145155285,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0667246444349995e-05,
"min": 1.0667246444349995e-05,
"max": 0.0008441271186242999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118521666666669,
"min": 0.10118521666666669,
"max": 0.19844715000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30355565000000007,
"min": 0.20751079999999994,
"max": 0.5813756999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.914231166666665e-05,
"min": 6.914231166666665e-05,
"max": 0.004922512785000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020742693499999993,
"min": 0.00020742693499999993,
"max": 0.01407064743,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672932637",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672934807"
},
"total": 2169.859613924,
"count": 1,
"self": 0.3895191800002067,
"children": {
"run_training.setup": {
"total": 0.10708111899998585,
"count": 1,
"self": 0.10708111899998585
},
"TrainerController.start_learning": {
"total": 2169.363013625,
"count": 1,
"self": 3.7989682019710926,
"children": {
"TrainerController._reset_env": {
"total": 7.738017861999992,
"count": 1,
"self": 7.738017861999992
},
"TrainerController.advance": {
"total": 2157.710162829029,
"count": 233204,
"self": 3.7102494791720346,
"children": {
"env_step": {
"total": 1685.5543330289067,
"count": 233204,
"self": 1418.4414054849399,
"children": {
"SubprocessEnvManager._take_step": {
"total": 264.6053199779427,
"count": 233204,
"self": 14.067012820907507,
"children": {
"TorchPolicy.evaluate": {
"total": 250.5383071570352,
"count": 223006,
"self": 64.1346565668556,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.4036505901796,
"count": 223006,
"self": 186.4036505901796
}
}
}
}
},
"workers": {
"total": 2.5076075660242623,
"count": 233204,
"self": 0.0,
"children": {
"worker_root": {
"total": 2161.5782700909645,
"count": 233204,
"is_parallel": true,
"self": 994.491597204019,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002235662999964916,
"count": 1,
"is_parallel": true,
"self": 0.0003170989999716767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019185639999932391,
"count": 2,
"is_parallel": true,
"self": 0.0019185639999932391
}
}
},
"UnityEnvironment.step": {
"total": 0.03316976499991142,
"count": 1,
"is_parallel": true,
"self": 0.0002820710000150939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017215899993061612,
"count": 1,
"is_parallel": true,
"self": 0.00017215899993061612
},
"communicator.exchange": {
"total": 0.031900521000011395,
"count": 1,
"is_parallel": true,
"self": 0.031900521000011395
},
"steps_from_proto": {
"total": 0.0008150139999543171,
"count": 1,
"is_parallel": true,
"self": 0.0002786339999829579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005363799999713592,
"count": 2,
"is_parallel": true,
"self": 0.0005363799999713592
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1167.0866728869455,
"count": 233203,
"is_parallel": true,
"self": 34.04428369999914,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.28219440496127,
"count": 233203,
"is_parallel": true,
"self": 75.28219440496127
},
"communicator.exchange": {
"total": 966.6863554770346,
"count": 233203,
"is_parallel": true,
"self": 966.6863554770346
},
"steps_from_proto": {
"total": 91.07383930495064,
"count": 233203,
"is_parallel": true,
"self": 37.32101380489837,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.75282550005227,
"count": 466406,
"is_parallel": true,
"self": 53.75282550005227
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.44558032094983,
"count": 233204,
"self": 5.722726787901706,
"children": {
"process_trajectory": {
"total": 147.2929468090473,
"count": 233204,
"self": 146.0096226120471,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2833241970001836,
"count": 10,
"self": 1.2833241970001836
}
}
},
"_update_policy": {
"total": 315.42990672400083,
"count": 97,
"self": 262.98446572699027,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.445440997010564,
"count": 2910,
"self": 52.445440997010564
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.389997674385086e-07,
"count": 1,
"self": 8.389997674385086e-07
},
"TrainerController._save_models": {
"total": 0.11586389300009614,
"count": 1,
"self": 0.001868934999947669,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11399495800014847,
"count": 1,
"self": 0.11399495800014847
}
}
}
}
}
}
}