{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4033211469650269,
"min": 1.4033211469650269,
"max": 1.4260895252227783,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69587.890625,
"min": 68304.828125,
"max": 76335.875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 78.68203497615262,
"min": 76.72670807453416,
"max": 395.375,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49491.0,
"min": 48977.0,
"max": 50608.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49987.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49987.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4854953289031982,
"min": 0.09897430986166,
"max": 2.4955966472625732,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1560.89111328125,
"min": 12.569737434387207,
"max": 1607.164306640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9615127383523685,
"min": 1.7774303475702842,
"max": 3.9615127383523685,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2487.8299996852875,
"min": 225.7336541414261,
"max": 2501.2240886688232,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9615127383523685,
"min": 1.7774303475702842,
"max": 3.9615127383523685,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2487.8299996852875,
"min": 225.7336541414261,
"max": 2501.2240886688232,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015697793266735968,
"min": 0.014368707828240682,
"max": 0.02205368612485472,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047093379800207905,
"min": 0.028737415656481365,
"max": 0.05815946893611301,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05203511777023475,
"min": 0.02214452183494965,
"max": 0.059683067186011206,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15610535331070424,
"min": 0.0442890436698993,
"max": 0.1790492015580336,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5229988257000065e-06,
"min": 3.5229988257000065e-06,
"max": 0.0002953500765499749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0568996477100019e-05,
"min": 1.0568996477100019e-05,
"max": 0.00084413836862055,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117430000000001,
"min": 0.10117430000000001,
"max": 0.19845002500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352290000000004,
"min": 0.20747694999999997,
"max": 0.58137945,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.85975700000001e-05,
"min": 6.85975700000001e-05,
"max": 0.004922656247499998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002057927100000003,
"min": 0.0002057927100000003,
"max": 0.014070834555000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705789399",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ml-agents/config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705792042"
},
"total": 2642.3328215590004,
"count": 1,
"self": 0.4967438650000986,
"children": {
"run_training.setup": {
"total": 0.05645684199998868,
"count": 1,
"self": 0.05645684199998868
},
"TrainerController.start_learning": {
"total": 2641.7796208520003,
"count": 1,
"self": 5.115104574078032,
"children": {
"TrainerController._reset_env": {
"total": 3.458775797000044,
"count": 1,
"self": 3.458775797000044
},
"TrainerController.advance": {
"total": 2633.0824843649216,
"count": 232688,
"self": 5.289147296750343,
"children": {
"env_step": {
"total": 2087.4389536721683,
"count": 232688,
"self": 1741.5476866438355,
"children": {
"SubprocessEnvManager._take_step": {
"total": 342.6891106861617,
"count": 232688,
"self": 18.204394166118618,
"children": {
"TorchPolicy.evaluate": {
"total": 324.4847165200431,
"count": 222891,
"self": 324.4847165200431
}
}
},
"workers": {
"total": 3.2021563421710653,
"count": 232688,
"self": 0.0,
"children": {
"worker_root": {
"total": 2633.9968415680096,
"count": 232688,
"is_parallel": true,
"self": 1213.9723888672427,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006986739999774727,
"count": 1,
"is_parallel": true,
"self": 0.00019582499976422696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005028490002132457,
"count": 2,
"is_parallel": true,
"self": 0.0005028490002132457
}
}
},
"UnityEnvironment.step": {
"total": 0.030087356999956683,
"count": 1,
"is_parallel": true,
"self": 0.00036709000028167793,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019478199988043343,
"count": 1,
"is_parallel": true,
"self": 0.00019478199988043343
},
"communicator.exchange": {
"total": 0.028754818999914278,
"count": 1,
"is_parallel": true,
"self": 0.028754818999914278
},
"steps_from_proto": {
"total": 0.0007706659998802934,
"count": 1,
"is_parallel": true,
"self": 0.00019792399984908116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005727420000312122,
"count": 2,
"is_parallel": true,
"self": 0.0005727420000312122
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1420.024452700767,
"count": 232687,
"is_parallel": true,
"self": 43.570789307038694,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.7793957308611,
"count": 232687,
"is_parallel": true,
"self": 86.7793957308611
},
"communicator.exchange": {
"total": 1191.2882936089195,
"count": 232687,
"is_parallel": true,
"self": 1191.2882936089195
},
"steps_from_proto": {
"total": 98.38597405394762,
"count": 232687,
"is_parallel": true,
"self": 34.909734926912506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.476239127035115,
"count": 465374,
"is_parallel": true,
"self": 63.476239127035115
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 540.3543833960027,
"count": 232688,
"self": 7.600867555075183,
"children": {
"process_trajectory": {
"total": 165.48935803492645,
"count": 232688,
"self": 164.13738695392726,
"children": {
"RLTrainer._checkpoint": {
"total": 1.351971080999192,
"count": 10,
"self": 1.351971080999192
}
}
},
"_update_policy": {
"total": 367.2641578060011,
"count": 97,
"self": 300.90669256200067,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.35746524400042,
"count": 2910,
"self": 66.35746524400042
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0350004231440835e-06,
"count": 1,
"self": 1.0350004231440835e-06
},
"TrainerController._save_models": {
"total": 0.1232550810000248,
"count": 1,
"self": 0.00295050699969579,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12030457400032901,
"count": 1,
"self": 0.12030457400032901
}
}
}
}
}
}
}