ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4056673049926758,
"min": 1.4056673049926758,
"max": 1.430708885192871,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71919.5625,
"min": 68464.0625,
"max": 78072.515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.25925925925925,
"min": 93.49527410207939,
"max": 368.77037037037036,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50184.0,
"min": 48804.0,
"max": 50350.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49866.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49866.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3469860553741455,
"min": 0.017784278839826584,
"max": 2.42008900642395,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1140.63525390625,
"min": 2.436446189880371,
"max": 1246.785400390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.656336315006876,
"min": 1.8529174482300335,
"max": 3.799275526995601,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1776.9794490933418,
"min": 253.84969040751457,
"max": 1884.6119043827057,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.656336315006876,
"min": 1.8529174482300335,
"max": 3.799275526995601,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1776.9794490933418,
"min": 253.84969040751457,
"max": 1884.6119043827057,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014654411519101511,
"min": 0.013525732476167226,
"max": 0.02084803735294069,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.029308823038203022,
"min": 0.02705146495233445,
"max": 0.05748982474130268,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05198853351175785,
"min": 0.02175226714462042,
"max": 0.06205614792803923,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1039770670235157,
"min": 0.04350453428924084,
"max": 0.18616844378411768,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.6104984631999965e-06,
"min": 4.6104984631999965e-06,
"max": 0.00029530672656442506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.220996926399993e-06,
"min": 9.220996926399993e-06,
"max": 0.0008442487685837501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10153680000000001,
"min": 0.10153680000000001,
"max": 0.198435575,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20307360000000002,
"min": 0.20307360000000002,
"max": 0.5814162500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.668631999999998e-05,
"min": 8.668631999999998e-05,
"max": 0.004921935192500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017337263999999997,
"min": 0.00017337263999999997,
"max": 0.014072670875000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714073629",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714076209"
},
"total": 2579.9167474829997,
"count": 1,
"self": 0.4895115419994909,
"children": {
"run_training.setup": {
"total": 0.08846238500007075,
"count": 1,
"self": 0.08846238500007075
},
"TrainerController.start_learning": {
"total": 2579.338773556,
"count": 1,
"self": 4.604408637012966,
"children": {
"TrainerController._reset_env": {
"total": 2.9654170379999414,
"count": 1,
"self": 2.9654170379999414
},
"TrainerController.advance": {
"total": 2571.649080812987,
"count": 231069,
"self": 4.878382835135653,
"children": {
"env_step": {
"total": 2090.1446619779026,
"count": 231069,
"self": 1731.525330326031,
"children": {
"SubprocessEnvManager._take_step": {
"total": 355.39671416390456,
"count": 231069,
"self": 19.075466327900358,
"children": {
"TorchPolicy.evaluate": {
"total": 336.3212478360042,
"count": 223019,
"self": 336.3212478360042
}
}
},
"workers": {
"total": 3.2226174879671134,
"count": 231069,
"self": 0.0,
"children": {
"worker_root": {
"total": 2571.5826449540177,
"count": 231069,
"is_parallel": true,
"self": 1178.1025901690834,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009767960000317544,
"count": 1,
"is_parallel": true,
"self": 0.0002775809999775447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006992150000542097,
"count": 2,
"is_parallel": true,
"self": 0.0006992150000542097
}
}
},
"UnityEnvironment.step": {
"total": 0.03180550900003709,
"count": 1,
"is_parallel": true,
"self": 0.00042142899997088534,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021636100007071946,
"count": 1,
"is_parallel": true,
"self": 0.00021636100007071946
},
"communicator.exchange": {
"total": 0.030320886000026803,
"count": 1,
"is_parallel": true,
"self": 0.030320886000026803
},
"steps_from_proto": {
"total": 0.000846832999968683,
"count": 1,
"is_parallel": true,
"self": 0.0002417439999362614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006050890000324216,
"count": 2,
"is_parallel": true,
"self": 0.0006050890000324216
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1393.4800547849343,
"count": 231068,
"is_parallel": true,
"self": 41.583538315890564,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.62019090607225,
"count": 231068,
"is_parallel": true,
"self": 91.62019090607225
},
"communicator.exchange": {
"total": 1160.9794712879968,
"count": 231068,
"is_parallel": true,
"self": 1160.9794712879968
},
"steps_from_proto": {
"total": 99.29685427497475,
"count": 231068,
"is_parallel": true,
"self": 37.620524219094364,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.676330055880385,
"count": 462136,
"is_parallel": true,
"self": 61.676330055880385
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 476.62603599994884,
"count": 231069,
"self": 7.244574380962604,
"children": {
"process_trajectory": {
"total": 159.65623101998688,
"count": 231069,
"self": 158.19028733898688,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4659436809999988,
"count": 10,
"self": 1.4659436809999988
}
}
},
"_update_policy": {
"total": 309.72523059899936,
"count": 96,
"self": 247.12286658401467,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.60236401498469,
"count": 2880,
"self": 62.60236401498469
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.990000424091704e-07,
"count": 1,
"self": 8.990000424091704e-07
},
"TrainerController._save_models": {
"total": 0.11986616900003355,
"count": 1,
"self": 0.0019689199998538243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11789724900017973,
"count": 1,
"self": 0.11789724900017973
}
}
}
}
}
}
}