{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4025731086730957,
"min": 1.4025731086730957,
"max": 1.4271453619003296,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71360.1171875,
"min": 68919.8828125,
"max": 76578.671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 95.5,
"min": 95.5,
"max": 402.9024390243902,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49469.0,
"min": 48910.0,
"max": 50259.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999978.0,
"min": 49758.0,
"max": 1999978.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999978.0,
"min": 49758.0,
"max": 1999978.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3565216064453125,
"min": -0.028351930901408195,
"max": 2.4237518310546875,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1220.67822265625,
"min": -3.572343349456787,
"max": 1220.67822265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6452895353429566,
"min": 1.858753201034334,
"max": 3.8419011288979013,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1888.2599793076515,
"min": 234.20290333032608,
"max": 1918.1212958097458,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6452895353429566,
"min": 1.858753201034334,
"max": 3.8419011288979013,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1888.2599793076515,
"min": 234.20290333032608,
"max": 1918.1212958097458,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016770521653719093,
"min": 0.013966655177622064,
"max": 0.019496173627946214,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.033541043307438186,
"min": 0.02793331035524413,
"max": 0.056049939427369584,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05617246019343535,
"min": 0.022097081566850342,
"max": 0.06787428415069978,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1123449203868707,
"min": 0.044194163133700684,
"max": 0.19016470598677795,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.5372984876000075e-06,
"min": 4.5372984876000075e-06,
"max": 0.0002953549515483501,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.074596975200015e-06,
"min": 9.074596975200015e-06,
"max": 0.0008439945186684999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10151239999999997,
"min": 0.10151239999999997,
"max": 0.19845164999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20302479999999995,
"min": 0.20302479999999995,
"max": 0.5813315000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.546876000000014e-05,
"min": 8.546876000000014e-05,
"max": 0.004922737335000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017093752000000027,
"min": 0.00017093752000000027,
"max": 0.014068441849999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676259968",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676262581"
},
"total": 2612.962377013,
"count": 1,
"self": 0.4382577610003864,
"children": {
"run_training.setup": {
"total": 0.1108448640000006,
"count": 1,
"self": 0.1108448640000006
},
"TrainerController.start_learning": {
"total": 2612.413274388,
"count": 1,
"self": 4.832952297999782,
"children": {
"TrainerController._reset_env": {
"total": 11.01579237300001,
"count": 1,
"self": 11.01579237300001
},
"TrainerController.advance": {
"total": 2596.4372040420003,
"count": 231480,
"self": 5.141793480137039,
"children": {
"env_step": {
"total": 2020.2805748399421,
"count": 231480,
"self": 1682.9398346580372,
"children": {
"SubprocessEnvManager._take_step": {
"total": 334.24042898998385,
"count": 231480,
"self": 17.41542294392849,
"children": {
"TorchPolicy.evaluate": {
"total": 316.82500604605536,
"count": 222974,
"self": 78.78765726311474,
"children": {
"TorchPolicy.sample_actions": {
"total": 238.03734878294063,
"count": 222974,
"self": 238.03734878294063
}
}
}
}
},
"workers": {
"total": 3.1003111919211506,
"count": 231480,
"self": 0.0,
"children": {
"worker_root": {
"total": 2602.770834867939,
"count": 231480,
"is_parallel": true,
"self": 1239.5961724479835,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020750830000224596,
"count": 1,
"is_parallel": true,
"self": 0.00039835300009372077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016767299999287388,
"count": 2,
"is_parallel": true,
"self": 0.0016767299999287388
}
}
},
"UnityEnvironment.step": {
"total": 0.03195866300001171,
"count": 1,
"is_parallel": true,
"self": 0.00037321200005635546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002450060000001031,
"count": 1,
"is_parallel": true,
"self": 0.0002450060000001031
},
"communicator.exchange": {
"total": 0.030573743999980252,
"count": 1,
"is_parallel": true,
"self": 0.030573743999980252
},
"steps_from_proto": {
"total": 0.0007667009999750007,
"count": 1,
"is_parallel": true,
"self": 0.00028614599995080425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004805550000241965,
"count": 2,
"is_parallel": true,
"self": 0.0004805550000241965
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1363.1746624199554,
"count": 231479,
"is_parallel": true,
"self": 40.727294200991764,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.96280683201098,
"count": 231479,
"is_parallel": true,
"self": 85.96280683201098
},
"communicator.exchange": {
"total": 1135.815203051966,
"count": 231479,
"is_parallel": true,
"self": 1135.815203051966
},
"steps_from_proto": {
"total": 100.66935833498673,
"count": 231479,
"is_parallel": true,
"self": 43.38901172100174,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.28034661398499,
"count": 462958,
"is_parallel": true,
"self": 57.28034661398499
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 571.0148357219211,
"count": 231480,
"self": 7.831638918908311,
"children": {
"process_trajectory": {
"total": 173.88444282701306,
"count": 231480,
"self": 172.64780977101287,
"children": {
"RLTrainer._checkpoint": {
"total": 1.236633056000187,
"count": 10,
"self": 1.236633056000187
}
}
},
"_update_policy": {
"total": 389.2987539759997,
"count": 96,
"self": 329.46862708199774,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.83012689400198,
"count": 2880,
"self": 59.83012689400198
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.139998837781604e-07,
"count": 1,
"self": 9.139998837781604e-07
},
"TrainerController._save_models": {
"total": 0.12732476100018175,
"count": 1,
"self": 0.0022214489999896614,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1251033120001921,
"count": 1,
"self": 0.1251033120001921
}
}
}
}
}
}
}