{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407251000404358,
"min": 1.407251000404358,
"max": 1.4305286407470703,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72305.9609375,
"min": 68611.65625,
"max": 77472.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.8256880733945,
"min": 79.94918032786886,
"max": 400.1904761904762,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50045.0,
"min": 48769.0,
"max": 50424.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999841.0,
"min": 49800.0,
"max": 1999841.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999841.0,
"min": 49800.0,
"max": 1999841.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3642807006835938,
"min": 0.11072560399770737,
"max": 2.448641061782837,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1288.532958984375,
"min": 13.840700149536133,
"max": 1490.986572265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.696187349525067,
"min": 1.7543857469558717,
"max": 3.983913949732081,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2014.4221054911613,
"min": 219.29821836948395,
"max": 2384.663511455059,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.696187349525067,
"min": 1.7543857469558717,
"max": 3.983913949732081,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2014.4221054911613,
"min": 219.29821836948395,
"max": 2384.663511455059,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015936354962023974,
"min": 0.014414606641124313,
"max": 0.020111762120440187,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04780906488607192,
"min": 0.028829213282248627,
"max": 0.060335286361320566,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05303012099530962,
"min": 0.022101443167775868,
"max": 0.06046721662084261,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15909036298592885,
"min": 0.044202886335551736,
"max": 0.18140164986252783,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.565948811383326e-06,
"min": 3.565948811383326e-06,
"max": 0.00029531475156175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0697846434149978e-05,
"min": 1.0697846434149978e-05,
"max": 0.0008440672686442499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118861666666668,
"min": 0.10118861666666668,
"max": 0.19843825000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30356585,
"min": 0.2075459,
"max": 0.5813557500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.931197166666656e-05,
"min": 6.931197166666656e-05,
"max": 0.004922068675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020793591499999967,
"min": 0.00020793591499999967,
"max": 0.014069651924999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687784330",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687786818"
},
"total": 2488.0921037180005,
"count": 1,
"self": 0.436463671000638,
"children": {
"run_training.setup": {
"total": 0.043681926999852294,
"count": 1,
"self": 0.043681926999852294
},
"TrainerController.start_learning": {
"total": 2487.61195812,
"count": 1,
"self": 4.697603756091667,
"children": {
"TrainerController._reset_env": {
"total": 5.479949411999996,
"count": 1,
"self": 5.479949411999996
},
"TrainerController.advance": {
"total": 2477.3142439989083,
"count": 232325,
"self": 4.774333817817933,
"children": {
"env_step": {
"total": 1936.0315938759093,
"count": 232325,
"self": 1626.6645925198964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 306.26896617194825,
"count": 232325,
"self": 18.01842431495993,
"children": {
"TorchPolicy.evaluate": {
"total": 288.2505418569883,
"count": 223036,
"self": 288.2505418569883
}
}
},
"workers": {
"total": 3.0980351840646563,
"count": 232325,
"self": 0.0,
"children": {
"worker_root": {
"total": 2479.345378792016,
"count": 232325,
"is_parallel": true,
"self": 1153.0819481188669,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009179039998343796,
"count": 1,
"is_parallel": true,
"self": 0.00023667599975851772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006812280000758619,
"count": 2,
"is_parallel": true,
"self": 0.0006812280000758619
}
}
},
"UnityEnvironment.step": {
"total": 0.0688698530000238,
"count": 1,
"is_parallel": true,
"self": 0.0003817849999450118,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021789400011584803,
"count": 1,
"is_parallel": true,
"self": 0.00021789400011584803
},
"communicator.exchange": {
"total": 0.06751903700001094,
"count": 1,
"is_parallel": true,
"self": 0.06751903700001094
},
"steps_from_proto": {
"total": 0.0007511369999519957,
"count": 1,
"is_parallel": true,
"self": 0.00020835400005125848,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005427829999007372,
"count": 2,
"is_parallel": true,
"self": 0.0005427829999007372
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1326.2634306731493,
"count": 232324,
"is_parallel": true,
"self": 41.242571143272244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.98775594295853,
"count": 232324,
"is_parallel": true,
"self": 78.98775594295853
},
"communicator.exchange": {
"total": 1107.1145203089327,
"count": 232324,
"is_parallel": true,
"self": 1107.1145203089327
},
"steps_from_proto": {
"total": 98.91858327798582,
"count": 232324,
"is_parallel": true,
"self": 34.13974480972138,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.77883846826444,
"count": 464648,
"is_parallel": true,
"self": 64.77883846826444
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 536.5083163051811,
"count": 232325,
"self": 7.310260734245503,
"children": {
"process_trajectory": {
"total": 138.7798874229361,
"count": 232325,
"self": 137.35626352593704,
"children": {
"RLTrainer._checkpoint": {
"total": 1.423623896999061,
"count": 10,
"self": 1.423623896999061
}
}
},
"_update_policy": {
"total": 390.4181681479995,
"count": 97,
"self": 326.96841135199907,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.44975679600043,
"count": 2910,
"self": 63.44975679600043
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2479999895731453e-06,
"count": 1,
"self": 1.2479999895731453e-06
},
"TrainerController._save_models": {
"total": 0.12015970499987816,
"count": 1,
"self": 0.001999887999772909,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11815981700010525,
"count": 1,
"self": 0.11815981700010525
}
}
}
}
}
}
}