ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4025816917419434,
"min": 1.4025816917419434,
"max": 1.43050217628479,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71353.5390625,
"min": 69140.921875,
"max": 75337.28125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.01470588235294,
"min": 77.77637795275591,
"max": 391.3125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49512.0,
"min": 48735.0,
"max": 50092.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999997.0,
"min": 49855.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999997.0,
"min": 49855.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4658141136169434,
"min": 0.06485355645418167,
"max": 2.5078182220458984,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1341.40283203125,
"min": 8.236401557922363,
"max": 1522.7921142578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8416563672397066,
"min": 1.6902434131291908,
"max": 4.021965499456764,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2089.8610637784004,
"min": 214.66091346740723,
"max": 2473.5079205036163,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8416563672397066,
"min": 1.6902434131291908,
"max": 4.021965499456764,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2089.8610637784004,
"min": 214.66091346740723,
"max": 2473.5079205036163,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01568843168024614,
"min": 0.013536816570558585,
"max": 0.019978830625768752,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04706529504073842,
"min": 0.02707363314111717,
"max": 0.05458764061331749,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05021466550727685,
"min": 0.023670885546339882,
"max": 0.06368641642232736,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15064399652183055,
"min": 0.04833814638356368,
"max": 0.17353701790173848,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.579748806783334e-06,
"min": 3.579748806783334e-06,
"max": 0.000295368526543825,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0739246420350001e-05,
"min": 1.0739246420350001e-05,
"max": 0.0008439709686763498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119321666666665,
"min": 0.10119321666666665,
"max": 0.198456175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30357965,
"min": 0.20757750000000003,
"max": 0.58132365,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.954151166666668e-05,
"min": 6.954151166666668e-05,
"max": 0.0049229631324999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020862453500000003,
"min": 0.00020862453500000003,
"max": 0.014068050135000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679078309",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679081152"
},
"total": 2842.90650145,
"count": 1,
"self": 0.48667898799976683,
"children": {
"run_training.setup": {
"total": 0.20238864500004183,
"count": 1,
"self": 0.20238864500004183
},
"TrainerController.start_learning": {
"total": 2842.2174338170003,
"count": 1,
"self": 6.042242260892635,
"children": {
"TrainerController._reset_env": {
"total": 8.799342565000018,
"count": 1,
"self": 8.799342565000018
},
"TrainerController.advance": {
"total": 2827.2360904631078,
"count": 233006,
"self": 6.71282763605268,
"children": {
"env_step": {
"total": 2225.6055971720857,
"count": 233006,
"self": 1869.7266466861836,
"children": {
"SubprocessEnvManager._take_step": {
"total": 352.05855485595964,
"count": 233006,
"self": 22.012712248911953,
"children": {
"TorchPolicy.evaluate": {
"total": 330.0458426070477,
"count": 223098,
"self": 330.0458426070477
}
}
},
"workers": {
"total": 3.820395629942425,
"count": 233006,
"self": 0.0,
"children": {
"worker_root": {
"total": 2831.5196850159996,
"count": 233006,
"is_parallel": true,
"self": 1326.870986637004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012588779999873623,
"count": 1,
"is_parallel": true,
"self": 0.0003863189999151473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000872559000072215,
"count": 2,
"is_parallel": true,
"self": 0.000872559000072215
}
}
},
"UnityEnvironment.step": {
"total": 0.0331904189999932,
"count": 1,
"is_parallel": true,
"self": 0.00034767600004670385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023169700000380544,
"count": 1,
"is_parallel": true,
"self": 0.00023169700000380544
},
"communicator.exchange": {
"total": 0.03184348899998213,
"count": 1,
"is_parallel": true,
"self": 0.03184348899998213
},
"steps_from_proto": {
"total": 0.0007675569999605614,
"count": 1,
"is_parallel": true,
"self": 0.00026046899995435524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005070880000062061,
"count": 2,
"is_parallel": true,
"self": 0.0005070880000062061
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1504.6486983789955,
"count": 233005,
"is_parallel": true,
"self": 44.71176322409906,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.26975914195214,
"count": 233005,
"is_parallel": true,
"self": 91.26975914195214
},
"communicator.exchange": {
"total": 1262.6458345830324,
"count": 233005,
"is_parallel": true,
"self": 1262.6458345830324
},
"steps_from_proto": {
"total": 106.02134142991184,
"count": 233005,
"is_parallel": true,
"self": 41.23617389093374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.7851675389781,
"count": 466010,
"is_parallel": true,
"self": 64.7851675389781
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 594.9176656549694,
"count": 233006,
"self": 9.314105746011364,
"children": {
"process_trajectory": {
"total": 167.6560645339573,
"count": 233006,
"self": 166.3322801999576,
"children": {
"RLTrainer._checkpoint": {
"total": 1.323784333999697,
"count": 10,
"self": 1.323784333999697
}
}
},
"_update_policy": {
"total": 417.9474953750007,
"count": 97,
"self": 353.8936465609996,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.05384881400107,
"count": 2910,
"self": 64.05384881400107
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6250000953732524e-06,
"count": 1,
"self": 1.6250000953732524e-06
},
"TrainerController._save_models": {
"total": 0.13975690299957932,
"count": 1,
"self": 0.002255124999464897,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13750177800011443,
"count": 1,
"self": 0.13750177800011443
}
}
}
}
}
}
}
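
The "gauges" block above uses ML-Agents' value/min/max/count layout per metric. As a minimal sketch of how one might inspect this file (assuming it is saved locally at run_logs/timers.json, as in the path above):

import json

# Load the timers file and print each gauge's final value with its
# min/max/count over the run. Path is an assumption for illustration.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")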