ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4044312238693237,
"min": 1.4044312238693237,
"max": 1.4284498691558838,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70196.28125,
"min": 68428.40625,
"max": 77340.921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.14528301886793,
"min": 90.56146788990826,
"max": 453.27027027027026,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48837.0,
"min": 48837.0,
"max": 50313.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999994.0,
"min": 49797.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999994.0,
"min": 49797.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.343609571456909,
"min": 0.12583576142787933,
"max": 2.4490044116973877,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1242.113037109375,
"min": 13.841934204101562,
"max": 1288.413818359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7916850514007066,
"min": 1.7487663164734841,
"max": 3.921746356931163,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2009.5930772423744,
"min": 192.36429481208324,
"max": 2043.2298519611359,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7916850514007066,
"min": 1.7487663164734841,
"max": 3.921746356931163,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2009.5930772423744,
"min": 192.36429481208324,
"max": 2043.2298519611359,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01583832499955962,
"min": 0.014214647358555037,
"max": 0.021270070795920522,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04751497499867886,
"min": 0.028429294717110073,
"max": 0.06188409884731906,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05649776963724031,
"min": 0.02138824832315246,
"max": 0.06150973103940487,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16949330891172093,
"min": 0.04277649664630492,
"max": 0.1845291931182146,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2360489213499986e-06,
"min": 3.2360489213499986e-06,
"max": 0.00029533492655502494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.708146764049995e-06,
"min": 9.708146764049995e-06,
"max": 0.0008441851686049498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107865000000003,
"min": 0.10107865000000003,
"max": 0.19844497500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032359500000001,
"min": 0.20731430000000006,
"max": 0.5813950499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.382463499999997e-05,
"min": 6.382463499999997e-05,
"max": 0.0049224042525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019147390499999994,
"min": 0.00019147390499999994,
"max": 0.014071612995000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670364407",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670366702"
},
"total": 2295.070076139,
"count": 1,
"self": 0.3955309420002777,
"children": {
"run_training.setup": {
"total": 0.10853508899998587,
"count": 1,
"self": 0.10853508899998587
},
"TrainerController.start_learning": {
"total": 2294.566010108,
"count": 1,
"self": 4.1652070840541455,
"children": {
"TrainerController._reset_env": {
"total": 10.279425678999985,
"count": 1,
"self": 10.279425678999985
},
"TrainerController.advance": {
"total": 2279.998763702946,
"count": 231642,
"self": 4.68094251994853,
"children": {
"env_step": {
"total": 1805.95798379903,
"count": 231642,
"self": 1509.3128303429871,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.95327950597715,
"count": 231642,
"self": 15.175826899978915,
"children": {
"TorchPolicy.evaluate": {
"total": 278.77745260599823,
"count": 222932,
"self": 69.99240183198475,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.78505077401348,
"count": 222932,
"self": 208.78505077401348
}
}
}
}
},
"workers": {
"total": 2.6918739500657693,
"count": 231642,
"self": 0.0,
"children": {
"worker_root": {
"total": 2286.000301800016,
"count": 231642,
"is_parallel": true,
"self": 1050.358989754986,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022490380000022014,
"count": 1,
"is_parallel": true,
"self": 0.0003670879999617682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018819500000404332,
"count": 2,
"is_parallel": true,
"self": 0.0018819500000404332
}
}
},
"UnityEnvironment.step": {
"total": 0.02841593299996248,
"count": 1,
"is_parallel": true,
"self": 0.0002841159999888987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020032399999081463,
"count": 1,
"is_parallel": true,
"self": 0.00020032399999081463
},
"communicator.exchange": {
"total": 0.0271831739999584,
"count": 1,
"is_parallel": true,
"self": 0.0271831739999584
},
"steps_from_proto": {
"total": 0.0007483190000243667,
"count": 1,
"is_parallel": true,
"self": 0.00025476400003299204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004935549999913746,
"count": 2,
"is_parallel": true,
"self": 0.0004935549999913746
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1235.6413120450302,
"count": 231641,
"is_parallel": true,
"self": 34.922929840804045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.43563251410347,
"count": 231641,
"is_parallel": true,
"self": 80.43563251410347
},
"communicator.exchange": {
"total": 1023.907480302001,
"count": 231641,
"is_parallel": true,
"self": 1023.907480302001
},
"steps_from_proto": {
"total": 96.37526938812175,
"count": 231641,
"is_parallel": true,
"self": 41.926077487193595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.449191900928156,
"count": 463282,
"is_parallel": true,
"self": 54.449191900928156
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 469.35983738396703,
"count": 231642,
"self": 6.5211160999652975,
"children": {
"process_trajectory": {
"total": 151.35894812900392,
"count": 231642,
"self": 150.84906962100342,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5098785080004973,
"count": 4,
"self": 0.5098785080004973
}
}
},
"_update_policy": {
"total": 311.4797731549978,
"count": 97,
"self": 257.3726636950005,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.10710945999733,
"count": 2910,
"self": 54.10710945999733
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.749999895168003e-07,
"count": 1,
"self": 7.749999895168003e-07
},
"TrainerController._save_models": {
"total": 0.12261286700004348,
"count": 1,
"self": 0.0020302859998082567,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12058258100023522,
"count": 1,
"self": 0.12058258100023522
}
}
}
}
}
}
}