{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045161008834839,
"min": 1.4045110940933228,
"max": 1.4275330305099487,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70277.7734375,
"min": 67935.3125,
"max": 75234.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 96.38910505836576,
"min": 84.20102214650767,
"max": 390.8046875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49544.0,
"min": 48860.0,
"max": 50094.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49581.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49581.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4489691257476807,
"min": 0.02115306816995144,
"max": 2.4624955654144287,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1258.7701416015625,
"min": 2.6864397525787354,
"max": 1410.11962890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8937121655226683,
"min": 1.8972659012464088,
"max": 3.960493353369472,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2001.3680530786514,
"min": 240.95276945829391,
"max": 2240.684354186058,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8937121655226683,
"min": 1.8972659012464088,
"max": 3.960493353369472,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2001.3680530786514,
"min": 240.95276945829391,
"max": 2240.684354186058,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018453275749602146,
"min": 0.01366751354119818,
"max": 0.019830748764151293,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05535982724880644,
"min": 0.02733502708239636,
"max": 0.05949224629245388,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05277219981782966,
"min": 0.02496958722670873,
"max": 0.058941075267891094,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15831659945348897,
"min": 0.04993917445341746,
"max": 0.17559698621431988,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.365548878183337e-06,
"min": 3.365548878183337e-06,
"max": 0.000295344376551875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0096646634550011e-05,
"min": 1.0096646634550011e-05,
"max": 0.0008440327686557499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112181666666668,
"min": 0.10112181666666668,
"max": 0.19844812500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30336545000000004,
"min": 0.20740935,
"max": 0.58134425,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.597865166666671e-05,
"min": 6.597865166666671e-05,
"max": 0.0049225614375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019793595500000014,
"min": 0.00019793595500000014,
"max": 0.014069078074999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678284459",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678286797"
},
"total": 2337.708342186,
"count": 1,
"self": 0.8096058639998773,
"children": {
"run_training.setup": {
"total": 0.18311401599999044,
"count": 1,
"self": 0.18311401599999044
},
"TrainerController.start_learning": {
"total": 2336.715622306,
"count": 1,
"self": 3.868485747066188,
"children": {
"TrainerController._reset_env": {
"total": 10.020093912999982,
"count": 1,
"self": 10.020093912999982
},
"TrainerController.advance": {
"total": 2322.6403090369336,
"count": 231882,
"self": 4.251054592949458,
"children": {
"env_step": {
"total": 1813.4026920779866,
"count": 231882,
"self": 1514.1082857669026,
"children": {
"SubprocessEnvManager._take_step": {
"total": 296.6156417959802,
"count": 231882,
"self": 15.531500644874768,
"children": {
"TorchPolicy.evaluate": {
"total": 281.0841411511054,
"count": 222984,
"self": 70.52141462514987,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.56272652595555,
"count": 222984,
"self": 210.56272652595555
}
}
}
}
},
"workers": {
"total": 2.6787645151039214,
"count": 231882,
"self": 0.0,
"children": {
"worker_root": {
"total": 2328.556424901,
"count": 231882,
"is_parallel": true,
"self": 1095.9175848310583,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010584480000943586,
"count": 1,
"is_parallel": true,
"self": 0.0003930760001367162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006653719999576424,
"count": 2,
"is_parallel": true,
"self": 0.0006653719999576424
}
}
},
"UnityEnvironment.step": {
"total": 0.032442165000020395,
"count": 1,
"is_parallel": true,
"self": 0.00032726000017646584,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002041189999317794,
"count": 1,
"is_parallel": true,
"self": 0.0002041189999317794
},
"communicator.exchange": {
"total": 0.028009286999918004,
"count": 1,
"is_parallel": true,
"self": 0.028009286999918004
},
"steps_from_proto": {
"total": 0.0039014989999941463,
"count": 1,
"is_parallel": true,
"self": 0.0002778820000912674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003623616999902879,
"count": 2,
"is_parallel": true,
"self": 0.003623616999902879
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1232.6388400699416,
"count": 231881,
"is_parallel": true,
"self": 37.73534189999009,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.00453629897015,
"count": 231881,
"is_parallel": true,
"self": 77.00453629897015
},
"communicator.exchange": {
"total": 1026.9068216520418,
"count": 231881,
"is_parallel": true,
"self": 1026.9068216520418
},
"steps_from_proto": {
"total": 90.99214021893943,
"count": 231881,
"is_parallel": true,
"self": 36.665840733058644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.32629948588078,
"count": 463762,
"is_parallel": true,
"self": 54.32629948588078
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 504.98656236599754,
"count": 231882,
"self": 6.100954242939565,
"children": {
"process_trajectory": {
"total": 159.3410060220599,
"count": 231882,
"self": 157.8918619670594,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4491440550004882,
"count": 10,
"self": 1.4491440550004882
}
}
},
"_update_policy": {
"total": 339.5446021009981,
"count": 97,
"self": 282.95650638898724,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.58809571201084,
"count": 2910,
"self": 56.58809571201084
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2059999789926223e-06,
"count": 1,
"self": 1.2059999789926223e-06
},
"TrainerController._save_models": {
"total": 0.1867324030004056,
"count": 1,
"self": 0.002873195000574924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1838592079998307,
"count": 1,
"self": 0.1838592079998307
}
}
}
}
}
}
}