{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045268297195435,
"min": 1.4045268297195435,
"max": 1.4249088764190674,
"count": 38
},
"Huggy.Policy.Entropy.sum": {
"value": 70031.109375,
"min": 53182.12109375,
"max": 72798.96875,
"count": 38
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.35470941883767,
"min": 92.91214953271027,
"max": 275.8455284552846,
"count": 38
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49079.0,
"min": 33929.0,
"max": 50133.0,
"count": 38
},
"Huggy.Step.mean": {
"value": 1999509.0,
"min": 149855.0,
"max": 1999509.0,
"count": 38
},
"Huggy.Step.sum": {
"value": 1999509.0,
"min": 149855.0,
"max": 1999509.0,
"count": 38
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3690881729125977,
"min": 0.5866540670394897,
"max": 2.403860569000244,
"count": 38
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1182.175048828125,
"min": 71.57179260253906,
"max": 1232.798583984375,
"count": 38
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7612902604506346,
"min": 2.9149974369611895,
"max": 3.8429813464482625,
"count": 38
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1876.8838399648666,
"min": 355.62968730926514,
"max": 1953.9927376508713,
"count": 38
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7612902604506346,
"min": 2.9149974369611895,
"max": 3.8429813464482625,
"count": 38
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1876.8838399648666,
"min": 355.62968730926514,
"max": 1953.9927376508713,
"count": 38
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016687300073196333,
"min": 0.013491771740276211,
"max": 0.018985960454220834,
"count": 38
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.033374600146392666,
"min": 0.014965496338360633,
"max": 0.0569578813626625,
"count": 38
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05731986711422602,
"min": 0.022055043052468037,
"max": 0.05731986711422602,
"count": 38
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11463973422845204,
"min": 0.02264293413609266,
"max": 0.1577295444905758,
"count": 38
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2176739274750064e-06,
"min": 3.2176739274750064e-06,
"max": 0.00027952005682664995,
"count": 38
},
"Huggy.Policy.LearningRate.sum": {
"value": 6.435347854950013e-06,
"min": 6.435347854950013e-06,
"max": 0.0008199690266770001,
"count": 38
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101072525,
"min": 0.101072525,
"max": 0.19317335000000008,
"count": 38
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20214505,
"min": 0.19317335000000008,
"max": 0.573323,
"count": 38
},
"Huggy.Policy.Beta.mean": {
"value": 6.351899750000012e-05,
"min": 6.351899750000012e-05,
"max": 0.004659350165,
"count": 38
},
"Huggy.Policy.Beta.sum": {
"value": 0.00012703799500000024,
"min": 0.00012703799500000024,
"max": 0.013668817699999999,
"count": 38
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672076972",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --resume --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672079499"
},
"total": 2527.283682075,
"count": 1,
"self": 0.6338085719999071,
"children": {
"run_training.setup": {
"total": 0.18512747899990245,
"count": 1,
"self": 0.18512747899990245
},
"TrainerController.start_learning": {
"total": 2526.4647460240003,
"count": 1,
"self": 5.113497952964281,
"children": {
"TrainerController._reset_env": {
"total": 8.773607599000002,
"count": 1,
"self": 8.773607599000002
},
"TrainerController.advance": {
"total": 2512.406270193036,
"count": 217818,
"self": 5.585072284013222,
"children": {
"env_step": {
"total": 1758.8766437130407,
"count": 217818,
"self": 1420.256747821063,
"children": {
"SubprocessEnvManager._take_step": {
"total": 335.13158903899625,
"count": 217818,
"self": 17.9600434750771,
"children": {
"TorchPolicy.evaluate": {
"total": 317.17154556391915,
"count": 210000,
"self": 74.11501478692242,
"children": {
"TorchPolicy.sample_actions": {
"total": 243.05653077699674,
"count": 210000,
"self": 243.05653077699674
}
}
}
}
},
"workers": {
"total": 3.488306852981509,
"count": 217818,
"self": 0.0,
"children": {
"worker_root": {
"total": 2518.69709688307,
"count": 217818,
"is_parallel": true,
"self": 1405.401381260082,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011734910000313903,
"count": 1,
"is_parallel": true,
"self": 0.0005619989999559039,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006114920000754864,
"count": 2,
"is_parallel": true,
"self": 0.0006114920000754864
}
}
},
"UnityEnvironment.step": {
"total": 0.03823284199995669,
"count": 1,
"is_parallel": true,
"self": 0.00024440599986519373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001700270000810633,
"count": 1,
"is_parallel": true,
"self": 0.0001700270000810633
},
"communicator.exchange": {
"total": 0.037109502000021166,
"count": 1,
"is_parallel": true,
"self": 0.037109502000021166
},
"steps_from_proto": {
"total": 0.0007089069999892672,
"count": 1,
"is_parallel": true,
"self": 0.00022558600005595508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004833209999333121,
"count": 2,
"is_parallel": true,
"self": 0.0004833209999333121
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1113.2957156229882,
"count": 217817,
"is_parallel": true,
"self": 30.60987491018568,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 71.67871878698406,
"count": 217817,
"is_parallel": true,
"self": 71.67871878698406
},
"communicator.exchange": {
"total": 926.9839290028839,
"count": 217817,
"is_parallel": true,
"self": 926.9839290028839
},
"steps_from_proto": {
"total": 84.02319292293453,
"count": 217817,
"is_parallel": true,
"self": 35.09752118492247,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.925671738012056,
"count": 435634,
"is_parallel": true,
"self": 48.925671738012056
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 747.9445541959824,
"count": 217818,
"self": 7.780433181893272,
"children": {
"process_trajectory": {
"total": 211.97777985109087,
"count": 217818,
"self": 210.26702962209174,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7107502289991317,
"count": 10,
"self": 1.7107502289991317
}
}
},
"_update_policy": {
"total": 528.1863411629982,
"count": 91,
"self": 465.6885839999969,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.497757163001324,
"count": 2730,
"self": 62.497757163001324
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.261999841517536e-06,
"count": 1,
"self": 1.261999841517536e-06
},
"TrainerController._save_models": {
"total": 0.17136901699996088,
"count": 1,
"self": 0.005536529999972117,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16583248699998876,
"count": 1,
"self": 0.16583248699998876
}
}
}
}
}
}
}