{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4078136682510376,
"min": 1.4078136682510376,
"max": 1.4268040657043457,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69391.1328125,
"min": 68889.9453125,
"max": 77860.234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.17977528089888,
"min": 86.33507853403141,
"max": 376.85714285714283,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49224.0,
"min": 48974.0,
"max": 50122.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49813.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49813.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.391021251678467,
"min": 0.14932088553905487,
"max": 2.459251642227173,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1276.8052978515625,
"min": 19.710357666015625,
"max": 1406.87353515625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.575771658161606,
"min": 1.8584200887743272,
"max": 3.9355171180433697,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1909.4620654582977,
"min": 245.31145171821117,
"max": 2224.3485311865807,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.575771658161606,
"min": 1.8584200887743272,
"max": 3.9355171180433697,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1909.4620654582977,
"min": 245.31145171821117,
"max": 2224.3485311865807,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01899833439221968,
"min": 0.014154496433911845,
"max": 0.02008026219991734,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05699500317665904,
"min": 0.02830899286782369,
"max": 0.05699500317665904,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0549429169545571,
"min": 0.021842945832759142,
"max": 0.06162155705193678,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1648287508636713,
"min": 0.043685891665518284,
"max": 0.17293203473091126,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.394598868499999e-06,
"min": 3.394598868499999e-06,
"max": 0.00029535262654912506,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0183796605499998e-05,
"min": 1.0183796605499998e-05,
"max": 0.0008440708686430501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011315,
"min": 0.1011315,
"max": 0.19845087499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033945,
"min": 0.20743264999999997,
"max": 0.5813569499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.646185e-05,
"min": 6.646185e-05,
"max": 0.004922698662500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019938555,
"min": 0.00019938555,
"max": 0.014069711805,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674034854",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674037051"
},
"total": 2196.268415697,
"count": 1,
"self": 0.3870397469991076,
"children": {
"run_training.setup": {
"total": 0.09862983000016357,
"count": 1,
"self": 0.09862983000016357
},
"TrainerController.start_learning": {
"total": 2195.7827461200004,
"count": 1,
"self": 3.840411798972127,
"children": {
"TrainerController._reset_env": {
"total": 9.921792130000085,
"count": 1,
"self": 9.921792130000085
},
"TrainerController.advance": {
"total": 2181.8739903790283,
"count": 231950,
"self": 4.147655221129753,
"children": {
"env_step": {
"total": 1734.2961435360173,
"count": 231950,
"self": 1452.3412505761219,
"children": {
"SubprocessEnvManager._take_step": {
"total": 279.3805550538607,
"count": 231950,
"self": 14.523292083759316,
"children": {
"TorchPolicy.evaluate": {
"total": 264.8572629701014,
"count": 222880,
"self": 66.02558970604059,
"children": {
"TorchPolicy.sample_actions": {
"total": 198.83167326406078,
"count": 222880,
"self": 198.83167326406078
}
}
}
}
},
"workers": {
"total": 2.5743379060347706,
"count": 231950,
"self": 0.0,
"children": {
"worker_root": {
"total": 2187.7993283899864,
"count": 231950,
"is_parallel": true,
"self": 994.4800989929292,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001999688000069,
"count": 1,
"is_parallel": true,
"self": 0.0003155309998419398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00168415700022706,
"count": 2,
"is_parallel": true,
"self": 0.00168415700022706
}
}
},
"UnityEnvironment.step": {
"total": 0.029407194999976127,
"count": 1,
"is_parallel": true,
"self": 0.00027588199986894324,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020062999988113006,
"count": 1,
"is_parallel": true,
"self": 0.00020062999988113006
},
"communicator.exchange": {
"total": 0.028247246000091764,
"count": 1,
"is_parallel": true,
"self": 0.028247246000091764
},
"steps_from_proto": {
"total": 0.0006834370001342904,
"count": 1,
"is_parallel": true,
"self": 0.00022417700006371888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045926000007057155,
"count": 2,
"is_parallel": true,
"self": 0.00045926000007057155
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1193.3192293970571,
"count": 231949,
"is_parallel": true,
"self": 33.87554863486048,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.62883001810428,
"count": 231949,
"is_parallel": true,
"self": 77.62883001810428
},
"communicator.exchange": {
"total": 989.6991030049835,
"count": 231949,
"is_parallel": true,
"self": 989.6991030049835
},
"steps_from_proto": {
"total": 92.11574773910888,
"count": 231949,
"is_parallel": true,
"self": 39.678345030095215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.43740270901367,
"count": 463898,
"is_parallel": true,
"self": 52.43740270901367
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 443.43019162188125,
"count": 231950,
"self": 5.844322373878185,
"children": {
"process_trajectory": {
"total": 144.6584765840057,
"count": 231950,
"self": 143.56948060700552,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0889959770001951,
"count": 10,
"self": 1.0889959770001951
}
}
},
"_update_policy": {
"total": 292.92739266399735,
"count": 97,
"self": 241.04878453000174,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.87860813399561,
"count": 2910,
"self": 51.87860813399561
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.730000106676016e-07,
"count": 1,
"self": 7.730000106676016e-07
},
"TrainerController._save_models": {
"total": 0.14655103899985988,
"count": 1,
"self": 0.0019946539996453794,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1445563850002145,
"count": 1,
"self": 0.1445563850002145
}
}
}
}
}
}
}