ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4126642942428589,
"min": 1.4126642942428589,
"max": 1.4274890422821045,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70068.1484375,
"min": 69117.3828125,
"max": 76809.0078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.7911877394636,
"min": 84.88164665523156,
"max": 379.67424242424244,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49481.0,
"min": 48879.0,
"max": 50151.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999862.0,
"min": 49897.0,
"max": 1999862.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999862.0,
"min": 49897.0,
"max": 1999862.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3790035247802734,
"min": 0.14388029277324677,
"max": 2.46075177192688,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1241.83984375,
"min": 18.848318099975586,
"max": 1421.32470703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.658866833795533,
"min": 1.6956584894930133,
"max": 4.03121385169479,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1909.9284872412682,
"min": 222.13126212358475,
"max": 2236.7772645950317,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.658866833795533,
"min": 1.6956584894930133,
"max": 4.03121385169479,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1909.9284872412682,
"min": 222.13126212358475,
"max": 2236.7772645950317,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0169812442594169,
"min": 0.014196875062993462,
"max": 0.019938473453900464,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0339624885188338,
"min": 0.029903898921717582,
"max": 0.0582119286050632,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055944746111830077,
"min": 0.02277940365796288,
"max": 0.058911373031636086,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11188949222366015,
"min": 0.04555880731592576,
"max": 0.1725665317227443,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.609898463400013e-06,
"min": 4.609898463400013e-06,
"max": 0.0002953647015451,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.219796926800025e-06,
"min": 9.219796926800025e-06,
"max": 0.0008441625186125,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1015366,
"min": 0.1015366,
"max": 0.19845489999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2030732,
"min": 0.2030732,
"max": 0.5813875000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.667634000000026e-05,
"min": 8.667634000000026e-05,
"max": 0.00492289951,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017335268000000053,
"min": 0.00017335268000000053,
"max": 0.01407123625,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671869319",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671871573"
},
"total": 2254.4680541870002,
"count": 1,
"self": 0.3856769900003201,
"children": {
"run_training.setup": {
"total": 0.10814501200002269,
"count": 1,
"self": 0.10814501200002269
},
"TrainerController.start_learning": {
"total": 2253.974232185,
"count": 1,
"self": 3.945529085950966,
"children": {
"TrainerController._reset_env": {
"total": 8.169778937999979,
"count": 1,
"self": 8.169778937999979
},
"TrainerController.advance": {
"total": 2241.7488430230487,
"count": 232220,
"self": 4.229061316960724,
"children": {
"env_step": {
"total": 1774.3503586540323,
"count": 232220,
"self": 1488.5402578970156,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.14592610299565,
"count": 232220,
"self": 14.539361759928909,
"children": {
"TorchPolicy.evaluate": {
"total": 268.60656434306674,
"count": 222932,
"self": 66.82627315712125,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.7802911859455,
"count": 222932,
"self": 201.7802911859455
}
}
}
}
},
"workers": {
"total": 2.66417465402111,
"count": 232220,
"self": 0.0,
"children": {
"worker_root": {
"total": 2246.0525973460885,
"count": 232220,
"is_parallel": true,
"self": 1025.2026065860614,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022606910000035896,
"count": 1,
"is_parallel": true,
"self": 0.0003299700000525263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019307209999510633,
"count": 2,
"is_parallel": true,
"self": 0.0019307209999510633
}
}
},
"UnityEnvironment.step": {
"total": 0.027809439000009206,
"count": 1,
"is_parallel": true,
"self": 0.0002891829999498441,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001752740000142694,
"count": 1,
"is_parallel": true,
"self": 0.0001752740000142694
},
"communicator.exchange": {
"total": 0.0262837030000469,
"count": 1,
"is_parallel": true,
"self": 0.0262837030000469
},
"steps_from_proto": {
"total": 0.0010612789999981942,
"count": 1,
"is_parallel": true,
"self": 0.0003145759999370057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007467030000611885,
"count": 2,
"is_parallel": true,
"self": 0.0007467030000611885
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.8499907600271,
"count": 232219,
"is_parallel": true,
"self": 34.39107141202226,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.47017050599169,
"count": 232219,
"is_parallel": true,
"self": 79.47017050599169
},
"communicator.exchange": {
"total": 1012.4904460949704,
"count": 232219,
"is_parallel": true,
"self": 1012.4904460949704
},
"steps_from_proto": {
"total": 94.49830274704283,
"count": 232219,
"is_parallel": true,
"self": 40.720452004984736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.77785074205809,
"count": 464438,
"is_parallel": true,
"self": 53.77785074205809
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.1694230520558,
"count": 232220,
"self": 6.148118653113158,
"children": {
"process_trajectory": {
"total": 148.76602729694224,
"count": 232220,
"self": 147.5921694349426,
"children": {
"RLTrainer._checkpoint": {
"total": 1.173857861999636,
"count": 10,
"self": 1.173857861999636
}
}
},
"_update_policy": {
"total": 308.2552771020004,
"count": 96,
"self": 256.16774864399633,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.087528458004044,
"count": 2880,
"self": 52.087528458004044
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.850001904647797e-07,
"count": 1,
"self": 8.850001904647797e-07
},
"TrainerController._save_models": {
"total": 0.11008025299997826,
"count": 1,
"self": 0.0018424529998810613,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1082378000000972,
"count": 1,
"self": 0.1082378000000972
}
}
}
}
}
}
}