{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4028329849243164,
"min": 1.4028329849243164,
"max": 1.4264872074127197,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71900.8046875,
"min": 68623.765625,
"max": 76648.5390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 95.2375478927203,
"min": 87.01241134751773,
"max": 387.8759689922481,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49714.0,
"min": 49075.0,
"max": 50036.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999893.0,
"min": 49542.0,
"max": 1999893.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999893.0,
"min": 49542.0,
"max": 1999893.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.419992685317993,
"min": 0.09700135886669159,
"max": 2.4526381492614746,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1263.2362060546875,
"min": 12.416173934936523,
"max": 1372.742919921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6725059867590324,
"min": 1.856389137916267,
"max": 3.964738114953041,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1917.0481250882149,
"min": 237.61780965328217,
"max": 2165.292198419571,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6725059867590324,
"min": 1.856389137916267,
"max": 3.964738114953041,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1917.0481250882149,
"min": 237.61780965328217,
"max": 2165.292198419571,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017017919130416382,
"min": 0.014142400207758025,
"max": 0.019708331471095637,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05105375739124914,
"min": 0.02828480041551605,
"max": 0.05625770552627121,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053058871668246056,
"min": 0.022810648505886398,
"max": 0.05985470432788134,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15917661500473818,
"min": 0.045621297011772796,
"max": 0.16152794770896434,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.304698898466658e-06,
"min": 3.304698898466658e-06,
"max": 0.0002953467015511001,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.914096695399973e-06,
"min": 9.914096695399973e-06,
"max": 0.00084414526861825,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110153333333334,
"min": 0.10110153333333334,
"max": 0.19844890000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30330460000000004,
"min": 0.20736615000000008,
"max": 0.5813817499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.496651333333317e-05,
"min": 6.496651333333317e-05,
"max": 0.00492260011,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019489953999999953,
"min": 0.00019489953999999953,
"max": 0.014070949325,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671474108",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671476392"
},
"total": 2283.247820782,
"count": 1,
"self": 0.3923421409999719,
"children": {
"run_training.setup": {
"total": 0.1086723629999824,
"count": 1,
"self": 0.1086723629999824
},
"TrainerController.start_learning": {
"total": 2282.746806278,
"count": 1,
"self": 3.853660931943523,
"children": {
"TrainerController._reset_env": {
"total": 7.204500471000074,
"count": 1,
"self": 7.204500471000074
},
"TrainerController.advance": {
"total": 2271.575449381056,
"count": 232464,
"self": 4.313293792014065,
"children": {
"env_step": {
"total": 1799.4380516160218,
"count": 232464,
"self": 1512.9165002909676,
"children": {
"SubprocessEnvManager._take_step": {
"total": 283.8823295441067,
"count": 232464,
"self": 14.755949896096922,
"children": {
"TorchPolicy.evaluate": {
"total": 269.1263796480098,
"count": 223050,
"self": 67.21825420512141,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.90812544288838,
"count": 223050,
"self": 201.90812544288838
}
}
}
}
},
"workers": {
"total": 2.639221780947423,
"count": 232464,
"self": 0.0,
"children": {
"worker_root": {
"total": 2274.497870335014,
"count": 232464,
"is_parallel": true,
"self": 1032.1704401559846,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001831055000025117,
"count": 1,
"is_parallel": true,
"self": 0.00035354000010556774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014775149999195492,
"count": 2,
"is_parallel": true,
"self": 0.0014775149999195492
}
}
},
"UnityEnvironment.step": {
"total": 0.03245473300000867,
"count": 1,
"is_parallel": true,
"self": 0.00030056799994326866,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019019100000150502,
"count": 1,
"is_parallel": true,
"self": 0.00019019100000150502
},
"communicator.exchange": {
"total": 0.03129011700002593,
"count": 1,
"is_parallel": true,
"self": 0.03129011700002593
},
"steps_from_proto": {
"total": 0.0006738570000379696,
"count": 1,
"is_parallel": true,
"self": 0.00021612300020024122,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045773399983772833,
"count": 2,
"is_parallel": true,
"self": 0.00045773399983772833
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1242.3274301790293,
"count": 232463,
"is_parallel": true,
"self": 35.41751764995388,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.82190297705995,
"count": 232463,
"is_parallel": true,
"self": 81.82190297705995
},
"communicator.exchange": {
"total": 1027.591014766117,
"count": 232463,
"is_parallel": true,
"self": 1027.591014766117
},
"steps_from_proto": {
"total": 97.49699478589855,
"count": 232463,
"is_parallel": true,
"self": 42.721771534002414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.775223251896136,
"count": 464926,
"is_parallel": true,
"self": 54.775223251896136
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.82410397302,
"count": 232464,
"self": 6.206529146977459,
"children": {
"process_trajectory": {
"total": 150.8216870460409,
"count": 232464,
"self": 149.58797789604102,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2337091499998678,
"count": 10,
"self": 1.2337091499998678
}
}
},
"_update_policy": {
"total": 310.79588778000164,
"count": 97,
"self": 258.18464372100254,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.61124405899909,
"count": 2910,
"self": 52.61124405899909
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.650000952708069e-07,
"count": 1,
"self": 7.650000952708069e-07
},
"TrainerController._save_models": {
"total": 0.11319472900004257,
"count": 1,
"self": 0.005926674000420462,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10726805499962211,
"count": 1,
"self": 0.10726805499962211
}
}
}
}
}
}
}