{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4027156829833984,
"min": 1.4027156829833984,
"max": 1.4250857830047607,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70103.5234375,
"min": 69485.7421875,
"max": 77130.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 65.83044058744993,
"min": 65.83044058744993,
"max": 407.4390243902439,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49307.0,
"min": 49180.0,
"max": 50115.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49979.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49979.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5087125301361084,
"min": 0.09961482882499695,
"max": 2.5564522743225098,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1879.0257568359375,
"min": 12.153009414672852,
"max": 1889.21826171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.923292359299908,
"min": 1.8932996675616405,
"max": 4.042183299470881,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2938.545977115631,
"min": 230.98255944252014,
"max": 2966.7089733481407,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.923292359299908,
"min": 1.8932996675616405,
"max": 4.042183299470881,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2938.545977115631,
"min": 230.98255944252014,
"max": 2966.7089733481407,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01648955220524739,
"min": 0.013244139690262575,
"max": 0.019345522613730282,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049468656615742165,
"min": 0.02648827938052515,
"max": 0.05803656784119085,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0622500794629256,
"min": 0.021169030417998634,
"max": 0.0622500794629256,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1867502383887768,
"min": 0.04233806083599727,
"max": 0.1867502383887768,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.973248675616659e-06,
"min": 3.973248675616659e-06,
"max": 0.00029530102656632497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1919746026849975e-05,
"min": 1.1919746026849975e-05,
"max": 0.00084410176863275,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10132438333333334,
"min": 0.10132438333333334,
"max": 0.19843367500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30397315,
"min": 0.20777675000000007,
"max": 0.5813672500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.60867283333332e-05,
"min": 7.60867283333332e-05,
"max": 0.004921840382500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022826018499999957,
"min": 0.00022826018499999957,
"max": 0.014070225774999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672548239",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672550421"
},
"total": 2182.516576099,
"count": 1,
"self": 0.3869281489996865,
"children": {
"run_training.setup": {
"total": 0.10817652200000794,
"count": 1,
"self": 0.10817652200000794
},
"TrainerController.start_learning": {
"total": 2182.021471428,
"count": 1,
"self": 3.8122998570593154,
"children": {
"TrainerController._reset_env": {
"total": 8.59708307699998,
"count": 1,
"self": 8.59708307699998
},
"TrainerController.advance": {
"total": 2169.4945905999416,
"count": 233620,
"self": 4.018375984927388,
"children": {
"env_step": {
"total": 1697.6602967469394,
"count": 233620,
"self": 1425.6270241381117,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.46338084890135,
"count": 233620,
"self": 14.105865429860614,
"children": {
"TorchPolicy.evaluate": {
"total": 255.35751541904074,
"count": 222864,
"self": 63.57145721001632,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.78605820902442,
"count": 222864,
"self": 191.78605820902442
}
}
}
}
},
"workers": {
"total": 2.5698917599264632,
"count": 233620,
"self": 0.0,
"children": {
"worker_root": {
"total": 2174.4309775049264,
"count": 233620,
"is_parallel": true,
"self": 1000.6076685800663,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00200359899997693,
"count": 1,
"is_parallel": true,
"self": 0.0002856590000419601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017179399999349698,
"count": 2,
"is_parallel": true,
"self": 0.0017179399999349698
}
}
},
"UnityEnvironment.step": {
"total": 0.025939018000030956,
"count": 1,
"is_parallel": true,
"self": 0.00025883600005727203,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017463699998643278,
"count": 1,
"is_parallel": true,
"self": 0.00017463699998643278
},
"communicator.exchange": {
"total": 0.024778714000035507,
"count": 1,
"is_parallel": true,
"self": 0.024778714000035507
},
"steps_from_proto": {
"total": 0.0007268309999517442,
"count": 1,
"is_parallel": true,
"self": 0.0002521489999480764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004746820000036678,
"count": 2,
"is_parallel": true,
"self": 0.0004746820000036678
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.8233089248602,
"count": 233619,
"is_parallel": true,
"self": 34.298042845604414,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.36786109804734,
"count": 233619,
"is_parallel": true,
"self": 75.36786109804734
},
"communicator.exchange": {
"total": 972.7715103671133,
"count": 233619,
"is_parallel": true,
"self": 972.7715103671133
},
"steps_from_proto": {
"total": 91.38589461409515,
"count": 233619,
"is_parallel": true,
"self": 37.442525779102084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.943368834993066,
"count": 467238,
"is_parallel": true,
"self": 53.943368834993066
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 467.81591786807496,
"count": 233620,
"self": 5.483384547163155,
"children": {
"process_trajectory": {
"total": 147.95892319291215,
"count": 233620,
"self": 146.82291561691295,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1360075759992014,
"count": 10,
"self": 1.1360075759992014
}
}
},
"_update_policy": {
"total": 314.37361012799965,
"count": 97,
"self": 262.16113977899613,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.212470349003524,
"count": 2910,
"self": 52.212470349003524
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5289997463696636e-06,
"count": 1,
"self": 1.5289997463696636e-06
},
"TrainerController._save_models": {
"total": 0.11749636499962435,
"count": 1,
"self": 0.00212506099978782,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11537130399983653,
"count": 1,
"self": 0.11537130399983653
}
}
}
}
}
}
}