ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4047693014144897,
"min": 1.4047693014144897,
"max": 1.4275935888290405,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71567.375,
"min": 67916.4921875,
"max": 77533.1328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.54567307692308,
"min": 116.31924882629107,
"max": 386.31538461538463,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49731.0,
"min": 48994.0,
"max": 50257.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999453.0,
"min": 49737.0,
"max": 1999453.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999453.0,
"min": 49737.0,
"max": 1999453.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2637887001037598,
"min": -0.05243362486362457,
"max": 2.3066089153289795,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 941.736083984375,
"min": -6.763937473297119,
"max": 960.45654296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4641706940646353,
"min": 1.8419678597025169,
"max": 3.806532596698915,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1441.0950087308884,
"min": 237.61385390162468,
"max": 1479.2699458003044,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4641706940646353,
"min": 1.8419678597025169,
"max": 3.806532596698915,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1441.0950087308884,
"min": 237.61385390162468,
"max": 1479.2699458003044,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01646967366298971,
"min": 0.012516797275202407,
"max": 0.02073884292525084,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03293934732597942,
"min": 0.029239564954089775,
"max": 0.05595560799896097,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04702942830820879,
"min": 0.02406559158116579,
"max": 0.05687761685500542,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09405885661641758,
"min": 0.04813118316233158,
"max": 0.16598962085942426,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.057823647425e-06,
"min": 4.057823647425e-06,
"max": 0.00029536327654557494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.11564729485e-06,
"min": 8.11564729485e-06,
"max": 0.00084401416866195,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101352575,
"min": 0.101352575,
"max": 0.198454425,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20270515,
"min": 0.20270515,
"max": 0.5813380499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.749349250000001e-05,
"min": 7.749349250000001e-05,
"max": 0.004922875807500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015498698500000002,
"min": 0.00015498698500000002,
"max": 0.014068768694999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670342495",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670344706"
},
"total": 2210.7581426889997,
"count": 1,
"self": 0.3793534449996514,
"children": {
"run_training.setup": {
"total": 0.10854138300010163,
"count": 1,
"self": 0.10854138300010163
},
"TrainerController.start_learning": {
"total": 2210.270247861,
"count": 1,
"self": 4.117480521072139,
"children": {
"TrainerController._reset_env": {
"total": 10.862433818,
"count": 1,
"self": 10.862433818
},
"TrainerController.advance": {
"total": 2195.1734962689275,
"count": 229940,
"self": 4.469660247002594,
"children": {
"env_step": {
"total": 1735.3869632929632,
"count": 229940,
"self": 1442.6798686230154,
"children": {
"SubprocessEnvManager._take_step": {
"total": 290.1062631939716,
"count": 229940,
"self": 14.905252486017162,
"children": {
"TorchPolicy.evaluate": {
"total": 275.2010107079544,
"count": 223000,
"self": 69.36493642585344,
"children": {
"TorchPolicy.sample_actions": {
"total": 205.83607428210098,
"count": 223000,
"self": 205.83607428210098
}
}
}
}
},
"workers": {
"total": 2.6008314759762925,
"count": 229940,
"self": 0.0,
"children": {
"worker_root": {
"total": 2201.8158332541093,
"count": 229940,
"is_parallel": true,
"self": 1023.4803059870042,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00244081800008189,
"count": 1,
"is_parallel": true,
"self": 0.00036173200010125584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020790859999806344,
"count": 2,
"is_parallel": true,
"self": 0.0020790859999806344
}
}
},
"UnityEnvironment.step": {
"total": 0.027258477000032144,
"count": 1,
"is_parallel": true,
"self": 0.00026010200008386164,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002366740000070422,
"count": 1,
"is_parallel": true,
"self": 0.0002366740000070422
},
"communicator.exchange": {
"total": 0.026059962999966046,
"count": 1,
"is_parallel": true,
"self": 0.026059962999966046
},
"steps_from_proto": {
"total": 0.0007017379999751938,
"count": 1,
"is_parallel": true,
"self": 0.00024138199989920395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046035600007598987,
"count": 2,
"is_parallel": true,
"self": 0.00046035600007598987
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1178.3355272671051,
"count": 229939,
"is_parallel": true,
"self": 34.39199336200522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.80319368202413,
"count": 229939,
"is_parallel": true,
"self": 76.80319368202413
},
"communicator.exchange": {
"total": 972.0178783831626,
"count": 229939,
"is_parallel": true,
"self": 972.0178783831626
},
"steps_from_proto": {
"total": 95.12246183991306,
"count": 229939,
"is_parallel": true,
"self": 40.95921708697074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.16324475294232,
"count": 459878,
"is_parallel": true,
"self": 54.16324475294232
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 455.3168727289617,
"count": 229940,
"self": 6.7503473150267155,
"children": {
"process_trajectory": {
"total": 142.37208843993733,
"count": 229940,
"self": 141.8700219089377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5020665309996275,
"count": 4,
"self": 0.5020665309996275
}
}
},
"_update_policy": {
"total": 306.19443697399765,
"count": 96,
"self": 252.22598487800224,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.96845209599542,
"count": 2880,
"self": 53.96845209599542
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.151000105892308e-06,
"count": 1,
"self": 1.151000105892308e-06
},
"TrainerController._save_models": {
"total": 0.11683610199997929,
"count": 1,
"self": 0.0019036600001527404,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11493244199982655,
"count": 1,
"self": 0.11493244199982655
}
}
}
}
}
}
}