ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.408327341079712,
"min": 1.408327341079712,
"max": 1.4273402690887451,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71154.328125,
"min": 69254.875,
"max": 76942.4609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.13178294573643,
"min": 83.48801369863014,
"max": 381.39694656488547,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50120.0,
"min": 48757.0,
"max": 50148.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999903.0,
"min": 49819.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999903.0,
"min": 49819.0,
"max": 1999903.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3655617237091064,
"min": 0.06930004805326462,
"max": 2.4402003288269043,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1220.6298828125,
"min": 9.00900650024414,
"max": 1405.2899169921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6467160029235735,
"min": 1.5885832346975803,
"max": 3.9155124207633136,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1881.705457508564,
"min": 206.51582051068544,
"max": 2216.10943120718,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6467160029235735,
"min": 1.5885832346975803,
"max": 3.9155124207633136,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1881.705457508564,
"min": 206.51582051068544,
"max": 2216.10943120718,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.021759106181303247,
"min": 0.014303531300780985,
"max": 0.021759106181303247,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.06527731854390974,
"min": 0.02888489263447506,
"max": 0.06527731854390974,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04874928332865238,
"min": 0.02250079748531183,
"max": 0.060094226855370726,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14624784998595713,
"min": 0.04500159497062366,
"max": 0.18028268056611219,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.569798810099997e-06,
"min": 3.569798810099997e-06,
"max": 0.00029536395154535,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0709396430299991e-05,
"min": 1.0709396430299991e-05,
"max": 0.0008441088186303999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118990000000001,
"min": 0.10118990000000001,
"max": 0.19845465,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30356970000000005,
"min": 0.20756240000000004,
"max": 0.5813696,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.937600999999998e-05,
"min": 6.937600999999998e-05,
"max": 0.004922887035,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020812802999999996,
"min": 0.00020812802999999996,
"max": 0.014070343040000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671859160",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671861527"
},
"total": 2367.487228419,
"count": 1,
"self": 0.4431527929996264,
"children": {
"run_training.setup": {
"total": 0.11522986600016338,
"count": 1,
"self": 0.11522986600016338
},
"TrainerController.start_learning": {
"total": 2366.92884576,
"count": 1,
"self": 4.12007627306366,
"children": {
"TrainerController._reset_env": {
"total": 8.061984804000076,
"count": 1,
"self": 8.061984804000076
},
"TrainerController.advance": {
"total": 2354.628329741937,
"count": 231925,
"self": 4.303294803885819,
"children": {
"env_step": {
"total": 1853.983088012982,
"count": 231925,
"self": 1562.979692139977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 288.3243955831563,
"count": 231925,
"self": 14.883805764176486,
"children": {
"TorchPolicy.evaluate": {
"total": 273.4405898189798,
"count": 222988,
"self": 68.17644977308328,
"children": {
"TorchPolicy.sample_actions": {
"total": 205.2641400458965,
"count": 222988,
"self": 205.2641400458965
}
}
}
}
},
"workers": {
"total": 2.679000289848773,
"count": 231925,
"self": 0.0,
"children": {
"worker_root": {
"total": 2358.9124475899616,
"count": 231925,
"is_parallel": true,
"self": 1066.0531339700044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023334969998813904,
"count": 1,
"is_parallel": true,
"self": 0.00031515499995293794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020183419999284524,
"count": 2,
"is_parallel": true,
"self": 0.0020183419999284524
}
}
},
"UnityEnvironment.step": {
"total": 0.029470119000052364,
"count": 1,
"is_parallel": true,
"self": 0.00029025000003457535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001980649999495654,
"count": 1,
"is_parallel": true,
"self": 0.0001980649999495654
},
"communicator.exchange": {
"total": 0.02824405499995919,
"count": 1,
"is_parallel": true,
"self": 0.02824405499995919
},
"steps_from_proto": {
"total": 0.0007377490001090337,
"count": 1,
"is_parallel": true,
"self": 0.00024688000007699884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004908690000320348,
"count": 2,
"is_parallel": true,
"self": 0.0004908690000320348
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1292.8593136199572,
"count": 231924,
"is_parallel": true,
"self": 36.783346785101,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.50723340877971,
"count": 231924,
"is_parallel": true,
"self": 81.50723340877971
},
"communicator.exchange": {
"total": 1076.4889785849175,
"count": 231924,
"is_parallel": true,
"self": 1076.4889785849175
},
"steps_from_proto": {
"total": 98.079754841159,
"count": 231924,
"is_parallel": true,
"self": 40.10983460321245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.96992023794655,
"count": 463848,
"is_parallel": true,
"self": 57.96992023794655
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 496.3419469250689,
"count": 231925,
"self": 6.354400257992438,
"children": {
"process_trajectory": {
"total": 154.46617570007515,
"count": 231925,
"self": 153.0976543560755,
"children": {
"RLTrainer._checkpoint": {
"total": 1.368521343999646,
"count": 10,
"self": 1.368521343999646
}
}
},
"_update_policy": {
"total": 335.5213709670013,
"count": 97,
"self": 281.8443740310224,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.67699693597888,
"count": 2910,
"self": 53.67699693597888
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.989995876618195e-07,
"count": 1,
"self": 8.989995876618195e-07
},
"TrainerController._save_models": {
"total": 0.11845404199993936,
"count": 1,
"self": 0.0020404599999892525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11641358199995011,
"count": 1,
"self": 0.11641358199995011
}
}
}
}
}
}
}