{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4032223224639893,
"min": 1.4032223224639893,
"max": 1.425674319267273,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72184.5625,
"min": 68191.484375,
"max": 75779.34375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.94045174537987,
"min": 81.79635761589404,
"max": 361.8848920863309,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49158.0,
"min": 48856.0,
"max": 50302.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999914.0,
"min": 49825.0,
"max": 1999914.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999914.0,
"min": 49825.0,
"max": 1999914.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4273414611816406,
"min": 0.1285102218389511,
"max": 2.4646685123443604,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1182.115234375,
"min": 17.734411239624023,
"max": 1423.963623046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6635572079270773,
"min": 1.724457284246666,
"max": 3.9999477056441486,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1784.1523602604866,
"min": 237.9751052260399,
"max": 2259.7069824934006,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6635572079270773,
"min": 1.724457284246666,
"max": 3.9999477056441486,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1784.1523602604866,
"min": 237.9751052260399,
"max": 2259.7069824934006,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017923911904088326,
"min": 0.01188928446887682,
"max": 0.02055552155595958,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05377173571226498,
"min": 0.028380434294134223,
"max": 0.061666564667878745,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049121068666378664,
"min": 0.02245912779536512,
"max": 0.06593285072594882,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.147363205999136,
"min": 0.04497511064012845,
"max": 0.17913664802908896,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.173298942266666e-06,
"min": 3.173298942266666e-06,
"max": 0.00029531190156269996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.519896826799997e-06,
"min": 9.519896826799997e-06,
"max": 0.00084393916868695,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105773333333336,
"min": 0.10105773333333336,
"max": 0.19843730000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031732000000001,
"min": 0.20725185000000002,
"max": 0.5813130499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.278089333333332e-05,
"min": 6.278089333333332e-05,
"max": 0.004922021269999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018834267999999997,
"min": 0.00018834267999999997,
"max": 0.014067521194999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672234391",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672236575"
},
"total": 2184.231280342,
"count": 1,
"self": 0.3965977140001087,
"children": {
"run_training.setup": {
"total": 0.10467064900001333,
"count": 1,
"self": 0.10467064900001333
},
"TrainerController.start_learning": {
"total": 2183.7300119789998,
"count": 1,
"self": 3.751654897905155,
"children": {
"TrainerController._reset_env": {
"total": 7.112922579999918,
"count": 1,
"self": 7.112922579999918
},
"TrainerController.advance": {
"total": 2172.7276208380945,
"count": 232640,
"self": 3.9819023482464218,
"children": {
"env_step": {
"total": 1706.753160611934,
"count": 232640,
"self": 1438.447018445886,
"children": {
"SubprocessEnvManager._take_step": {
"total": 265.80721158901576,
"count": 232640,
"self": 13.405042567964415,
"children": {
"TorchPolicy.evaluate": {
"total": 252.40216902105135,
"count": 223108,
"self": 62.742599704990084,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.65956931606127,
"count": 223108,
"self": 189.65956931606127
}
}
}
}
},
"workers": {
"total": 2.4989305770320698,
"count": 232640,
"self": 0.0,
"children": {
"worker_root": {
"total": 2176.1729620919614,
"count": 232640,
"is_parallel": true,
"self": 987.8883412050009,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019916290000310255,
"count": 1,
"is_parallel": true,
"self": 0.0002810319999753119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017105970000557136,
"count": 2,
"is_parallel": true,
"self": 0.0017105970000557136
}
}
},
"UnityEnvironment.step": {
"total": 0.0267660369999021,
"count": 1,
"is_parallel": true,
"self": 0.00025159599977087055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017528600005789485,
"count": 1,
"is_parallel": true,
"self": 0.00017528600005789485
},
"communicator.exchange": {
"total": 0.025657320000050277,
"count": 1,
"is_parallel": true,
"self": 0.025657320000050277
},
"steps_from_proto": {
"total": 0.0006818350000230566,
"count": 1,
"is_parallel": true,
"self": 0.000234011999964423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004478230000586336,
"count": 2,
"is_parallel": true,
"self": 0.0004478230000586336
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1188.2846208869605,
"count": 232639,
"is_parallel": true,
"self": 33.84821474191722,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.8077645330485,
"count": 232639,
"is_parallel": true,
"self": 74.8077645330485
},
"communicator.exchange": {
"total": 988.3267166149559,
"count": 232639,
"is_parallel": true,
"self": 988.3267166149559
},
"steps_from_proto": {
"total": 91.30192499703901,
"count": 232639,
"is_parallel": true,
"self": 37.33560650813911,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.9663184888999,
"count": 465278,
"is_parallel": true,
"self": 53.9663184888999
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 461.992557877914,
"count": 232640,
"self": 6.03314928004022,
"children": {
"process_trajectory": {
"total": 144.81794414087278,
"count": 232640,
"self": 143.65159447487258,
"children": {
"RLTrainer._checkpoint": {
"total": 1.166349666000201,
"count": 10,
"self": 1.166349666000201
}
}
},
"_update_policy": {
"total": 311.141464457001,
"count": 97,
"self": 258.5903908590079,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.55107359799308,
"count": 2910,
"self": 52.55107359799308
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.290001798945013e-07,
"count": 1,
"self": 9.290001798945013e-07
},
"TrainerController._save_models": {
"total": 0.13781273399990823,
"count": 1,
"self": 0.0019437390001257882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13586899499978244,
"count": 1,
"self": 0.13586899499978244
}
}
}
}
}
}
}