ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403069257736206,
"min": 1.403069257736206,
"max": 1.4298046827316284,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71149.640625,
"min": 68138.8828125,
"max": 77713.421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.1794425087108,
"min": 84.94168096054888,
"max": 400.056,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49467.0,
"min": 48866.0,
"max": 50026.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999985.0,
"min": 49776.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999985.0,
"min": 49776.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4232468605041504,
"min": 0.0492953322827816,
"max": 2.4718427658081055,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1390.9437255859375,
"min": 6.112621307373047,
"max": 1398.687255859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.759560770585562,
"min": 1.7277066527355103,
"max": 3.9015451093726123,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2157.9878823161125,
"min": 214.23562493920326,
"max": 2200.4714416861534,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.759560770585562,
"min": 1.7277066527355103,
"max": 3.9015451093726123,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2157.9878823161125,
"min": 214.23562493920326,
"max": 2200.4714416861534,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015038461095213682,
"min": 0.013656080609265094,
"max": 0.019821339562380066,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04511538328564105,
"min": 0.02731216121853019,
"max": 0.05600776835926808,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0638710141595867,
"min": 0.02236103021229307,
"max": 0.0638710141595867,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19161304247876007,
"min": 0.04472206042458614,
"max": 0.19161304247876007,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.217648927483336e-06,
"min": 3.217648927483336e-06,
"max": 0.00029529150156949995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.652946782450008e-06,
"min": 9.652946782450008e-06,
"max": 0.0008440339686553499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107251666666668,
"min": 0.10107251666666668,
"max": 0.19843050000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30321755000000006,
"min": 0.20727875,
"max": 0.58134465,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.35185816666667e-05,
"min": 6.35185816666667e-05,
"max": 0.004921681950000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019055574500000008,
"min": 0.00019055574500000008,
"max": 0.014069098035,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673192698",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673194846"
},
"total": 2147.935008712,
"count": 1,
"self": 0.3897218510001039,
"children": {
"run_training.setup": {
"total": 0.10718716899998526,
"count": 1,
"self": 0.10718716899998526
},
"TrainerController.start_learning": {
"total": 2147.438099692,
"count": 1,
"self": 3.6167685339823947,
"children": {
"TrainerController._reset_env": {
"total": 8.473504527000046,
"count": 1,
"self": 8.473504527000046
},
"TrainerController.advance": {
"total": 2135.2324167330175,
"count": 231987,
"self": 3.7832695569136376,
"children": {
"env_step": {
"total": 1678.3836468460381,
"count": 231987,
"self": 1413.2249370630261,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.721565008055,
"count": 231987,
"self": 13.55091418093457,
"children": {
"TorchPolicy.evaluate": {
"total": 249.17065082712043,
"count": 223008,
"self": 62.52308384908531,
"children": {
"TorchPolicy.sample_actions": {
"total": 186.64756697803512,
"count": 223008,
"self": 186.64756697803512
}
}
}
}
},
"workers": {
"total": 2.437144774957119,
"count": 231987,
"self": 0.0,
"children": {
"worker_root": {
"total": 2139.920258323063,
"count": 231987,
"is_parallel": true,
"self": 975.1254327660247,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002432790000057139,
"count": 1,
"is_parallel": true,
"self": 0.00034717500011538505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002085614999941754,
"count": 2,
"is_parallel": true,
"self": 0.002085614999941754
}
}
},
"UnityEnvironment.step": {
"total": 0.027195098999982292,
"count": 1,
"is_parallel": true,
"self": 0.00028512100004718377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017259599997032637,
"count": 1,
"is_parallel": true,
"self": 0.00017259599997032637
},
"communicator.exchange": {
"total": 0.0260209880000275,
"count": 1,
"is_parallel": true,
"self": 0.0260209880000275
},
"steps_from_proto": {
"total": 0.0007163939999372815,
"count": 1,
"is_parallel": true,
"self": 0.0002354879999302284,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004809060000070531,
"count": 2,
"is_parallel": true,
"self": 0.0004809060000070531
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1164.7948255570384,
"count": 231986,
"is_parallel": true,
"self": 33.7580920360017,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.85369392308314,
"count": 231986,
"is_parallel": true,
"self": 74.85369392308314
},
"communicator.exchange": {
"total": 964.5595393909435,
"count": 231986,
"is_parallel": true,
"self": 964.5595393909435
},
"steps_from_proto": {
"total": 91.62350020701001,
"count": 231986,
"is_parallel": true,
"self": 37.570323990957604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.05317621605241,
"count": 463972,
"is_parallel": true,
"self": 54.05317621605241
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 453.06550033006556,
"count": 231987,
"self": 5.683320064021586,
"children": {
"process_trajectory": {
"total": 142.43971130604598,
"count": 231987,
"self": 141.27044113404713,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1692701719988463,
"count": 10,
"self": 1.1692701719988463
}
}
},
"_update_policy": {
"total": 304.942468959998,
"count": 97,
"self": 252.11383328100442,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.82863567899358,
"count": 2910,
"self": 52.82863567899358
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.990000424091704e-07,
"count": 1,
"self": 8.990000424091704e-07
},
"TrainerController._save_models": {
"total": 0.11540899900001023,
"count": 1,
"self": 0.002106365999679838,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11330263300033039,
"count": 1,
"self": 0.11330263300033039
}
}
}
}
}
}
}
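A minimal sketch, assuming this file is saved locally as run_logs/timers.json, of how the gauge statistics above could be read back in Python; each gauge stores the latest recorded value together with its running min, max, and update count, and the top-level node carries the wall-clock totals.

```python
import json

# Load the ML-Agents timer dump (path assumed from the repo layout above).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print each gauge: latest value plus running min/max over "count" updates.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Wall-clock total for the root timer node, in seconds, and run metadata.
print("total run time (s):", timers["total"])
print("mlagents version:", timers["metadata"]["mlagents_version"])
```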