ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4115371704101562,
"min": 1.4115371704101562,
"max": 1.4304763078689575,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69306.4765625,
"min": 68988.7890625,
"max": 76478.390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.45207956600362,
"min": 80.2183908045977,
"max": 391.8203125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49467.0,
"min": 48766.0,
"max": 50153.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999923.0,
"min": 49940.0,
"max": 1999923.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999923.0,
"min": 49940.0,
"max": 1999923.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3586559295654297,
"min": 0.131281778216362,
"max": 2.45984148979187,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1304.3367919921875,
"min": 16.672786712646484,
"max": 1469.0914306640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.667770275047847,
"min": 2.0512009913057794,
"max": 3.920948320563803,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2028.2769621014595,
"min": 260.50252589583397,
"max": 2278.98466783762,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.667770275047847,
"min": 2.0512009913057794,
"max": 3.920948320563803,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2028.2769621014595,
"min": 260.50252589583397,
"max": 2278.98466783762,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016899755167024622,
"min": 0.013661170341947582,
"max": 0.020558865014027105,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05069926550107387,
"min": 0.027322340683895163,
"max": 0.06064619927953269,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05638637008766333,
"min": 0.021339635054270428,
"max": 0.07188540132095417,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16915911026299,
"min": 0.042679270108540855,
"max": 0.18488538240393004,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1050489650166726e-06,
"min": 3.1050489650166726e-06,
"max": 0.00029532510155830005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.315146895050018e-06,
"min": 9.315146895050018e-06,
"max": 0.0008440908186364,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103498333333334,
"min": 0.10103498333333334,
"max": 0.19844169999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30310495000000004,
"min": 0.20728210000000002,
"max": 0.5813636000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.164566833333344e-05,
"min": 6.164566833333344e-05,
"max": 0.004922240830000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018493700500000034,
"min": 0.00018493700500000034,
"max": 0.014070043639999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670439861",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670442269"
},
"total": 2408.161839026,
"count": 1,
"self": 0.39296544199987693,
"children": {
"run_training.setup": {
"total": 0.11611243699996976,
"count": 1,
"self": 0.11611243699996976
},
"TrainerController.start_learning": {
"total": 2407.652761147,
"count": 1,
"self": 4.15853305706878,
"children": {
"TrainerController._reset_env": {
"total": 9.441568495999945,
"count": 1,
"self": 9.441568495999945
},
"TrainerController.advance": {
"total": 2393.9390391859315,
"count": 232203,
"self": 4.399848814990037,
"children": {
"env_step": {
"total": 1888.533511716008,
"count": 232203,
"self": 1582.0551956349454,
"children": {
"SubprocessEnvManager._take_step": {
"total": 303.60490915294736,
"count": 232203,
"self": 15.656520873063073,
"children": {
"TorchPolicy.evaluate": {
"total": 287.9483882798843,
"count": 222940,
"self": 71.67128815291017,
"children": {
"TorchPolicy.sample_actions": {
"total": 216.27710012697412,
"count": 222940,
"self": 216.27710012697412
}
}
}
}
},
"workers": {
"total": 2.8734069281151733,
"count": 232203,
"self": 0.0,
"children": {
"worker_root": {
"total": 2399.327145317024,
"count": 232203,
"is_parallel": true,
"self": 1102.724129625864,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018555219999143446,
"count": 1,
"is_parallel": true,
"self": 0.0003910009999117392,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014645210000026054,
"count": 2,
"is_parallel": true,
"self": 0.0014645210000026054
}
}
},
"UnityEnvironment.step": {
"total": 0.030903972999908547,
"count": 1,
"is_parallel": true,
"self": 0.00034596499995132035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034969000000728556,
"count": 1,
"is_parallel": true,
"self": 0.00034969000000728556
},
"communicator.exchange": {
"total": 0.029338716999973258,
"count": 1,
"is_parallel": true,
"self": 0.029338716999973258
},
"steps_from_proto": {
"total": 0.0008696009999766829,
"count": 1,
"is_parallel": true,
"self": 0.0003064850000100705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005631159999666124,
"count": 2,
"is_parallel": true,
"self": 0.0005631159999666124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1296.6030156911602,
"count": 232202,
"is_parallel": true,
"self": 36.29968849606348,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.79077629199173,
"count": 232202,
"is_parallel": true,
"self": 82.79077629199173
},
"communicator.exchange": {
"total": 1078.1693571060719,
"count": 232202,
"is_parallel": true,
"self": 1078.1693571060719
},
"steps_from_proto": {
"total": 99.343193797033,
"count": 232202,
"is_parallel": true,
"self": 42.85255034110082,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.49064345593217,
"count": 464404,
"is_parallel": true,
"self": 56.49064345593217
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 501.0056786549337,
"count": 232203,
"self": 6.754955714003927,
"children": {
"process_trajectory": {
"total": 165.26270763992864,
"count": 232203,
"self": 164.77217340592836,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49053423400027896,
"count": 4,
"self": 0.49053423400027896
}
}
},
"_update_policy": {
"total": 328.98801530100116,
"count": 97,
"self": 274.0570640029979,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.930951298003265,
"count": 2910,
"self": 54.930951298003265
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.069999578059651e-07,
"count": 1,
"self": 9.069999578059651e-07
},
"TrainerController._save_models": {
"total": 0.11361950099990281,
"count": 1,
"self": 0.0019937769998250587,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11162572400007775,
"count": 1,
"self": 0.11162572400007775
}
}
}
}
}
}
}
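For reference, a minimal sketch of reading this file, assuming Python 3 with only the standard json module and that the file is opened from run_logs/timers.json relative to the working directory (adjust the path as needed):

import json

# Load the ML-Agents timers file for this run.
# The path below is an assumption; point it at your local copy of the file.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge entry stores the latest value plus the min/max across the
# recorded summaries (count == 40 for this run).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Total wall-clock time of the run, in seconds.
print("total seconds:", timers["total"])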