{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4051389694213867,
"min": 1.4051389694213867,
"max": 1.4286291599273682,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70301.9140625,
"min": 68763.59375,
"max": 77292.96875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 110.8803611738149,
"min": 97.3493013972056,
"max": 405.491935483871,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49120.0,
"min": 48772.0,
"max": 50281.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999378.0,
"min": 49959.0,
"max": 1999378.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999378.0,
"min": 49959.0,
"max": 1999378.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.28530216217041,
"min": 0.1324712038040161,
"max": 2.4137399196624756,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1012.3888549804688,
"min": 16.29395866394043,
"max": 1209.28369140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4634831619047417,
"min": 1.7845650036645129,
"max": 3.8491808765267344,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1534.3230407238007,
"min": 219.5014954507351,
"max": 1903.0793465971947,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4634831619047417,
"min": 1.7845650036645129,
"max": 3.8491808765267344,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1534.3230407238007,
"min": 219.5014954507351,
"max": 1903.0793465971947,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016286229335916384,
"min": 0.015214169674679093,
"max": 0.020104073393546668,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03257245867183277,
"min": 0.030428339349358187,
"max": 0.06031222018064,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0486012543241183,
"min": 0.022029708977788685,
"max": 0.05704561819632848,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.0972025086482366,
"min": 0.04405941795557737,
"max": 0.17113685458898545,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.629623456825006e-06,
"min": 4.629623456825006e-06,
"max": 0.0002953488015504,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.259246913650012e-06,
"min": 9.259246913650012e-06,
"max": 0.0008440882686372499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101543175,
"min": 0.101543175,
"max": 0.1984496,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20308635,
"min": 0.20308635,
"max": 0.5813627499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.70044325000001e-05,
"min": 8.70044325000001e-05,
"max": 0.0049226350400000006,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001740088650000002,
"min": 0.0001740088650000002,
"max": 0.014070001225000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670648823",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670651116"
},
"total": 2293.0516266209997,
"count": 1,
"self": 0.3889807549994657,
"children": {
"run_training.setup": {
"total": 0.10880179600007978,
"count": 1,
"self": 0.10880179600007978
},
"TrainerController.start_learning": {
"total": 2292.55384407,
"count": 1,
"self": 3.9873416409832316,
"children": {
"TrainerController._reset_env": {
"total": 9.310540369000023,
"count": 1,
"self": 9.310540369000023
},
"TrainerController.advance": {
"total": 2279.1432771390173,
"count": 231292,
"self": 4.155797744875599,
"children": {
"env_step": {
"total": 1800.2412614300508,
"count": 231292,
"self": 1503.6171548578902,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.87231909419756,
"count": 231292,
"self": 15.091306761233682,
"children": {
"TorchPolicy.evaluate": {
"total": 278.7810123329639,
"count": 223028,
"self": 68.65754038808916,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.12347194487472,
"count": 223028,
"self": 210.12347194487472
}
}
}
}
},
"workers": {
"total": 2.7517874779630347,
"count": 231292,
"self": 0.0,
"children": {
"worker_root": {
"total": 2284.4017329990756,
"count": 231292,
"is_parallel": true,
"self": 1054.2923360761588,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026043780000009065,
"count": 1,
"is_parallel": true,
"self": 0.000364555999908589,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022398220000923175,
"count": 2,
"is_parallel": true,
"self": 0.0022398220000923175
}
}
},
"UnityEnvironment.step": {
"total": 0.028015335000077357,
"count": 1,
"is_parallel": true,
"self": 0.00026429900003677176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001784749999842461,
"count": 1,
"is_parallel": true,
"self": 0.0001784749999842461
},
"communicator.exchange": {
"total": 0.02683810700000322,
"count": 1,
"is_parallel": true,
"self": 0.02683810700000322
},
"steps_from_proto": {
"total": 0.0007344540000531197,
"count": 1,
"is_parallel": true,
"self": 0.00025381000011748256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004806439999356371,
"count": 2,
"is_parallel": true,
"self": 0.0004806439999356371
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1230.1093969229169,
"count": 231291,
"is_parallel": true,
"self": 35.2707443699137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.54731417089533,
"count": 231291,
"is_parallel": true,
"self": 80.54731417089533
},
"communicator.exchange": {
"total": 1018.9757199781072,
"count": 231291,
"is_parallel": true,
"self": 1018.9757199781072
},
"steps_from_proto": {
"total": 95.3156184040007,
"count": 231291,
"is_parallel": true,
"self": 41.429262358867504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.8863560451332,
"count": 462582,
"is_parallel": true,
"self": 53.8863560451332
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 474.7462179640909,
"count": 231292,
"self": 6.138163392115075,
"children": {
"process_trajectory": {
"total": 148.94583565597713,
"count": 231292,
"self": 148.46680559697757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4790300589995695,
"count": 4,
"self": 0.4790300589995695
}
}
},
"_update_policy": {
"total": 319.66221891599866,
"count": 96,
"self": 266.0433498870034,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.61886902899528,
"count": 2880,
"self": 53.61886902899528
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.149998732027598e-07,
"count": 1,
"self": 9.149998732027598e-07
},
"TrainerController._save_models": {
"total": 0.11268400599965389,
"count": 1,
"self": 0.002367270999457105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11031673500019679,
"count": 1,
"self": 0.11031673500019679
}
}
}
}
}
}
}