ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.155360460281372,
"min": 1.155360460281372,
"max": 1.4170268774032593,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 57737.984375,
"min": 56760.9375,
"max": 76102.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.7421052631579,
"min": 85.18072289156626,
"max": 416.85833333333335,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49443.0,
"min": 48898.0,
"max": 50237.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999975.0,
"min": 49631.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999975.0,
"min": 49631.0,
"max": 1999975.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.419564962387085,
"min": 0.1287817358970642,
"max": 2.419564962387085,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1379.1519775390625,
"min": 15.325026512145996,
"max": 1394.5548095703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.77542090551895,
"min": 2.0270018244490906,
"max": 3.9417839295450476,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2151.9899161458015,
"min": 241.21321710944176,
"max": 2240.0541476607323,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.77542090551895,
"min": 2.0270018244490906,
"max": 3.9417839295450476,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2151.9899161458015,
"min": 241.21321710944176,
"max": 2240.0541476607323,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.06742933498710606,
"min": 0.06638521359917955,
"max": 0.07542959116440215,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.13485866997421211,
"min": 0.1327704271983591,
"max": 0.2157607432206502,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04218596139883933,
"min": 0.013628062972566113,
"max": 0.04218596139883933,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08437192279767866,
"min": 0.027256125945132225,
"max": 0.11252502404580203,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.593623468824987e-06,
"min": 4.593623468824987e-06,
"max": 0.00029533845155385,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.187246937649973e-06,
"min": 9.187246937649973e-06,
"max": 0.0008440638186454,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101531175,
"min": 0.101531175,
"max": 0.19844615,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20306235,
"min": 0.20306235,
"max": 0.5813546,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.64056324999998e-05,
"min": 8.64056324999998e-05,
"max": 0.004922462884999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001728112649999996,
"min": 0.0001728112649999996,
"max": 0.014069594539999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677338458",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677341068"
},
"total": 2610.211858171,
"count": 1,
"self": 0.3970047599996178,
"children": {
"run_training.setup": {
"total": 0.11018421099993247,
"count": 1,
"self": 0.11018421099993247
},
"TrainerController.start_learning": {
"total": 2609.7046692000004,
"count": 1,
"self": 3.9764165088804475,
"children": {
"TrainerController._reset_env": {
"total": 10.470653653,
"count": 1,
"self": 10.470653653
},
"TrainerController.advance": {
"total": 2595.14965524712,
"count": 231383,
"self": 4.490255687065655,
"children": {
"env_step": {
"total": 1772.7544551780393,
"count": 231383,
"self": 1486.0676383619773,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.08601014701753,
"count": 231383,
"self": 15.444536068925345,
"children": {
"TorchPolicy.evaluate": {
"total": 268.6414740780922,
"count": 222907,
"self": 67.29148799511154,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.34998608298065,
"count": 222907,
"self": 201.34998608298065
}
}
}
}
},
"workers": {
"total": 2.6008066690444593,
"count": 231383,
"self": 0.0,
"children": {
"worker_root": {
"total": 2601.4805156938746,
"count": 231383,
"is_parallel": true,
"self": 1396.5920414289194,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008852260000367096,
"count": 1,
"is_parallel": true,
"self": 0.00030718500011062133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005780409999260883,
"count": 2,
"is_parallel": true,
"self": 0.0005780409999260883
}
}
},
"UnityEnvironment.step": {
"total": 0.029832400000032067,
"count": 1,
"is_parallel": true,
"self": 0.00031281100041269383,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003714979998221679,
"count": 1,
"is_parallel": true,
"self": 0.0003714979998221679
},
"communicator.exchange": {
"total": 0.02848267199988186,
"count": 1,
"is_parallel": true,
"self": 0.02848267199988186
},
"steps_from_proto": {
"total": 0.0006654189999153459,
"count": 1,
"is_parallel": true,
"self": 0.00021971299975120928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044570600016413664,
"count": 2,
"is_parallel": true,
"self": 0.00044570600016413664
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1204.8884742649552,
"count": 231382,
"is_parallel": true,
"self": 37.57263266155792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.01672643718689,
"count": 231382,
"is_parallel": true,
"self": 78.01672643718689
},
"communicator.exchange": {
"total": 999.9373294560505,
"count": 231382,
"is_parallel": true,
"self": 999.9373294560505
},
"steps_from_proto": {
"total": 89.3617857101599,
"count": 231382,
"is_parallel": true,
"self": 36.289736526970955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.07204918318894,
"count": 462764,
"is_parallel": true,
"self": 53.07204918318894
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 817.9049443820147,
"count": 231383,
"self": 6.412810027999058,
"children": {
"process_trajectory": {
"total": 158.56997422301652,
"count": 231383,
"self": 157.41246759901696,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1575066239995522,
"count": 10,
"self": 1.1575066239995522
}
}
},
"_update_policy": {
"total": 652.9221601309991,
"count": 96,
"self": 317.9563261770247,
"children": {
"TorchPPOOptimizer.update": {
"total": 334.9658339539744,
"count": 46287,
"self": 334.9658339539744
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.189999789465219e-07,
"count": 1,
"self": 8.189999789465219e-07
},
"TrainerController._save_models": {
"total": 0.10794297199981884,
"count": 1,
"self": 0.0021557760001087445,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10578719599971009,
"count": 1,
"self": 0.10578719599971009
}
}
}
}
}
}
}
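
The raw timers.json above is the hierarchical profiler output that mlagents-learn writes at the end of a run: "gauges" holds per-metric summaries (value, min, max, count), "metadata" records the training command and package versions, and the nested total/count/self/children entries break down wall-clock time per internal call. Below is a minimal sketch of one way to load and summarize such a file; the file path and the helper names (print_gauges, print_timer_tree) are illustrative assumptions, not part of the ML-Agents toolkit.

```python
import json


def print_gauges(gauges):
    """Print each gauge's final value and its min/max/count over the run."""
    for name, stats in gauges.items():
        print(f"{name}: value={stats['value']:.4g} "
              f"(min={stats['min']:.4g}, max={stats['max']:.4g}, count={stats['count']})")


def print_timer_tree(node, name="root", depth=0):
    """Recursively print the hierarchical timer breakdown in seconds."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)


if __name__ == "__main__":
    # Path assumed from the repo layout shown above.
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    print_gauges(timers["gauges"])
    print()
    print_timer_tree(timers)
```

Run against this file, the tree printout makes the time budget visible at a glance, e.g. most of the ~2610 s total sits under TrainerController.advance, split between env_step (environment stepping and communicator.exchange) and trainer_advance (PPO policy updates).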