ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4087187051773071,
"min": 1.4087185859680176,
"max": 1.431748390197754,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71913.6796875,
"min": 67325.7265625,
"max": 77690.9921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 135.57837837837837,
"min": 110.99552572706935,
"max": 416.6115702479339,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50164.0,
"min": 48944.0,
"max": 50410.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999898.0,
"min": 49953.0,
"max": 1999898.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999898.0,
"min": 49953.0,
"max": 1999898.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1986868381500244,
"min": 0.1227463036775589,
"max": 2.3679516315460205,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 813.51416015625,
"min": 14.7295560836792,
"max": 1037.162841796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4052902365053024,
"min": 1.808807759732008,
"max": 3.643281173619462,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1259.9573875069618,
"min": 217.05693116784096,
"max": 1545.8550307750702,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4052902365053024,
"min": 1.808807759732008,
"max": 3.643281173619462,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1259.9573875069618,
"min": 217.05693116784096,
"max": 1545.8550307750702,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018873930351886278,
"min": 0.01278413924058744,
"max": 0.020758595462151183,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.037747860703772555,
"min": 0.02556827848117488,
"max": 0.05410972224878302,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046421054067711034,
"min": 0.02001634589396417,
"max": 0.06832639990995328,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09284210813542207,
"min": 0.04003269178792834,
"max": 0.19298415717979273,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.241273586274997e-06,
"min": 4.241273586274997e-06,
"max": 0.000295320076559975,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.482547172549994e-06,
"min": 8.482547172549994e-06,
"max": 0.00084416206861265,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10141372500000001,
"min": 0.10141372500000001,
"max": 0.19844002500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20282745000000002,
"min": 0.20282745000000002,
"max": 0.5813873500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.054487749999996e-05,
"min": 8.054487749999996e-05,
"max": 0.0049221572475,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001610897549999999,
"min": 0.0001610897549999999,
"max": 0.014071228765,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687704032",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687706339"
},
"total": 2307.539091503,
"count": 1,
"self": 0.38623385499977303,
"children": {
"run_training.setup": {
"total": 0.04549411900006817,
"count": 1,
"self": 0.04549411900006817
},
"TrainerController.start_learning": {
"total": 2307.107363529,
"count": 1,
"self": 4.004986817025838,
"children": {
"TrainerController._reset_env": {
"total": 4.143943676999925,
"count": 1,
"self": 4.143943676999925
},
"TrainerController.advance": {
"total": 2298.8369550709745,
"count": 230132,
"self": 4.161105163002958,
"children": {
"env_step": {
"total": 1804.242106797995,
"count": 230132,
"self": 1520.3585297482025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 281.2224626769256,
"count": 230132,
"self": 16.27790403991196,
"children": {
"TorchPolicy.evaluate": {
"total": 264.94455863701364,
"count": 223039,
"self": 264.94455863701364
}
}
},
"workers": {
"total": 2.661114372866791,
"count": 230132,
"self": 0.0,
"children": {
"worker_root": {
"total": 2299.6964831730215,
"count": 230132,
"is_parallel": true,
"self": 1051.863102336054,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000915538000072047,
"count": 1,
"is_parallel": true,
"self": 0.00026198099999419355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006535570000778534,
"count": 2,
"is_parallel": true,
"self": 0.0006535570000778534
}
}
},
"UnityEnvironment.step": {
"total": 0.028539105999925596,
"count": 1,
"is_parallel": true,
"self": 0.0003801980000162075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002453729999842835,
"count": 1,
"is_parallel": true,
"self": 0.0002453729999842835
},
"communicator.exchange": {
"total": 0.027184189999957198,
"count": 1,
"is_parallel": true,
"self": 0.027184189999957198
},
"steps_from_proto": {
"total": 0.0007293449999679069,
"count": 1,
"is_parallel": true,
"self": 0.00022924099994270364,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005001040000252033,
"count": 2,
"is_parallel": true,
"self": 0.0005001040000252033
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1247.8333808369675,
"count": 230131,
"is_parallel": true,
"self": 38.901876567822,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.90263047706924,
"count": 230131,
"is_parallel": true,
"self": 77.90263047706924
},
"communicator.exchange": {
"total": 1038.350091220012,
"count": 230131,
"is_parallel": true,
"self": 1038.350091220012
},
"steps_from_proto": {
"total": 92.6787825720645,
"count": 230131,
"is_parallel": true,
"self": 33.148309746210884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.53047282585362,
"count": 460262,
"is_parallel": true,
"self": 59.53047282585362
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.4337431099766,
"count": 230132,
"self": 6.578197936037213,
"children": {
"process_trajectory": {
"total": 119.28421941494071,
"count": 230132,
"self": 117.97167038094108,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3125490339996304,
"count": 10,
"self": 1.3125490339996304
}
}
},
"_update_policy": {
"total": 364.5713257589987,
"count": 96,
"self": 306.13342187600847,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.43790388299021,
"count": 2880,
"self": 58.43790388299021
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.729998626222368e-07,
"count": 1,
"self": 8.729998626222368e-07
},
"TrainerController._save_models": {
"total": 0.12147709099963322,
"count": 1,
"self": 0.0018244579996462562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11965263299998696,
"count": 1,
"self": 0.11965263299998696
}
}
}
}
}
}
}
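
The file above follows the ML-Agents timer format (timer_format_version 0.1.0): a "gauges" map of per-metric value/min/max/count summaries, a "metadata" block describing the run, and a nested timer tree whose nodes carry "total", "count", "self", and "children". Below is a minimal sketch of how one might load and inspect it with plain Python; the relative path "run_logs/timers.json" is an assumption based on this repository's layout, and the helper name walk_timers is hypothetical, not part of ML-Agents.

```python
# Minimal sketch: load an ML-Agents timers.json like the one above and print
# the gauge summaries plus a flattened view of the timer hierarchy.
# Assumes the file is available at run_logs/timers.json (repo layout assumption).
import json


def walk_timers(node, name="root", depth=0):
    """Recursively print each timer node's total seconds and call count."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1)


if __name__ == "__main__":
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    # Each gauge holds value/min/max/count, e.g. Huggy.Environment.CumulativeReward.mean.
    for metric, stats in timers["gauges"].items():
        print(f"{metric}: value={stats['value']:.4f} "
              f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

    # The rest of the file is a nested timer tree rooted at the top-level object.
    walk_timers(timers)
```

Walking the tree this way makes the cost breakdown easy to read: in this run, for example, communicator.exchange accounts for roughly 1038 s of the approximately 2307 s total, i.e. most of the parallel environment-stepping time.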