ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4058769941329956,
"min": 1.4058769941329956,
"max": 1.4298095703125,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69263.34375,
"min": 67262.28125,
"max": 78167.2109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.21348314606742,
"min": 71.92128279883381,
"max": 392.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49350.0,
"min": 49289.0,
"max": 50272.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999925.0,
"min": 49834.0,
"max": 1999925.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999925.0,
"min": 49834.0,
"max": 1999925.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.472149133682251,
"min": 0.1476437747478485,
"max": 2.505276679992676,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1540.14892578125,
"min": 18.75075912475586,
"max": 1659.164306640625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.868077176244837,
"min": 1.60128559559349,
"max": 4.093506238093743,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2409.8120808005333,
"min": 203.36327064037323,
"max": 2654.1382831931114,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.868077176244837,
"min": 1.60128559559349,
"max": 4.093506238093743,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2409.8120808005333,
"min": 203.36327064037323,
"max": 2654.1382831931114,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016985711106745943,
"min": 0.013349329444948429,
"max": 0.020423385698813946,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050957133320237824,
"min": 0.026698658889896858,
"max": 0.05732313036375368,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05508493093980684,
"min": 0.022367440878103176,
"max": 0.05614630940059821,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16525479281942052,
"min": 0.04473488175620635,
"max": 0.16606089944640795,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.985098671666661e-06,
"min": 3.985098671666661e-06,
"max": 0.00029527102657632496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1955296014999982e-05,
"min": 1.1955296014999982e-05,
"max": 0.0008439058686980498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10132833333333334,
"min": 0.10132833333333334,
"max": 0.19842367500000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303985,
"min": 0.20778870000000002,
"max": 0.5813019500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.628383333333325e-05,
"min": 7.628383333333325e-05,
"max": 0.0049213413825,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022885149999999976,
"min": 0.00022885149999999976,
"max": 0.014066967304999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683076631",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683078973"
},
"total": 2342.072722713,
"count": 1,
"self": 0.43937914899970565,
"children": {
"run_training.setup": {
"total": 0.047876203000043915,
"count": 1,
"self": 0.047876203000043915
},
"TrainerController.start_learning": {
"total": 2341.585467361,
"count": 1,
"self": 4.145171033927909,
"children": {
"TrainerController._reset_env": {
"total": 3.9490146239999717,
"count": 1,
"self": 3.9490146239999717
},
"TrainerController.advance": {
"total": 2333.367944551072,
"count": 233185,
"self": 4.285349784103801,
"children": {
"env_step": {
"total": 1812.4778549560317,
"count": 233185,
"self": 1534.8689063409442,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.9252716619818,
"count": 233185,
"self": 15.981754577019728,
"children": {
"TorchPolicy.evaluate": {
"total": 258.9435170849621,
"count": 222924,
"self": 258.9435170849621
}
}
},
"workers": {
"total": 2.683676953105646,
"count": 233185,
"self": 0.0,
"children": {
"worker_root": {
"total": 2333.7815478580796,
"count": 233185,
"is_parallel": true,
"self": 1079.5616420881074,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006837239999981648,
"count": 1,
"is_parallel": true,
"self": 0.00020394099999521131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004797830000029535,
"count": 2,
"is_parallel": true,
"self": 0.0004797830000029535
}
}
},
"UnityEnvironment.step": {
"total": 0.03174968999996963,
"count": 1,
"is_parallel": true,
"self": 0.00032284400003845803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020293299996865244,
"count": 1,
"is_parallel": true,
"self": 0.00020293299996865244
},
"communicator.exchange": {
"total": 0.030531621999955405,
"count": 1,
"is_parallel": true,
"self": 0.030531621999955405
},
"steps_from_proto": {
"total": 0.000692291000007117,
"count": 1,
"is_parallel": true,
"self": 0.00018960900001729897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000502681999989818,
"count": 2,
"is_parallel": true,
"self": 0.000502681999989818
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1254.2199057699722,
"count": 233184,
"is_parallel": true,
"self": 38.16263792380869,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.6568932490369,
"count": 233184,
"is_parallel": true,
"self": 76.6568932490369
},
"communicator.exchange": {
"total": 1048.1253516139686,
"count": 233184,
"is_parallel": true,
"self": 1048.1253516139686
},
"steps_from_proto": {
"total": 91.27502298315812,
"count": 233184,
"is_parallel": true,
"self": 33.3118852632457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.963137719912424,
"count": 466368,
"is_parallel": true,
"self": 57.963137719912424
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 516.6047398109365,
"count": 233185,
"self": 6.466530915733415,
"children": {
"process_trajectory": {
"total": 133.35455824020391,
"count": 233185,
"self": 131.97532288920365,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3792353510002613,
"count": 10,
"self": 1.3792353510002613
}
}
},
"_update_policy": {
"total": 376.7836506549992,
"count": 97,
"self": 318.3494596829995,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.43419097199967,
"count": 2910,
"self": 58.43419097199967
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.54999904934084e-07,
"count": 1,
"self": 9.54999904934084e-07
},
"TrainerController._save_models": {
"total": 0.12333619700029885,
"count": 1,
"self": 0.0021431880004456616,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12119300899985319,
"count": 1,
"self": 0.12119300899985319
}
}
}
}
}
}
}
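
For reference, a minimal Python sketch (not part of the log itself) of one way to inspect this file: it prints each gauge recorded above and walks the hierarchical timer tree. The local path run_logs/timers.json is an assumption; point it at wherever the file was downloaded. Only the standard library is used.

import json

# Path is an assumption; adjust to the downloaded location of this file.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} count={gauge['count']}")

# The timer tree nests "children" blocks; "total" is wall-clock seconds
# and "count" is how many times the block was entered.
def walk(node, name="root", depth=0):
    indent = "  " * depth
    print(f"{indent}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)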