{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4059500694274902,
"min": 1.4059500694274902,
"max": 1.4280493259429932,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71229.6484375,
"min": 68308.390625,
"max": 76338.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.3577817531306,
"min": 78.60987261146497,
"max": 390.2421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49392.0,
"min": 48988.0,
"max": 50208.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999910.0,
"min": 49849.0,
"max": 1999910.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999910.0,
"min": 49849.0,
"max": 1999910.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3775813579559326,
"min": 0.06551811099052429,
"max": 2.5020713806152344,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1329.0679931640625,
"min": 8.320799827575684,
"max": 1547.010498046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5326443074640945,
"min": 1.5855354227888303,
"max": 3.9781964300200343,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1974.748167872429,
"min": 201.36299869418144,
"max": 2422.5732063651085,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5326443074640945,
"min": 1.5855354227888303,
"max": 3.9781964300200343,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1974.748167872429,
"min": 201.36299869418144,
"max": 2422.5732063651085,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017488074651434243,
"min": 0.013700645364103063,
"max": 0.021943280301153813,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052464223954302724,
"min": 0.027401290728206126,
"max": 0.05688962720450945,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05667212170859178,
"min": 0.02343122838065028,
"max": 0.06405349324146907,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17001636512577534,
"min": 0.04686245676130056,
"max": 0.18048840512832004,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6940987686666756e-06,
"min": 3.6940987686666756e-06,
"max": 0.00029533950155349994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1082296306000027e-05,
"min": 1.1082296306000027e-05,
"max": 0.0008442507185831001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123133333333333,
"min": 0.10123133333333333,
"max": 0.19844649999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30369399999999996,
"min": 0.20759295,
"max": 0.5814169,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.144353333333348e-05,
"min": 7.144353333333348e-05,
"max": 0.004922480350000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021433060000000046,
"min": 0.00021433060000000046,
"max": 0.01407270331,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687009429",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687011961"
},
"total": 2531.839191976,
"count": 1,
"self": 0.4353196689999095,
"children": {
"run_training.setup": {
"total": 0.04145838100009769,
"count": 1,
"self": 0.04145838100009769
},
"TrainerController.start_learning": {
"total": 2531.362413926,
"count": 1,
"self": 4.5883275900273475,
"children": {
"TrainerController._reset_env": {
"total": 4.048572219999983,
"count": 1,
"self": 4.048572219999983
},
"TrainerController.advance": {
"total": 2522.6017757359728,
"count": 232496,
"self": 4.9625754911717195,
"children": {
"env_step": {
"total": 1982.943243689899,
"count": 232496,
"self": 1674.5339906808886,
"children": {
"SubprocessEnvManager._take_step": {
"total": 305.3997638239729,
"count": 232496,
"self": 17.770256708849388,
"children": {
"TorchPolicy.evaluate": {
"total": 287.6295071151235,
"count": 223064,
"self": 287.6295071151235
}
}
},
"workers": {
"total": 3.0094891850374097,
"count": 232496,
"self": 0.0,
"children": {
"worker_root": {
"total": 2523.172864013907,
"count": 232496,
"is_parallel": true,
"self": 1157.4885469948947,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011855120000063835,
"count": 1,
"is_parallel": true,
"self": 0.00039558999992550525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007899220000808782,
"count": 2,
"is_parallel": true,
"self": 0.0007899220000808782
}
}
},
"UnityEnvironment.step": {
"total": 0.030262080000056812,
"count": 1,
"is_parallel": true,
"self": 0.0003419160001385535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023196399990865757,
"count": 1,
"is_parallel": true,
"self": 0.00023196399990865757
},
"communicator.exchange": {
"total": 0.02897258600000896,
"count": 1,
"is_parallel": true,
"self": 0.02897258600000896
},
"steps_from_proto": {
"total": 0.0007156140000006417,
"count": 1,
"is_parallel": true,
"self": 0.00021708399992803606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004985300000726056,
"count": 2,
"is_parallel": true,
"self": 0.0004985300000726056
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1365.6843170190125,
"count": 232495,
"is_parallel": true,
"self": 40.26039725695637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.38681407899878,
"count": 232495,
"is_parallel": true,
"self": 86.38681407899878
},
"communicator.exchange": {
"total": 1139.9383777060623,
"count": 232495,
"is_parallel": true,
"self": 1139.9383777060623
},
"steps_from_proto": {
"total": 99.09872797699495,
"count": 232495,
"is_parallel": true,
"self": 37.655761536924615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.442966440070336,
"count": 464990,
"is_parallel": true,
"self": 61.442966440070336
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 534.6959565549023,
"count": 232496,
"self": 6.944176132851453,
"children": {
"process_trajectory": {
"total": 144.20137930005217,
"count": 232496,
"self": 142.79475196805197,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4066273320001983,
"count": 10,
"self": 1.4066273320001983
}
}
},
"_update_policy": {
"total": 383.5504011219987,
"count": 97,
"self": 323.4039994229938,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.146401699004855,
"count": 2910,
"self": 60.146401699004855
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2129999049648177e-06,
"count": 1,
"self": 1.2129999049648177e-06
},
"TrainerController._save_models": {
"total": 0.12373716699994475,
"count": 1,
"self": 0.0020136479997745482,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1217235190001702,
"count": 1,
"self": 0.1217235190001702
}
}
}
}
}
}
}