{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4070372581481934,
"min": 1.4070372581481934,
"max": 1.426780104637146,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 73600.7109375,
"min": 65940.171875,
"max": 76446.8125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 121.00731707317073,
"min": 90.51282051282051,
"max": 396.3307086614173,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49613.0,
"min": 48967.0,
"max": 50334.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49711.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49711.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.244126081466675,
"min": 0.16440355777740479,
"max": 2.441737174987793,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 920.0916748046875,
"min": 20.714847564697266,
"max": 1294.0159912109375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.3038453363790743,
"min": 1.7864886385107797,
"max": 3.970253024250269,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1354.5765879154205,
"min": 225.09756845235825,
"max": 2044.1585366725922,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.3038453363790743,
"min": 1.7864886385107797,
"max": 3.970253024250269,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1354.5765879154205,
"min": 225.09756845235825,
"max": 2044.1585366725922,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019770919356879958,
"min": 0.014015318685980875,
"max": 0.020032326967662408,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.059312758070639876,
"min": 0.028342480595407928,
"max": 0.06009698090298723,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04348651880605353,
"min": 0.021646662149578333,
"max": 0.055100522976782584,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1304595564181606,
"min": 0.043293324299156666,
"max": 0.16530156893034775,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3549488817166644e-06,
"min": 3.3549488817166644e-06,
"max": 0.00029536815154395,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0064846645149993e-05,
"min": 1.0064846645149993e-05,
"max": 0.0008442379685873499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111828333333335,
"min": 0.10111828333333335,
"max": 0.19845605,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30335485000000006,
"min": 0.20742574999999994,
"max": 0.5814126500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.580233833333332e-05,
"min": 6.580233833333332e-05,
"max": 0.004922956895000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019740701499999997,
"min": 0.00019740701499999997,
"max": 0.014072491234999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672232141",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672234542"
},
"total": 2401.209687638,
"count": 1,
"self": 0.44201105399997687,
"children": {
"run_training.setup": {
"total": 0.11021809700002905,
"count": 1,
"self": 0.11021809700002905
},
"TrainerController.start_learning": {
"total": 2400.657458487,
"count": 1,
"self": 5.144796594142008,
"children": {
"TrainerController._reset_env": {
"total": 6.354320927999993,
"count": 1,
"self": 6.354320927999993
},
"TrainerController.advance": {
"total": 2389.025791618858,
"count": 231476,
"self": 4.7172547506024785,
"children": {
"env_step": {
"total": 1902.3635448030918,
"count": 231476,
"self": 1596.7013583579792,
"children": {
"SubprocessEnvManager._take_step": {
"total": 302.6286335100242,
"count": 231476,
"self": 16.308641288049103,
"children": {
"TorchPolicy.evaluate": {
"total": 286.3199922219751,
"count": 223130,
"self": 71.67136191698091,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.64863030499419,
"count": 223130,
"self": 214.64863030499419
}
}
}
}
},
"workers": {
"total": 3.033552935088437,
"count": 231476,
"self": 0.0,
"children": {
"worker_root": {
"total": 2391.7692368599833,
"count": 231476,
"is_parallel": true,
"self": 1084.5833515389227,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007913059999964389,
"count": 1,
"is_parallel": true,
"self": 0.00029233399993700004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004989720000594389,
"count": 2,
"is_parallel": true,
"self": 0.0004989720000594389
}
}
},
"UnityEnvironment.step": {
"total": 0.027719493999939004,
"count": 1,
"is_parallel": true,
"self": 0.00027144599994244345,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018798099995365192,
"count": 1,
"is_parallel": true,
"self": 0.00018798099995365192
},
"communicator.exchange": {
"total": 0.026322091999986696,
"count": 1,
"is_parallel": true,
"self": 0.026322091999986696
},
"steps_from_proto": {
"total": 0.0009379750000562126,
"count": 1,
"is_parallel": true,
"self": 0.00023492200011787645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007030529999383361,
"count": 2,
"is_parallel": true,
"self": 0.0007030529999383361
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1307.1858853210606,
"count": 231475,
"is_parallel": true,
"self": 36.417985630106614,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.01897625094466,
"count": 231475,
"is_parallel": true,
"self": 80.01897625094466
},
"communicator.exchange": {
"total": 1089.46922868102,
"count": 231475,
"is_parallel": true,
"self": 1089.46922868102
},
"steps_from_proto": {
"total": 101.27969475898942,
"count": 231475,
"is_parallel": true,
"self": 40.56129080914809,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.71840394984133,
"count": 462950,
"is_parallel": true,
"self": 60.71840394984133
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 481.94499206516355,
"count": 231476,
"self": 7.6248858611525065,
"children": {
"process_trajectory": {
"total": 151.07769789400936,
"count": 231476,
"self": 149.86452877900933,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2131691150000279,
"count": 10,
"self": 1.2131691150000279
}
}
},
"_update_policy": {
"total": 323.2424083100017,
"count": 97,
"self": 267.54165742000885,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.70075088999283,
"count": 2910,
"self": 55.70075088999283
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8119999367627315e-06,
"count": 1,
"self": 1.8119999367627315e-06
},
"TrainerController._save_models": {
"total": 0.13254753399996844,
"count": 1,
"self": 0.002754973999799404,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12979256000016903,
"count": 1,
"self": 0.12979256000016903
}
}
}
}
}
}
}