{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.410020351409912,
"min": 1.410020351409912,
"max": 1.4300894737243652,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70716.75,
"min": 68229.234375,
"max": 77179.03125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.65254237288136,
"min": 77.61635220125787,
"max": 401.128,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49355.0,
"min": 49309.0,
"max": 50141.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999974.0,
"min": 49771.0,
"max": 1999974.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999974.0,
"min": 49771.0,
"max": 1999974.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.455097198486328,
"min": 0.06952320039272308,
"max": 2.458850383758545,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1448.50732421875,
"min": 8.620877265930176,
"max": 1540.728271484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8341768884052665,
"min": 1.7062465673492802,
"max": 3.8858856547829563,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2262.164364159107,
"min": 211.57457435131073,
"max": 2452.7254797816277,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8341768884052665,
"min": 1.7062465673492802,
"max": 3.8858856547829563,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2262.164364159107,
"min": 211.57457435131073,
"max": 2452.7254797816277,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018204781687624443,
"min": 0.012398701646210004,
"max": 0.01906798229611013,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05461434506287333,
"min": 0.024797403292420007,
"max": 0.057203946888330394,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055680584535002714,
"min": 0.022797686389336984,
"max": 0.06049177708725134,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16704175360500814,
"min": 0.04559537277867397,
"max": 0.1753360038002332,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5270988243333363e-06,
"min": 3.5270988243333363e-06,
"max": 0.000295371826542725,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0581296473000009e-05,
"min": 1.0581296473000009e-05,
"max": 0.0008443338185554,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117566666666666,
"min": 0.10117566666666666,
"max": 0.1984572750000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303527,
"min": 0.2074935,
"max": 0.5814446000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.866576666666672e-05,
"min": 6.866576666666672e-05,
"max": 0.004923018022500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020599730000000016,
"min": 0.00020599730000000016,
"max": 0.01407408554,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671047784",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ../config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671050089"
},
"total": 2304.767118873,
"count": 1,
"self": 0.3905148450003253,
"children": {
"run_training.setup": {
"total": 0.1142955319999146,
"count": 1,
"self": 0.1142955319999146
},
"TrainerController.start_learning": {
"total": 2304.2623084959996,
"count": 1,
"self": 4.207752724829334,
"children": {
"TrainerController._reset_env": {
"total": 10.01488955700006,
"count": 1,
"self": 10.01488955700006
},
"TrainerController.advance": {
"total": 2289.9260734991713,
"count": 233073,
"self": 4.306533312313604,
"children": {
"env_step": {
"total": 1806.969882279033,
"count": 233073,
"self": 1517.9483770869977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 286.250777848943,
"count": 233073,
"self": 15.307115541000485,
"children": {
"TorchPolicy.evaluate": {
"total": 270.94366230794253,
"count": 222957,
"self": 68.03794189803261,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.90572040990992,
"count": 222957,
"self": 202.90572040990992
}
}
}
}
},
"workers": {
"total": 2.7707273430924033,
"count": 233073,
"self": 0.0,
"children": {
"worker_root": {
"total": 2296.0407269559464,
"count": 233073,
"is_parallel": true,
"self": 1047.4597239007903,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001829197999995813,
"count": 1,
"is_parallel": true,
"self": 0.0003335520000291581,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001495645999966655,
"count": 2,
"is_parallel": true,
"self": 0.001495645999966655
}
}
},
"UnityEnvironment.step": {
"total": 0.028006623999999647,
"count": 1,
"is_parallel": true,
"self": 0.000295504999712648,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021175100005166314,
"count": 1,
"is_parallel": true,
"self": 0.00021175100005166314
},
"communicator.exchange": {
"total": 0.026571574000172404,
"count": 1,
"is_parallel": true,
"self": 0.026571574000172404
},
"steps_from_proto": {
"total": 0.0009277940000629314,
"count": 1,
"is_parallel": true,
"self": 0.0004579669998747704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046982700018816104,
"count": 2,
"is_parallel": true,
"self": 0.00046982700018816104
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1248.581003055156,
"count": 233072,
"is_parallel": true,
"self": 35.66708520712905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.09418379306385,
"count": 233072,
"is_parallel": true,
"self": 82.09418379306385
},
"communicator.exchange": {
"total": 1031.7700415080774,
"count": 233072,
"is_parallel": true,
"self": 1031.7700415080774
},
"steps_from_proto": {
"total": 99.04969254688581,
"count": 233072,
"is_parallel": true,
"self": 42.940641420897464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.109051125988344,
"count": 466144,
"is_parallel": true,
"self": 56.109051125988344
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 478.6496579078246,
"count": 233073,
"self": 6.235002980794661,
"children": {
"process_trajectory": {
"total": 153.37382462102983,
"count": 233073,
"self": 152.89304018802954,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48078443300028084,
"count": 4,
"self": 0.48078443300028084
}
}
},
"_update_policy": {
"total": 319.0408303060001,
"count": 97,
"self": 265.52269399799457,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.51813630800552,
"count": 2910,
"self": 53.51813630800552
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.199993655784056e-07,
"count": 1,
"self": 9.199993655784056e-07
},
"TrainerController._save_models": {
"total": 0.11359179499959282,
"count": 1,
"self": 0.0020427790004760027,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11154901599911682,
"count": 1,
"self": 0.11154901599911682
}
}
}
}
}
}
}