{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4037299156188965,
"min": 1.4037299156188965,
"max": 1.4276964664459229,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68770.1328125,
"min": 68770.1328125,
"max": 76422.0234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.10191082802548,
"min": 102.35378323108384,
"max": 363.62589928057554,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49503.0,
"min": 48989.0,
"max": 50544.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999906.0,
"min": 49933.0,
"max": 1999906.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999906.0,
"min": 49933.0,
"max": 1999906.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.306583881378174,
"min": 0.06502743065357208,
"max": 2.364840269088745,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1086.4010009765625,
"min": 8.973785400390625,
"max": 1146.9464111328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5365252492281027,
"min": 1.7338803235603415,
"max": 3.7579018556636217,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1665.7033923864365,
"min": 239.27548465132713,
"max": 1780.8182772994041,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5365252492281027,
"min": 1.7338803235603415,
"max": 3.7579018556636217,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1665.7033923864365,
"min": 239.27548465132713,
"max": 1780.8182772994041,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0188173357208143,
"min": 0.013606221536873716,
"max": 0.022031554040828877,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0376346714416286,
"min": 0.029068732588590745,
"max": 0.06165428824800377,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05176851848761241,
"min": 0.022327616189916927,
"max": 0.06089926964292924,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10353703697522482,
"min": 0.044655232379833855,
"max": 0.1723736647516489,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.168298610600006e-06,
"min": 4.168298610600006e-06,
"max": 0.0002953600515466499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.336597221200012e-06,
"min": 8.336597221200012e-06,
"max": 0.0008436427687857499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10138939999999999,
"min": 0.10138939999999999,
"max": 0.19845334999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20277879999999998,
"min": 0.20277879999999998,
"max": 0.5812142500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.93310600000001e-05,
"min": 7.93310600000001e-05,
"max": 0.004922822164999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001586621200000002,
"min": 0.0001586621200000002,
"max": 0.014062591075,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670957612",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670959721"
},
"total": 2108.590958064,
"count": 1,
"self": 0.42329716400035977,
"children": {
"run_training.setup": {
"total": 0.102134230999809,
"count": 1,
"self": 0.102134230999809
},
"TrainerController.start_learning": {
"total": 2108.065526669,
"count": 1,
"self": 3.610653050068777,
"children": {
"TrainerController._reset_env": {
"total": 6.003229919999967,
"count": 1,
"self": 6.003229919999967
},
"TrainerController.advance": {
"total": 2098.3329733319315,
"count": 230514,
"self": 3.918648490940541,
"children": {
"env_step": {
"total": 1655.193032296945,
"count": 230514,
"self": 1390.0551211128043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.61886315802826,
"count": 230514,
"self": 13.724620592947758,
"children": {
"TorchPolicy.evaluate": {
"total": 248.8942425650805,
"count": 222877,
"self": 63.19002566919221,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.7042168958883,
"count": 222877,
"self": 185.7042168958883
}
}
}
}
},
"workers": {
"total": 2.5190480261123867,
"count": 230514,
"self": 0.0,
"children": {
"worker_root": {
"total": 2100.5296902990817,
"count": 230514,
"is_parallel": true,
"self": 957.5579792281769,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010236789998998574,
"count": 1,
"is_parallel": true,
"self": 0.0003975599997829704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000626119000116887,
"count": 2,
"is_parallel": true,
"self": 0.000626119000116887
}
}
},
"UnityEnvironment.step": {
"total": 0.026182786000163105,
"count": 1,
"is_parallel": true,
"self": 0.0002527620001728792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017507100005786924,
"count": 1,
"is_parallel": true,
"self": 0.00017507100005786924
},
"communicator.exchange": {
"total": 0.024863592999963657,
"count": 1,
"is_parallel": true,
"self": 0.024863592999963657
},
"steps_from_proto": {
"total": 0.0008913599999686994,
"count": 1,
"is_parallel": true,
"self": 0.00023072700014381553,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006606329998248839,
"count": 2,
"is_parallel": true,
"self": 0.0006606329998248839
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1142.9717110709048,
"count": 230513,
"is_parallel": true,
"self": 33.664489930812806,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.69774333100509,
"count": 230513,
"is_parallel": true,
"self": 73.69774333100509
},
"communicator.exchange": {
"total": 945.6477998410908,
"count": 230513,
"is_parallel": true,
"self": 945.6477998410908
},
"steps_from_proto": {
"total": 89.96167796799614,
"count": 230513,
"is_parallel": true,
"self": 37.17214223489236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.789535733103776,
"count": 461026,
"is_parallel": true,
"self": 52.789535733103776
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 439.22129254404604,
"count": 230514,
"self": 5.576811211966287,
"children": {
"process_trajectory": {
"total": 132.26752881708012,
"count": 230514,
"self": 131.77097565007966,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4965531670004566,
"count": 4,
"self": 0.4965531670004566
}
}
},
"_update_policy": {
"total": 301.37695251499963,
"count": 96,
"self": 249.65281393298733,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.7241385820123,
"count": 2880,
"self": 51.7241385820123
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.559998943586834e-07,
"count": 1,
"self": 9.559998943586834e-07
},
"TrainerController._save_models": {
"total": 0.11866941099970063,
"count": 1,
"self": 0.002043199999661738,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11662621100003889,
"count": 1,
"self": 0.11662621100003889
}
}
}
}
}
}
}