{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4071663618087769,
"min": 1.4071663618087769,
"max": 1.429805040359497,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70651.0078125,
"min": 69088.015625,
"max": 77923.6640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.75359712230215,
"min": 84.08333333333333,
"max": 411.5655737704918,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49347.0,
"min": 48859.0,
"max": 50228.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49914.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49914.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.376584768295288,
"min": 0.01556811761111021,
"max": 2.47477650642395,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1321.381103515625,
"min": 1.8837422132492065,
"max": 1415.37060546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.633535781459843,
"min": 1.7246707475875034,
"max": 3.9174064031271176,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2020.2458944916725,
"min": 208.68516045808792,
"max": 2205.0983549952507,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.633535781459843,
"min": 1.7246707475875034,
"max": 3.9174064031271176,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2020.2458944916725,
"min": 208.68516045808792,
"max": 2205.0983549952507,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017118497305071086,
"min": 0.013655837808134189,
"max": 0.019625719767322556,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051355491915213256,
"min": 0.027311675616268377,
"max": 0.05887715930196767,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06174136623740196,
"min": 0.022578109304110208,
"max": 0.06385263906170925,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18522409871220588,
"min": 0.045156218608220416,
"max": 0.18522409871220588,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4996988334666703e-06,
"min": 3.4996988334666703e-06,
"max": 0.00029537827654057495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0499096500400011e-05,
"min": 1.0499096500400011e-05,
"max": 0.0008442117185961,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116653333333332,
"min": 0.10116653333333332,
"max": 0.19845942500000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034996,
"min": 0.20749665,
"max": 0.5814039,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.821001333333339e-05,
"min": 6.821001333333339e-05,
"max": 0.0049231253075,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020463004000000016,
"min": 0.00020463004000000016,
"max": 0.014072054610000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713690270",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713692795"
},
"total": 2525.294856629,
"count": 1,
"self": 0.4387811239998882,
"children": {
"run_training.setup": {
"total": 0.061585153000009996,
"count": 1,
"self": 0.061585153000009996
},
"TrainerController.start_learning": {
"total": 2524.794490352,
"count": 1,
"self": 4.39106445496509,
"children": {
"TrainerController._reset_env": {
"total": 2.810813157000098,
"count": 1,
"self": 2.810813157000098
},
"TrainerController.advance": {
"total": 2517.4656382880353,
"count": 231903,
"self": 4.83903371695169,
"children": {
"env_step": {
"total": 2039.2108768210471,
"count": 231903,
"self": 1689.888980147043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 346.29376735199673,
"count": 231903,
"self": 17.97169843200186,
"children": {
"TorchPolicy.evaluate": {
"total": 328.3220689199949,
"count": 222968,
"self": 328.3220689199949
}
}
},
"workers": {
"total": 3.0281293220074303,
"count": 231903,
"self": 0.0,
"children": {
"worker_root": {
"total": 2517.192847538942,
"count": 231903,
"is_parallel": true,
"self": 1156.6531402669275,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009506949999149583,
"count": 1,
"is_parallel": true,
"self": 0.0002350959999830593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000715598999931899,
"count": 2,
"is_parallel": true,
"self": 0.000715598999931899
}
}
},
"UnityEnvironment.step": {
"total": 0.03199209299998529,
"count": 1,
"is_parallel": true,
"self": 0.0004003899999815985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022442099998443155,
"count": 1,
"is_parallel": true,
"self": 0.00022442099998443155
},
"communicator.exchange": {
"total": 0.03056044100003419,
"count": 1,
"is_parallel": true,
"self": 0.03056044100003419
},
"steps_from_proto": {
"total": 0.0008068409999850701,
"count": 1,
"is_parallel": true,
"self": 0.0002204359999495864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005864050000354837,
"count": 2,
"is_parallel": true,
"self": 0.0005864050000354837
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1360.5397072720143,
"count": 231902,
"is_parallel": true,
"self": 40.60614673611235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.11931314597257,
"count": 231902,
"is_parallel": true,
"self": 89.11931314597257
},
"communicator.exchange": {
"total": 1134.6639098379942,
"count": 231902,
"is_parallel": true,
"self": 1134.6639098379942
},
"steps_from_proto": {
"total": 96.15033755193497,
"count": 231902,
"is_parallel": true,
"self": 36.56847512787783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.581862424057135,
"count": 463804,
"is_parallel": true,
"self": 59.581862424057135
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 473.41572775003624,
"count": 231903,
"self": 6.664784880084085,
"children": {
"process_trajectory": {
"total": 163.37573918595172,
"count": 231903,
"self": 161.97400794495172,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4017312409999931,
"count": 10,
"self": 1.4017312409999931
}
}
},
"_update_policy": {
"total": 303.37520368400044,
"count": 97,
"self": 241.78639997700952,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.588803706990916,
"count": 2910,
"self": 61.588803706990916
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0289995771017857e-06,
"count": 1,
"self": 1.0289995771017857e-06
},
"TrainerController._save_models": {
"total": 0.12697342299998127,
"count": 1,
"self": 0.002307155999915267,
"children": {
"RLTrainer._checkpoint": {
"total": 0.124666267000066,
"count": 1,
"self": 0.124666267000066
}
}
}
}
}
}
}