{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406927227973938,
"min": 1.406927227973938,
"max": 1.428769826889038,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70018.546875,
"min": 68220.6328125,
"max": 77445.8515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 132.872,
"min": 91.06998158379373,
"max": 384.6793893129771,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49827.0,
"min": 48938.0,
"max": 50393.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999613.0,
"min": 49955.0,
"max": 1999613.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999613.0,
"min": 49955.0,
"max": 1999613.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.2671597003936768,
"min": 0.15929746627807617,
"max": 2.384432315826416,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 852.4520263671875,
"min": 20.70867156982422,
"max": 1233.52685546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.3908320108943797,
"min": 1.7909623205661773,
"max": 3.8640098866209924,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1274.9528360962868,
"min": 232.82510167360306,
"max": 2009.7998051643372,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.3908320108943797,
"min": 1.7909623205661773,
"max": 3.8640098866209924,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1274.9528360962868,
"min": 232.82510167360306,
"max": 2009.7998051643372,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0132809779414553,
"min": 0.0132809779414553,
"max": 0.020505904977108004,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0265619558829106,
"min": 0.0265619558829106,
"max": 0.060299617767062344,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.038285991673668224,
"min": 0.020951938225577275,
"max": 0.06354543711576197,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.07657198334733645,
"min": 0.04190387645115455,
"max": 0.1906363113472859,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.408673530474999e-06,
"min": 4.408673530474999e-06,
"max": 0.00029535052654982495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.817347060949999e-06,
"min": 8.817347060949999e-06,
"max": 0.0008443404185532,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101469525,
"min": 0.101469525,
"max": 0.198450175,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20293905,
"min": 0.20293905,
"max": 0.5814468000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.332929749999996e-05,
"min": 8.332929749999996e-05,
"max": 0.0049226637325,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016665859499999993,
"min": 0.00016665859499999993,
"max": 0.014074195320000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687047273",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687049597"
},
"total": 2324.435963293,
"count": 1,
"self": 0.5977025989996037,
"children": {
"run_training.setup": {
"total": 0.04172141899999815,
"count": 1,
"self": 0.04172141899999815
},
"TrainerController.start_learning": {
"total": 2323.796539275,
"count": 1,
"self": 4.065421892948052,
"children": {
"TrainerController._reset_env": {
"total": 4.269979696999997,
"count": 1,
"self": 4.269979696999997
},
"TrainerController.advance": {
"total": 2315.266302218052,
"count": 231458,
"self": 4.321952933211833,
"children": {
"env_step": {
"total": 1809.9517668070112,
"count": 231458,
"self": 1528.044112137777,
"children": {
"SubprocessEnvManager._take_step": {
"total": 279.13139678622474,
"count": 231458,
"self": 16.439107199272996,
"children": {
"TorchPolicy.evaluate": {
"total": 262.69228958695174,
"count": 223100,
"self": 262.69228958695174
}
}
},
"workers": {
"total": 2.776257883009521,
"count": 231458,
"self": 0.0,
"children": {
"worker_root": {
"total": 2316.0948341398284,
"count": 231458,
"is_parallel": true,
"self": 1065.3243277448305,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010407820000182255,
"count": 1,
"is_parallel": true,
"self": 0.00027034100003220374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007704409999860218,
"count": 2,
"is_parallel": true,
"self": 0.0007704409999860218
}
}
},
"UnityEnvironment.step": {
"total": 0.04832471899999291,
"count": 1,
"is_parallel": true,
"self": 0.0003528320000327767,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002413969999679466,
"count": 1,
"is_parallel": true,
"self": 0.0002413969999679466
},
"communicator.exchange": {
"total": 0.046990107999988595,
"count": 1,
"is_parallel": true,
"self": 0.046990107999988595
},
"steps_from_proto": {
"total": 0.0007403820000035921,
"count": 1,
"is_parallel": true,
"self": 0.00019682699996792508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000543555000035667,
"count": 2,
"is_parallel": true,
"self": 0.000543555000035667
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1250.770506394998,
"count": 231457,
"is_parallel": true,
"self": 38.68743735794442,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.14293716208084,
"count": 231457,
"is_parallel": true,
"self": 78.14293716208084
},
"communicator.exchange": {
"total": 1041.1124437379867,
"count": 231457,
"is_parallel": true,
"self": 1041.1124437379867
},
"steps_from_proto": {
"total": 92.82768813698601,
"count": 231457,
"is_parallel": true,
"self": 32.83356885497318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.99411928201283,
"count": 462914,
"is_parallel": true,
"self": 59.99411928201283
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 500.9925824778288,
"count": 231458,
"self": 6.357669653803953,
"children": {
"process_trajectory": {
"total": 125.01174927002495,
"count": 231458,
"self": 123.57592335202418,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4358259180007735,
"count": 10,
"self": 1.4358259180007735
}
}
},
"_update_policy": {
"total": 369.6231635539999,
"count": 96,
"self": 310.883741777999,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.73942177600094,
"count": 2880,
"self": 58.73942177600094
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.359000179945724e-06,
"count": 1,
"self": 1.359000179945724e-06
},
"TrainerController._save_models": {
"total": 0.1948341080001228,
"count": 1,
"self": 0.002648459999818442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19218564800030435,
"count": 1,
"self": 0.19218564800030435
}
}
}
}
}
}
}