{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402003526687622,
"min": 1.402003526687622,
"max": 1.4235150814056396,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69692.1953125,
"min": 68501.4453125,
"max": 77261.5078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 106.2547770700637,
"min": 85.52961672473867,
"max": 380.7651515151515,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50046.0,
"min": 48989.0,
"max": 50261.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999778.0,
"min": 49831.0,
"max": 1999778.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999778.0,
"min": 49831.0,
"max": 1999778.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.336700916290283,
"min": -0.03799854964017868,
"max": 2.420351505279541,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1100.586181640625,
"min": -4.977809906005859,
"max": 1359.320068359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.530277326861258,
"min": 1.7705591807838614,
"max": 3.9192370868864512,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1662.7606209516525,
"min": 231.94325268268585,
"max": 2201.5442913770676,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.530277326861258,
"min": 1.7705591807838614,
"max": 3.9192370868864512,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1662.7606209516525,
"min": 231.94325268268585,
"max": 2201.5442913770676,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017498455344078442,
"min": 0.013429122308419514,
"max": 0.02070954283588738,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.034996910688156885,
"min": 0.026858244616839028,
"max": 0.05752095837960951,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04404068787892659,
"min": 0.024671760077277817,
"max": 0.05993577173600595,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08808137575785319,
"min": 0.049343520154555634,
"max": 0.1731555551290512,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.63059845650001e-06,
"min": 4.63059845650001e-06,
"max": 0.00029532157655947494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.26119691300002e-06,
"min": 9.26119691300002e-06,
"max": 0.0008441206686264497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10154350000000001,
"min": 0.10154350000000001,
"max": 0.19844052500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20308700000000002,
"min": 0.20308700000000002,
"max": 0.5813735500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.702065000000018e-05,
"min": 8.702065000000018e-05,
"max": 0.0049221821975000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017404130000000037,
"min": 0.00017404130000000037,
"max": 0.014070540145000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680781569",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680784061"
},
"total": 2491.636011404,
"count": 1,
"self": 0.4389899850002621,
"children": {
"run_training.setup": {
"total": 0.11960203500001398,
"count": 1,
"self": 0.11960203500001398
},
"TrainerController.start_learning": {
"total": 2491.0774193839998,
"count": 1,
"self": 4.6508671290757775,
"children": {
"TrainerController._reset_env": {
"total": 4.501188755999976,
"count": 1,
"self": 4.501188755999976
},
"TrainerController.advance": {
"total": 2481.791092759924,
"count": 231727,
"self": 4.796878434859536,
"children": {
"env_step": {
"total": 1953.0167671010743,
"count": 231727,
"self": 1655.2316312090786,
"children": {
"SubprocessEnvManager._take_step": {
"total": 294.66283759002175,
"count": 231727,
"self": 17.70253434191102,
"children": {
"TorchPolicy.evaluate": {
"total": 276.96030324811073,
"count": 223042,
"self": 276.96030324811073
}
}
},
"workers": {
"total": 3.122298301973899,
"count": 231727,
"self": 0.0,
"children": {
"worker_root": {
"total": 2482.397707393125,
"count": 231727,
"is_parallel": true,
"self": 1126.957464741105,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009388929999545326,
"count": 1,
"is_parallel": true,
"self": 0.00026525300000912466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006736399999454079,
"count": 2,
"is_parallel": true,
"self": 0.0006736399999454079
}
}
},
"UnityEnvironment.step": {
"total": 0.07978955200002247,
"count": 1,
"is_parallel": true,
"self": 0.0003221830000939008,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002134049999540366,
"count": 1,
"is_parallel": true,
"self": 0.0002134049999540366
},
"communicator.exchange": {
"total": 0.0785295119999887,
"count": 1,
"is_parallel": true,
"self": 0.0785295119999887
},
"steps_from_proto": {
"total": 0.0007244519999858312,
"count": 1,
"is_parallel": true,
"self": 0.00020391699996480384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005205350000210274,
"count": 2,
"is_parallel": true,
"self": 0.0005205350000210274
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1355.44024265202,
"count": 231726,
"is_parallel": true,
"self": 40.041921258037746,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.47655306998695,
"count": 231726,
"is_parallel": true,
"self": 83.47655306998695
},
"communicator.exchange": {
"total": 1137.5264727130032,
"count": 231726,
"is_parallel": true,
"self": 1137.5264727130032
},
"steps_from_proto": {
"total": 94.39529561099226,
"count": 231726,
"is_parallel": true,
"self": 35.45729636103664,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.937999249955624,
"count": 463452,
"is_parallel": true,
"self": 58.937999249955624
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 523.9774472239905,
"count": 231727,
"self": 7.278057776078867,
"children": {
"process_trajectory": {
"total": 135.53127448690952,
"count": 231727,
"self": 134.22150154191036,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3097729449991675,
"count": 10,
"self": 1.3097729449991675
}
}
},
"_update_policy": {
"total": 381.16811496100206,
"count": 96,
"self": 320.88931636999513,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.27879859100693,
"count": 2880,
"self": 60.27879859100693
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.659997886046767e-07,
"count": 1,
"self": 9.659997886046767e-07
},
"TrainerController._save_models": {
"total": 0.13426977300014187,
"count": 1,
"self": 0.002718404000006558,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1315513690001353,
"count": 1,
"self": 0.1315513690001353
}
}
}
}
}
}
}