{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.148697167634964,
"min": 0.14655016362667084,
"max": 1.4400190114974976,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4429.98583984375,
"min": 4370.7119140625,
"max": 43684.41796875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999980.0,
"min": 29872.0,
"max": 2999980.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999980.0,
"min": 29872.0,
"max": 2999980.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8112884759902954,
"min": -0.09664986282587051,
"max": 0.8748363852500916,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 238.51881408691406,
"min": -23.292617797851562,
"max": 263.32574462890625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003164477413520217,
"min": -0.018619002774357796,
"max": 0.5386466979980469,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9303563833236694,
"min": -5.418129920959473,
"max": 127.65927124023438,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06805946247402549,
"min": 0.06362116651975418,
"max": 0.07303607658938314,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9528324746363568,
"min": 0.49625312430742624,
"max": 1.0753516525922655,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015584823555410552,
"min": 0.00017549941191155572,
"max": 0.021775186316622185,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21818752977574773,
"min": 0.0022814923548502243,
"max": 0.24532018247797774,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4421138050428539e-06,
"min": 1.4421138050428539e-06,
"max": 0.00029841211481500957,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0189593270599954e-05,
"min": 2.0189593270599954e-05,
"max": 0.003842087719304133,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048067142857144,
"min": 0.10048067142857144,
"max": 0.1994707047619048,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067294000000001,
"min": 1.3962949333333334,
"max": 2.7075835,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.801907571428561e-05,
"min": 5.801907571428561e-05,
"max": 0.009947123405714284,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008122670599999985,
"min": 0.0008122670599999985,
"max": 0.12808151708,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008934753946959972,
"min": 0.008934753946959972,
"max": 0.7499951124191284,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12508656084537506,
"min": 0.12508656084537506,
"max": 5.249965667724609,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 252.23333333333332,
"min": 207.63503649635035,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30268.0,
"min": 16543.0,
"max": 32818.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6810899874816339,
"min": -0.9998839228383957,
"max": 1.7785652033120827,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 201.73079849779606,
"min": -31.995601654052734,
"max": 247.83219727873802,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6810899874816339,
"min": -0.9998839228383957,
"max": 1.7785652033120827,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 201.73079849779606,
"min": -31.995601654052734,
"max": 247.83219727873802,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02329218836578851,
"min": 0.019804823936546978,
"max": 15.631133132121143,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.795062603894621,
"min": 2.694162781976047,
"max": 265.7292632460594,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674157844",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674164515"
},
"total": 6670.91085009,
"count": 1,
"self": 0.4238718010001321,
"children": {
"run_training.setup": {
"total": 0.10057918200027416,
"count": 1,
"self": 0.10057918200027416
},
"TrainerController.start_learning": {
"total": 6670.386399106999,
"count": 1,
"self": 3.8295353578196227,
"children": {
"TrainerController._reset_env": {
"total": 5.991216741999779,
"count": 1,
"self": 5.991216741999779
},
"TrainerController.advance": {
"total": 6660.481593723181,
"count": 194149,
"self": 3.952910311037158,
"children": {
"env_step": {
"total": 4702.027774666071,
"count": 194149,
"self": 4391.57380591227,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.171479822759,
"count": 194149,
"self": 13.089163853567698,
"children": {
"TorchPolicy.evaluate": {
"total": 295.0823159691913,
"count": 187553,
"self": 100.22128000133307,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.86103596785824,
"count": 187553,
"self": 194.86103596785824
}
}
}
}
},
"workers": {
"total": 2.282488931042735,
"count": 194149,
"self": 0.0,
"children": {
"worker_root": {
"total": 6657.056847677078,
"count": 194149,
"is_parallel": true,
"self": 2567.522345817315,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018587320000733598,
"count": 1,
"is_parallel": true,
"self": 0.0006794000009904266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011793319990829332,
"count": 8,
"is_parallel": true,
"self": 0.0011793319990829332
}
}
},
"UnityEnvironment.step": {
"total": 0.0464648480001415,
"count": 1,
"is_parallel": true,
"self": 0.0005040770001869532,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004542300002867705,
"count": 1,
"is_parallel": true,
"self": 0.0004542300002867705
},
"communicator.exchange": {
"total": 0.04393311499961783,
"count": 1,
"is_parallel": true,
"self": 0.04393311499961783
},
"steps_from_proto": {
"total": 0.001573426000049949,
"count": 1,
"is_parallel": true,
"self": 0.0003908460003003711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011825799997495778,
"count": 8,
"is_parallel": true,
"self": 0.0011825799997495778
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4089.5345018597627,
"count": 194148,
"is_parallel": true,
"self": 82.16249479905173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 69.78667233804481,
"count": 194148,
"is_parallel": true,
"self": 69.78667233804481
},
"communicator.exchange": {
"total": 3627.9499288657994,
"count": 194148,
"is_parallel": true,
"self": 3627.9499288657994
},
"steps_from_proto": {
"total": 309.6354058568668,
"count": 194148,
"is_parallel": true,
"self": 67.69661054560038,
"children": {
"_process_rank_one_or_two_observation": {
"total": 241.9387953112664,
"count": 1553184,
"is_parallel": true,
"self": 241.9387953112664
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1954.5009087460726,
"count": 194149,
"self": 7.120703637174756,
"children": {
"process_trajectory": {
"total": 452.1543288198809,
"count": 194149,
"self": 451.61997439988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5343544200009092,
"count": 6,
"self": 0.5343544200009092
}
}
},
"_update_policy": {
"total": 1495.225876289017,
"count": 1395,
"self": 567.1248574650199,
"children": {
"TorchPPOOptimizer.update": {
"total": 928.101018823997,
"count": 68331,
"self": 928.101018823997
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.499999578110874e-07,
"count": 1,
"self": 9.499999578110874e-07
},
"TrainerController._save_models": {
"total": 0.08405233399935241,
"count": 1,
"self": 0.0013608429999294458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08269149099942297,
"count": 1,
"self": 0.08269149099942297
}
}
}
}
}
}
}