Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3694153130054474,
"min": 0.3516484797000885,
"max": 1.3783494234085083,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11005.62109375,
"min": 10605.7177734375,
"max": 41813.609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29964.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29964.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.44047150015830994,
"min": -0.08551229536533356,
"max": 0.532673180103302,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 119.3677749633789,
"min": -20.608463287353516,
"max": 145.95245361328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018904829397797585,
"min": 0.00659258384257555,
"max": 0.481662780046463,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.123208522796631,
"min": 1.7668124437332153,
"max": 114.6357421875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07185489498910361,
"min": 0.06521275469632302,
"max": 0.07340627096742404,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0059685298474506,
"min": 0.4885585838452118,
"max": 1.0605305007735368,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015788222948945725,
"min": 0.0011771563171932918,
"max": 0.016644898547221804,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22103512128524017,
"min": 0.015303032123512793,
"max": 0.24967347820832708,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.354640405628571e-06,
"min": 7.354640405628571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001029649656788,
"min": 0.0001029649656788,
"max": 0.0033698908767031,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245151428571429,
"min": 0.10245151428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343212,
"min": 1.3886848,
"max": 2.485018,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002549062771428573,
"min": 0.0002549062771428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035686878800000018,
"min": 0.0035686878800000018,
"max": 0.11234736031,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01559495460242033,
"min": 0.01559495460242033,
"max": 0.5800966620445251,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21832937002182007,
"min": 0.21832937002182007,
"max": 4.060676574707031,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 402.5,
"min": 355.7439024390244,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31395.0,
"min": 16763.0,
"max": 33796.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.366666645862353,
"min": -0.9999936006722911,
"max": 1.5954633954821564,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 106.59999837726355,
"min": -30.999801620841026,
"max": 130.82799842953682,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.366666645862353,
"min": -0.9999936006722911,
"max": 1.5954633954821564,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 106.59999837726355,
"min": -30.999801620841026,
"max": 130.82799842953682,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06418764727333418,
"min": 0.05911815181060796,
"max": 12.299280141863752,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.0066364873200655,
"min": 4.760508427745663,
"max": 209.0877624116838,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687756102",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687758259"
},
"total": 2157.539778556,
"count": 1,
"self": 0.5396797139992486,
"children": {
"run_training.setup": {
"total": 0.03856886500011569,
"count": 1,
"self": 0.03856886500011569
},
"TrainerController.start_learning": {
"total": 2156.9615299770003,
"count": 1,
"self": 1.4410516570160326,
"children": {
"TrainerController._reset_env": {
"total": 4.110654813999872,
"count": 1,
"self": 4.110654813999872
},
"TrainerController.advance": {
"total": 2151.316207591984,
"count": 63794,
"self": 1.4212593930224102,
"children": {
"env_step": {
"total": 1511.8703586710071,
"count": 63794,
"self": 1401.2819521610747,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.76041412397626,
"count": 63794,
"self": 4.758396318935411,
"children": {
"TorchPolicy.evaluate": {
"total": 105.00201780504085,
"count": 62552,
"self": 105.00201780504085
}
}
},
"workers": {
"total": 0.8279923859561222,
"count": 63794,
"self": 0.0,
"children": {
"worker_root": {
"total": 2151.8663873529626,
"count": 63794,
"is_parallel": true,
"self": 862.6293943669234,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017776110000795597,
"count": 1,
"is_parallel": true,
"self": 0.0005371920001380204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012404189999415394,
"count": 8,
"is_parallel": true,
"self": 0.0012404189999415394
}
}
},
"UnityEnvironment.step": {
"total": 0.04833694200010541,
"count": 1,
"is_parallel": true,
"self": 0.0005744500001583219,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004891529999895283,
"count": 1,
"is_parallel": true,
"self": 0.0004891529999895283
},
"communicator.exchange": {
"total": 0.04522051300000385,
"count": 1,
"is_parallel": true,
"self": 0.04522051300000385
},
"steps_from_proto": {
"total": 0.0020528259999537113,
"count": 1,
"is_parallel": true,
"self": 0.0005551400001877482,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001497685999765963,
"count": 8,
"is_parallel": true,
"self": 0.001497685999765963
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1289.2369929860392,
"count": 63793,
"is_parallel": true,
"self": 32.78914919804788,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.470799334000503,
"count": 63793,
"is_parallel": true,
"self": 22.470799334000503
},
"communicator.exchange": {
"total": 1134.332310628964,
"count": 63793,
"is_parallel": true,
"self": 1134.332310628964
},
"steps_from_proto": {
"total": 99.64473382502683,
"count": 63793,
"is_parallel": true,
"self": 19.583462106054185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.06127171897265,
"count": 510344,
"is_parallel": true,
"self": 80.06127171897265
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 638.0245895279545,
"count": 63794,
"self": 2.7446232089544083,
"children": {
"process_trajectory": {
"total": 107.8393611290021,
"count": 63794,
"self": 107.63674225600198,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2026188730001195,
"count": 2,
"self": 0.2026188730001195
}
}
},
"_update_policy": {
"total": 527.440605189998,
"count": 451,
"self": 337.9720266579841,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.4685785320139,
"count": 22803,
"self": 189.4685785320139
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0339999789721332e-06,
"count": 1,
"self": 1.0339999789721332e-06
},
"TrainerController._save_models": {
"total": 0.09361488000013196,
"count": 1,
"self": 0.0014174369998727343,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09219744300025923,
"count": 1,
"self": 0.09219744300025923
}
}
}
}
}
}
}