{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.23059949278831482,
"min": 0.2092013955116272,
"max": 1.4689029455184937,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6833.1240234375,
"min": 6235.875,
"max": 44560.640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989984.0,
"min": 29952.0,
"max": 989984.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989984.0,
"min": 29952.0,
"max": 989984.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6300671100616455,
"min": -0.09959281235933304,
"max": 0.6300671100616455,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.93905639648438,
"min": -24.10146141052246,
"max": 178.93905639648438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.02351466566324234,
"min": -0.02351466566324234,
"max": 0.837783932685852,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.678164958953857,
"min": -6.678164958953857,
"max": 201.06814575195312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04682658843618508,
"min": 0.043318963728822935,
"max": 0.05363043942065042,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6555722381065912,
"min": 0.3376627987419765,
"max": 0.7719579144225766,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01568849246472209,
"min": 0.0018288794884607127,
"max": 0.02307932882519838,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21963889450610927,
"min": 0.020895796373744297,
"max": 0.2568191456375643,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.643497452199998e-06,
"min": 7.643497452199998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010700896433079998,
"min": 0.00010700896433079998,
"max": 0.0033782231739257,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254780000000001,
"min": 0.10254780000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356692000000002,
"min": 1.3691136000000002,
"max": 2.5260743,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002645252199999999,
"min": 0.0002645252199999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003703353079999999,
"min": 0.003703353079999999,
"max": 0.11263482257,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.03494398295879364,
"min": 0.03494398295879364,
"max": 1.0783345699310303,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.48921576142311096,
"min": 0.48921576142311096,
"max": 7.548341751098633,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 312.89690721649487,
"min": 310.5816326530612,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30351.0,
"min": 15984.0,
"max": 32768.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6188632426973508,
"min": -1.0000000521540642,
"max": 1.6486535861934584,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 158.64859778434038,
"min": -32.000001668930054,
"max": 159.91939786076546,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6188632426973508,
"min": -1.0000000521540642,
"max": 1.6486535861934584,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 158.64859778434038,
"min": -32.000001668930054,
"max": 159.91939786076546,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.11371335865364277,
"min": 0.11371335865364277,
"max": 14.923394791781902,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 11.143909148056991,
"min": 10.510322347516194,
"max": 246.4539710879326,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713803344",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713805513"
},
"total": 2169.7593581450005,
"count": 1,
"self": 0.6222621180004353,
"children": {
"run_training.setup": {
"total": 0.05733711799985031,
"count": 1,
"self": 0.05733711799985031
},
"TrainerController.start_learning": {
"total": 2169.0797589090002,
"count": 1,
"self": 1.331322142017143,
"children": {
"TrainerController._reset_env": {
"total": 3.032007926999995,
"count": 1,
"self": 3.032007926999995
},
"TrainerController.advance": {
"total": 2164.5957668149836,
"count": 64088,
"self": 1.3580681649737016,
"children": {
"env_step": {
"total": 1644.1352480769892,
"count": 64088,
"self": 1494.3837621958332,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.95300700406915,
"count": 64088,
"self": 4.5565507709738995,
"children": {
"TorchPolicy.evaluate": {
"total": 144.39645623309525,
"count": 62552,
"self": 144.39645623309525
}
}
},
"workers": {
"total": 0.7984788770868363,
"count": 64088,
"self": 0.0,
"children": {
"worker_root": {
"total": 2164.207150963995,
"count": 64088,
"is_parallel": true,
"self": 787.0951049519731,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005625324999982695,
"count": 1,
"is_parallel": true,
"self": 0.004020959000627045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016043659993556503,
"count": 8,
"is_parallel": true,
"self": 0.0016043659993556503
}
}
},
"UnityEnvironment.step": {
"total": 0.09752607200016428,
"count": 1,
"is_parallel": true,
"self": 0.002407318000223313,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004636029998437152,
"count": 1,
"is_parallel": true,
"self": 0.0004636029998437152
},
"communicator.exchange": {
"total": 0.09308504199998424,
"count": 1,
"is_parallel": true,
"self": 0.09308504199998424
},
"steps_from_proto": {
"total": 0.001570109000113007,
"count": 1,
"is_parallel": true,
"self": 0.0003146420003758976,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012554669997371093,
"count": 8,
"is_parallel": true,
"self": 0.0012554669997371093
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1377.112046012022,
"count": 64087,
"is_parallel": true,
"self": 34.417418083825396,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.502484587011622,
"count": 64087,
"is_parallel": true,
"self": 23.502484587011622
},
"communicator.exchange": {
"total": 1220.6057320191198,
"count": 64087,
"is_parallel": true,
"self": 1220.6057320191198
},
"steps_from_proto": {
"total": 98.58641132206503,
"count": 64087,
"is_parallel": true,
"self": 19.607892513246952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.97851880881808,
"count": 512696,
"is_parallel": true,
"self": 78.97851880881808
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 519.1024505730206,
"count": 64088,
"self": 2.6458544808056104,
"children": {
"process_trajectory": {
"total": 145.5982032582151,
"count": 64088,
"self": 145.19442767721466,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4037755810004455,
"count": 2,
"self": 0.4037755810004455
}
}
},
"_update_policy": {
"total": 370.8583928339999,
"count": 450,
"self": 250.81502910500922,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.0433637289907,
"count": 11337,
"self": 120.0433637289907
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.99999429041054e-07,
"count": 1,
"self": 9.99999429041054e-07
},
"TrainerController._save_models": {
"total": 0.12066102500011766,
"count": 1,
"self": 0.0015415469997606124,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11911947800035705,
"count": 1,
"self": 0.11911947800035705
}
}
}
}
}
}
}