{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4333283603191376,
"min": 0.4333283603191376,
"max": 1.4513765573501587,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13041.4501953125,
"min": 13041.4501953125,
"max": 44028.9609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989955.0,
"min": 29954.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989955.0,
"min": 29954.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6434844732284546,
"min": -0.21459785103797913,
"max": 0.646626353263855,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 181.46261596679688,
"min": -51.07428741455078,
"max": 183.6418914794922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0046362304128706455,
"min": -0.0329447016119957,
"max": 0.20006230473518372,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3074169158935547,
"min": -9.092737197875977,
"max": 47.614830017089844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06726432779812187,
"min": 0.0652045147837184,
"max": 0.07400026815594174,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9417005891737062,
"min": 0.46093137402203865,
"max": 1.0545143190042032,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01569777039237254,
"min": 0.00012193852079089874,
"max": 0.01691390941448931,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21976878549321555,
"min": 0.0015852007702816836,
"max": 0.24319581939683607,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.734675993235715e-06,
"min": 7.734675993235715e-06,
"max": 0.0002952383158729429,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010828546390530001,
"min": 0.00010828546390530001,
"max": 0.0034932712355763,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257819285714287,
"min": 0.10257819285714287,
"max": 0.19841277142857144,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360947000000002,
"min": 1.3888894,
"max": 2.4843749,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002675614664285715,
"min": 0.0002675614664285715,
"max": 0.009841435865714286,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037458605300000006,
"min": 0.0037458605300000006,
"max": 0.11645592763000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008233352564275265,
"min": 0.007951054722070694,
"max": 0.32259663939476013,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11526694148778915,
"min": 0.11131476610898972,
"max": 2.258176565170288,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 319.6938775510204,
"min": 284.19191919191917,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31330.0,
"min": 16625.0,
"max": 32504.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6598877433155264,
"min": -0.9998625521548092,
"max": 1.6956020036731103,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.6689988449216,
"min": -31.995601668953896,
"max": 179.13239781558514,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6598877433155264,
"min": -0.9998625521548092,
"max": 1.6956020036731103,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.6689988449216,
"min": -31.995601668953896,
"max": 179.13239781558514,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026690961974667034,
"min": 0.024472770212975413,
"max": 5.995643255136469,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6157142735173693,
"min": 2.302817031944869,
"max": 101.92593533731997,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677078600",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677080897"
},
"total": 2297.3749748360005,
"count": 1,
"self": 0.7842815580006572,
"children": {
"run_training.setup": {
"total": 0.11616144399999939,
"count": 1,
"self": 0.11616144399999939
},
"TrainerController.start_learning": {
"total": 2296.474531834,
"count": 1,
"self": 1.2617642349769085,
"children": {
"TrainerController._reset_env": {
"total": 7.330515240000068,
"count": 1,
"self": 7.330515240000068
},
"TrainerController.advance": {
"total": 2287.7478637140234,
"count": 64009,
"self": 1.3035556110439757,
"children": {
"env_step": {
"total": 1542.3696443549936,
"count": 64009,
"self": 1432.0021802581384,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.57144662090514,
"count": 64009,
"self": 4.502111117901904,
"children": {
"TorchPolicy.evaluate": {
"total": 105.06933550300323,
"count": 62551,
"self": 35.380679598002416,
"children": {
"TorchPolicy.sample_actions": {
"total": 69.68865590500081,
"count": 62551,
"self": 69.68865590500081
}
}
}
}
},
"workers": {
"total": 0.796017475950066,
"count": 64009,
"self": 0.0,
"children": {
"worker_root": {
"total": 2291.4635597060565,
"count": 64009,
"is_parallel": true,
"self": 969.9566315750371,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020346209998933773,
"count": 1,
"is_parallel": true,
"self": 0.0007853729998714698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012492480000219075,
"count": 8,
"is_parallel": true,
"self": 0.0012492480000219075
}
}
},
"UnityEnvironment.step": {
"total": 0.15937558699988585,
"count": 1,
"is_parallel": true,
"self": 0.0005328129998360964,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044492000006357557,
"count": 1,
"is_parallel": true,
"self": 0.00044492000006357557
},
"communicator.exchange": {
"total": 0.15682251599992014,
"count": 1,
"is_parallel": true,
"self": 0.15682251599992014
},
"steps_from_proto": {
"total": 0.00157533800006604,
"count": 1,
"is_parallel": true,
"self": 0.00040030600052887166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011750319995371683,
"count": 8,
"is_parallel": true,
"self": 0.0011750319995371683
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1321.5069281310193,
"count": 64008,
"is_parallel": true,
"self": 31.524923952073777,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.918640926977105,
"count": 64008,
"is_parallel": true,
"self": 22.918640926977105
},
"communicator.exchange": {
"total": 1173.9548895520043,
"count": 64008,
"is_parallel": true,
"self": 1173.9548895520043
},
"steps_from_proto": {
"total": 93.10847369996418,
"count": 64008,
"is_parallel": true,
"self": 21.381979484919384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.7264942150448,
"count": 512064,
"is_parallel": true,
"self": 71.7264942150448
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 744.074663747986,
"count": 64009,
"self": 2.4000900849671325,
"children": {
"process_trajectory": {
"total": 163.61284334302172,
"count": 64009,
"self": 163.35649317002117,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2563501730005555,
"count": 2,
"self": 0.2563501730005555
}
}
},
"_update_policy": {
"total": 578.0617303199972,
"count": 453,
"self": 221.35309623898797,
"children": {
"TorchPPOOptimizer.update": {
"total": 356.7086340810092,
"count": 22791,
"self": 356.7086340810092
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1319998520775698e-06,
"count": 1,
"self": 1.1319998520775698e-06
},
"TrainerController._save_models": {
"total": 0.1343875129996377,
"count": 1,
"self": 0.0021611969996229163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13222631600001478,
"count": 1,
"self": 0.13222631600001478
}
}
}
}
}
}
}