{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4530212879180908,
"min": 0.4530212879180908,
"max": 1.4745392799377441,
"count": 28
},
"Pyramids.Policy.Entropy.sum": {
"value": 13612.3837890625,
"min": 13612.3837890625,
"max": 44731.625,
"count": 28
},
"Pyramids.Step.mean": {
"value": 839929.0,
"min": 29923.0,
"max": 839929.0,
"count": 28
},
"Pyramids.Step.sum": {
"value": 839929.0,
"min": 29923.0,
"max": 839929.0,
"count": 28
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6553109884262085,
"min": -0.10782083868980408,
"max": 0.6553109884262085,
"count": 28
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 185.4530029296875,
"min": -26.09264373779297,
"max": 185.4530029296875,
"count": 28
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.015051504597067833,
"min": -0.027884522452950478,
"max": 0.39482107758522034,
"count": 28
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.259575843811035,
"min": -7.61247444152832,
"max": 93.57259368896484,
"count": 28
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07133219251686354,
"min": 0.06629738234221891,
"max": 0.07448596894253223,
"count": 28
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9986506952360895,
"min": 0.614651782770272,
"max": 1.0921571219417576,
"count": 28
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014269942190453763,
"min": 0.000417585136017765,
"max": 0.016432991460074903,
"count": 28
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1997791906663527,
"min": 0.00501102163221318,
"max": 0.20924611313751446,
"count": 28
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002174927846452714,
"min": 0.0002174927846452714,
"max": 0.0002982635783565851,
"count": 28
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0030448989850337996,
"min": 0.0026843722052092662,
"max": 0.004162607312464267,
"count": 28
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1724975857142857,
"min": 0.1724975857142857,
"max": 0.1994211925925926,
"count": 28
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4149662,
"min": 1.7947907333333335,
"max": 2.8875357333333334,
"count": 28
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007252508812857142,
"min": 0.007252508812857142,
"max": 0.009942177140000001,
"count": 28
},
"Pyramids.Policy.Beta.sum": {
"value": 0.10153512338,
"min": 0.08947959426000002,
"max": 0.13876481976000002,
"count": 28
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009404017589986324,
"min": 0.009403275325894356,
"max": 0.44545331597328186,
"count": 28
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1316562443971634,
"min": 0.13164585828781128,
"max": 4.009079933166504,
"count": 28
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 285.3009708737864,
"min": 285.3009708737864,
"max": 991.1764705882352,
"count": 28
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29386.0,
"min": 16850.0,
"max": 33931.0,
"count": 28
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6952795885430956,
"min": -0.9265394445621606,
"max": 1.6952795885430956,
"count": 28
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 174.61379761993885,
"min": -30.5758016705513,
"max": 174.61379761993885,
"count": 28
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6952795885430956,
"min": -0.9265394445621606,
"max": 1.6952795885430956,
"count": 28
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 174.61379761993885,
"min": -30.5758016705513,
"max": 174.61379761993885,
"count": 28
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027941982839674716,
"min": 0.027941982839674716,
"max": 10.727661699056625,
"count": 28
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8780242324864957,
"min": 2.7949321906198747,
"max": 182.37024888396263,
"count": 28
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 28
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 28
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684981242",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684983173"
},
"total": 1931.593726478,
"count": 1,
"self": 0.5274269509998248,
"children": {
"run_training.setup": {
"total": 0.04259902300009344,
"count": 1,
"self": 0.04259902300009344
},
"TrainerController.start_learning": {
"total": 1931.023700504,
"count": 1,
"self": 1.1416421229835123,
"children": {
"TrainerController._reset_env": {
"total": 3.9345341390001067,
"count": 1,
"self": 3.9345341390001067
},
"TrainerController.advance": {
"total": 1925.9462294010166,
"count": 54952,
"self": 1.1571584419650662,
"children": {
"env_step": {
"total": 1366.6647152060104,
"count": 54952,
"self": 1273.0030594940122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 92.9809465459798,
"count": 54952,
"self": 4.089458481962765,
"children": {
"TorchPolicy.evaluate": {
"total": 88.89148806401704,
"count": 53725,
"self": 88.89148806401704
}
}
},
"workers": {
"total": 0.6807091660184597,
"count": 54951,
"self": 0.0,
"children": {
"worker_root": {
"total": 1926.7442980209464,
"count": 54951,
"is_parallel": true,
"self": 751.218667466914,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018132830000467948,
"count": 1,
"is_parallel": true,
"self": 0.0005797229996460374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012335600004007574,
"count": 8,
"is_parallel": true,
"self": 0.0012335600004007574
}
}
},
"UnityEnvironment.step": {
"total": 0.047585327999968285,
"count": 1,
"is_parallel": true,
"self": 0.0005638200000248617,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005202790000566893,
"count": 1,
"is_parallel": true,
"self": 0.0005202790000566893
},
"communicator.exchange": {
"total": 0.044637645999955566,
"count": 1,
"is_parallel": true,
"self": 0.044637645999955566
},
"steps_from_proto": {
"total": 0.0018635829999311682,
"count": 1,
"is_parallel": true,
"self": 0.00039176299992504937,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014718200000061188,
"count": 8,
"is_parallel": true,
"self": 0.0014718200000061188
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1175.5256305540324,
"count": 54950,
"is_parallel": true,
"self": 27.44596810098642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.481681209019598,
"count": 54950,
"is_parallel": true,
"self": 19.481681209019598
},
"communicator.exchange": {
"total": 1044.8769958690586,
"count": 54950,
"is_parallel": true,
"self": 1044.8769958690586
},
"steps_from_proto": {
"total": 83.72098537496777,
"count": 54950,
"is_parallel": true,
"self": 16.89457854696684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.82640682800093,
"count": 439600,
"is_parallel": true,
"self": 66.82640682800093
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 558.1243557530411,
"count": 54951,
"self": 2.2870294229937826,
"children": {
"process_trajectory": {
"total": 93.29248101004737,
"count": 54951,
"self": 93.14386159304763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14861941699973613,
"count": 1,
"self": 0.14861941699973613
}
}
},
"_update_policy": {
"total": 462.5448453199999,
"count": 394,
"self": 300.76553456998704,
"children": {
"TorchPPOOptimizer.update": {
"total": 161.77931075001288,
"count": 19581,
"self": 161.77931075001288
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3180001587898005e-06,
"count": 1,
"self": 1.3180001587898005e-06
},
"TrainerController._save_models": {
"total": 0.001293522999731067,
"count": 1,
"self": 2.678099963304703e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.00126674200009802,
"count": 1,
"self": 0.00126674200009802
}
}
}
}
}
}
}