{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2128964066505432,
"min": 0.21030749380588531,
"max": 1.5014017820358276,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6417.54931640625,
"min": 6352.96875,
"max": 45546.5234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989942.0,
"min": 29939.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989942.0,
"min": 29939.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.31374651193618774,
"min": -0.08951158821582794,
"max": 0.31374651193618774,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 81.5740966796875,
"min": -21.66180419921875,
"max": 81.5740966796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004393260460346937,
"min": -0.012115906924009323,
"max": 0.23532558977603912,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.1422476768493652,
"min": -3.1380198001861572,
"max": 56.47814178466797,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06752732259199201,
"min": 0.06522106252393847,
"max": 0.07279608260828112,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9453825162878882,
"min": 0.5798006398301124,
"max": 1.0843246984683599,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012615614256425206,
"min": 0.0011345364677769788,
"max": 0.012615614256425206,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17661859958995288,
"min": 0.015883510548877704,
"max": 0.17661859958995288,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.41896895561429e-06,
"min": 7.41896895561429e-06,
"max": 0.0002948414642195125,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010386556537860007,
"min": 0.00010386556537860007,
"max": 0.003634944488351899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247295714285716,
"min": 0.10247295714285716,
"max": 0.1982804875,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346214000000002,
"min": 1.4346214000000002,
"max": 2.6116481,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002570484185714287,
"min": 0.0002570484185714287,
"max": 0.009828220701250001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035986778600000024,
"min": 0.0035986778600000024,
"max": 0.12118364519000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008793679997324944,
"min": 0.008624312467873096,
"max": 0.30082982778549194,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12311152368783951,
"min": 0.12224704027175903,
"max": 2.4066386222839355,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 510.0175438596491,
"min": 510.0175438596491,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29071.0,
"min": 16482.0,
"max": 33890.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1038982205485042,
"min": -0.9999750521965325,
"max": 1.1683428294158407,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 62.922198571264744,
"min": -31.99920167028904,
"max": 65.42719844728708,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1038982205485042,
"min": -0.9999750521965325,
"max": 1.1683428294158407,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 62.922198571264744,
"min": -31.99920167028904,
"max": 65.42719844728708,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04689537300784427,
"min": 0.04689537300784427,
"max": 6.564111180165234,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6730362614471233,
"min": 2.656817455776036,
"max": 111.58989006280899,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700217974",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700220260"
},
"total": 2285.5228324429995,
"count": 1,
"self": 0.43572933299901706,
"children": {
"run_training.setup": {
"total": 0.04166331700025694,
"count": 1,
"self": 0.04166331700025694
},
"TrainerController.start_learning": {
"total": 2285.0454397930002,
"count": 1,
"self": 1.5995892299156367,
"children": {
"TrainerController._reset_env": {
"total": 4.784148884999922,
"count": 1,
"self": 4.784148884999922
},
"TrainerController.advance": {
"total": 2278.586472650084,
"count": 63598,
"self": 1.7262340438956016,
"children": {
"env_step": {
"total": 1625.3484271650227,
"count": 63598,
"self": 1480.2039698739654,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.18083631203308,
"count": 63598,
"self": 4.871128785942801,
"children": {
"TorchPolicy.evaluate": {
"total": 139.30970752609028,
"count": 62570,
"self": 139.30970752609028
}
}
},
"workers": {
"total": 0.9636209790241992,
"count": 63598,
"self": 0.0,
"children": {
"worker_root": {
"total": 2279.820372581928,
"count": 63598,
"is_parallel": true,
"self": 930.9025903088218,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002430161999654956,
"count": 1,
"is_parallel": true,
"self": 0.0006677939982182579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017623680014366983,
"count": 8,
"is_parallel": true,
"self": 0.0017623680014366983
}
}
},
"UnityEnvironment.step": {
"total": 0.05362268099997891,
"count": 1,
"is_parallel": true,
"self": 0.0005593279997810896,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005006799997318012,
"count": 1,
"is_parallel": true,
"self": 0.0005006799997318012
},
"communicator.exchange": {
"total": 0.04883407600027567,
"count": 1,
"is_parallel": true,
"self": 0.04883407600027567
},
"steps_from_proto": {
"total": 0.003728597000190348,
"count": 1,
"is_parallel": true,
"self": 0.0003880680005750037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033405289996153442,
"count": 8,
"is_parallel": true,
"self": 0.0033405289996153442
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1348.9177822731062,
"count": 63597,
"is_parallel": true,
"self": 35.88211202110733,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.179195308036924,
"count": 63597,
"is_parallel": true,
"self": 26.179195308036924
},
"communicator.exchange": {
"total": 1181.33779482701,
"count": 63597,
"is_parallel": true,
"self": 1181.33779482701
},
"steps_from_proto": {
"total": 105.51868011695205,
"count": 63597,
"is_parallel": true,
"self": 22.222502548862394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.29617756808966,
"count": 508776,
"is_parallel": true,
"self": 83.29617756808966
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 651.5118114411657,
"count": 63598,
"self": 3.1603776352335444,
"children": {
"process_trajectory": {
"total": 129.76795496093155,
"count": 63598,
"self": 129.5387627909322,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22919216999935088,
"count": 2,
"self": 0.22919216999935088
}
}
},
"_update_policy": {
"total": 518.5834788450006,
"count": 457,
"self": 308.4481009559704,
"children": {
"TorchPPOOptimizer.update": {
"total": 210.13537788903022,
"count": 22767,
"self": 210.13537788903022
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.960003808373585e-07,
"count": 1,
"self": 9.960003808373585e-07
},
"TrainerController._save_models": {
"total": 0.07522803200026829,
"count": 1,
"self": 0.0012851960009356844,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0739428359993326,
"count": 1,
"self": 0.0739428359993326
}
}
}
}
}
}
}