testpyramidsrnd/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6862469911575317,
"min": 0.6862469911575317,
"max": 1.412160038948059,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 20554.470703125,
"min": 20554.470703125,
"max": 42839.28515625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479902.0,
"min": 29952.0,
"max": 479902.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479902.0,
"min": 29952.0,
"max": 479902.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.043696556240320206,
"min": -0.11794018745422363,
"max": -0.029356027022004128,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.53087043762207,
"min": -28.659465789794922,
"max": -6.957378387451172,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.034689053893089294,
"min": 0.023891914635896683,
"max": 0.49538207054138184,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.360061645507812,
"min": 5.7340593338012695,
"max": 117.40554809570312,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0688603321515974,
"min": 0.0651057852697958,
"max": 0.07289142580433874,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9640446501223635,
"min": 0.4828183464833064,
"max": 1.0204799612607425,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.002170354041003602,
"min": 0.00017892138657097317,
"max": 0.0076368311433971875,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.03038495657405043,
"min": 0.002325978025422651,
"max": 0.05485889393251044,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.096975015297143e-05,
"min": 2.096975015297143e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029357650214160004,
"min": 0.00029357650214160004,
"max": 0.0033182419939194003,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10698988571428569,
"min": 0.10698988571428569,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4978583999999997,
"min": 1.3773696000000002,
"max": 2.4224856000000003,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007082895828571429,
"min": 0.0007082895828571429,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009916054160000002,
"min": 0.009916054160000002,
"max": 0.11062745194,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.023987699300050735,
"min": 0.023987699300050735,
"max": 0.4914165139198303,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3358277976512909,
"min": 0.3358277976512909,
"max": 3.439915657043457,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 976.6451612903226,
"min": 906.6363636363636,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30276.0,
"min": 15984.0,
"max": 32465.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6545290850823925,
"min": -1.0000000521540642,
"max": -0.4222242884112127,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -20.29040163755417,
"min": -31.992401644587517,
"max": -13.933401517570019,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6545290850823925,
"min": -1.0000000521540642,
"max": -0.4222242884112127,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -20.29040163755417,
"min": -31.992401644587517,
"max": -13.933401517570019,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.23917870455601764,
"min": 0.2368117618007642,
"max": 9.578871441073716,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.414539841236547,
"min": 7.414539841236547,
"max": 153.26194305717945,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1658511186",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1658512173"
},
"total": 987.0061175570002,
"count": 1,
"self": 0.4839275640002825,
"children": {
"run_training.setup": {
"total": 0.04233199399993737,
"count": 1,
"self": 0.04233199399993737
},
"TrainerController.start_learning": {
"total": 986.4798579989999,
"count": 1,
"self": 0.7051505339910591,
"children": {
"TrainerController._reset_env": {
"total": 9.283431921000101,
"count": 1,
"self": 9.283431921000101
},
"TrainerController.advance": {
"total": 976.3964635390088,
"count": 31589,
"self": 0.7438110289948554,
"children": {
"env_step": {
"total": 607.2333256690127,
"count": 31589,
"self": 551.3323171290245,
"children": {
"SubprocessEnvManager._take_step": {
"total": 55.51560935600742,
"count": 31589,
"self": 2.3960192740006505,
"children": {
"TorchPolicy.evaluate": {
"total": 53.119590082006766,
"count": 31315,
"self": 18.076592242019046,
"children": {
"TorchPolicy.sample_actions": {
"total": 35.04299783998772,
"count": 31315,
"self": 35.04299783998772
}
}
}
}
},
"workers": {
"total": 0.38539918398078044,
"count": 31589,
"self": 0.0,
"children": {
"worker_root": {
"total": 984.2733045910014,
"count": 31589,
"is_parallel": true,
"self": 484.98025382802155,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006300643000031414,
"count": 1,
"is_parallel": true,
"self": 0.004936916000019664,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013637270000117496,
"count": 8,
"is_parallel": true,
"self": 0.0013637270000117496
}
}
},
"UnityEnvironment.step": {
"total": 0.04461161400001856,
"count": 1,
"is_parallel": true,
"self": 0.00048283399996762455,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045131400008813216,
"count": 1,
"is_parallel": true,
"self": 0.00045131400008813216
},
"communicator.exchange": {
"total": 0.04208954399996401,
"count": 1,
"is_parallel": true,
"self": 0.04208954399996401
},
"steps_from_proto": {
"total": 0.0015879219999987981,
"count": 1,
"is_parallel": true,
"self": 0.0004250099999580925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011629120000407056,
"count": 8,
"is_parallel": true,
"self": 0.0011629120000407056
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 499.2930507629799,
"count": 31588,
"is_parallel": true,
"self": 14.210155919980025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.018304755014583,
"count": 31588,
"is_parallel": true,
"self": 12.018304755014583
},
"communicator.exchange": {
"total": 424.94029096798727,
"count": 31588,
"is_parallel": true,
"self": 424.94029096798727
},
"steps_from_proto": {
"total": 48.124299119998,
"count": 31588,
"is_parallel": true,
"self": 11.99372052398246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.13057859601554,
"count": 252704,
"is_parallel": true,
"self": 36.13057859601554
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 368.4193268410012,
"count": 31589,
"self": 1.2559131539493364,
"children": {
"process_trajectory": {
"total": 84.08955183004991,
"count": 31589,
"self": 83.97182682604989,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11772500400002173,
"count": 1,
"self": 0.11772500400002173
}
}
},
"_update_policy": {
"total": 283.07386185700193,
"count": 217,
"self": 109.91755481000212,
"children": {
"TorchPPOOptimizer.update": {
"total": 173.1563070469998,
"count": 11400,
"self": 173.1563070469998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0450000900164014e-06,
"count": 1,
"self": 1.0450000900164014e-06
},
"TrainerController._save_models": {
"total": 0.0948109599999043,
"count": 1,
"self": 0.0015478959999200015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0932630639999843,
"count": 1,
"self": 0.0932630639999843
}
}
}
}
}
}
}